@@ -1,543 +1,539 @@
 from __future__ import absolute_import
 
 import collections
 import errno
 import hashlib
 import mmap
 import os
 import struct
 import time
 
 from mercurial.i18n import _
 from mercurial import (
     policy,
     pycompat,
     util,
     vfs as vfsmod,
 )
 from . import shallowutil
 
 osutil = policy.importmod(r'osutil')
 
 # The pack version supported by this implementation. This will need to be
 # rev'd whenever the byte format changes. Ex: changing the fanout prefix,
 # changing any of the int sizes, changing the delta algorithm, etc.
 PACKVERSIONSIZE = 1
 INDEXVERSIONSIZE = 2
 
 FANOUTSTART = INDEXVERSIONSIZE
 
 # Constant that indicates a fanout table entry hasn't been filled in. (This does
 # not get serialized)
 EMPTYFANOUT = -1
 
 # The fanout prefix is the number of bytes that can be addressed by the fanout
 # table. Example: a fanout prefix of 1 means we use the first byte of a hash to
 # look in the fanout table (which will be 2^8 entries long).
 SMALLFANOUTPREFIX = 1
 LARGEFANOUTPREFIX = 2
 
 # The number of entries in the index at which point we switch to a large fanout.
 # It is chosen to balance the linear scan through a sparse fanout, with the
 # size of the bisect in the actual index.
 # 2^16 / 8 was chosen because it trades off (1 step fanout scan + 5 step
 # bisect) with (8 step fanout scan + 1 step bisect)
 # 5 step bisect = log(2^16 / 8 / 255)  # fanout
 # 10 step fanout scan = 2^16 / (2^16 / 8)  # fanout space divided by entries
 SMALLFANOUTCUTOFF = 2**16 / 8
 
 # The amount of time to wait between checking for new packs. This prevents an
 # exception when data is moved to a new pack after the process has already
 # loaded the pack list.
 REFRESHRATE = 0.1
 
 if pycompat.isposix:
     # With glibc 2.7+ the 'e' flag uses O_CLOEXEC when opening.
     # The 'e' flag will be ignored on older versions of glibc.
     PACKOPENMODE = 'rbe'
 else:
     PACKOPENMODE = 'rb'
 
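To make the fanout arithmetic above concrete: a small sketch (illustration only, not part of the change) of how a node hash selects a fanout slot, using the same '!B'/'!H' formats that indexparams picks further down.

```python
import struct

# Hypothetical 20-byte node hash (illustration only).
node = b'\xab\xcd' + b'\x00' * 18

# Small fanout: the first byte indexes a 2**8-entry table.
smallkey = struct.unpack('!B', node[:1])[0]    # 0xab == 171

# Large fanout: the first two bytes index a 2**16-entry table.
largekey = struct.unpack('!H', node[:2])[0]    # 0xabcd == 43981

# Each slot holds a 4-byte '!I' offset into the index, so the table
# costs 2**8 * 4 bytes (small) or 2**16 * 4 bytes (large).
```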
 class _cachebackedpacks(object):
     def __init__(self, packs, cachesize):
         self._packs = set(packs)
         self._lrucache = util.lrucachedict(cachesize)
         self._lastpack = None
 
         # Avoid cold start of the cache by populating the most recent packs
         # in the cache.
         for i in reversed(range(min(cachesize, len(packs)))):
             self._movetofront(packs[i])
 
     def _movetofront(self, pack):
         # This effectively makes pack the first entry in the cache.
         self._lrucache[pack] = True
 
     def _registerlastpackusage(self):
         if self._lastpack is not None:
             self._movetofront(self._lastpack)
             self._lastpack = None
 
     def add(self, pack):
         self._registerlastpackusage()
 
         # This method will mostly be called when packs are not in cache.
         # Therefore, add the pack to the cache.
         self._movetofront(pack)
         self._packs.add(pack)
 
     def __iter__(self):
         self._registerlastpackusage()
 
         # Cache iteration is based on LRU.
         for pack in self._lrucache:
             self._lastpack = pack
             yield pack
 
         cachedpacks = set(pack for pack in self._lrucache)
         # Yield for paths not in the cache.
         for pack in self._packs - cachedpacks:
             self._lastpack = pack
             yield pack
 
         # Data not found in any pack.
         self._lastpack = None
 
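The iteration contract of _cachebackedpacks is easiest to see in use: cached packs come out most-recently-used first, then the uncached remainder. A minimal sketch (hypothetical pack names stand in for pack objects; util.lrucachedict comes from Mercurial itself):

```python
packs = ['pack-a', 'pack-b', 'pack-c']       # stand-ins for pack objects
cache = _cachebackedpacks(packs, 2)          # only 2 slots in the LRU

for pack in cache:
    if pack == 'pack-c':                     # pretend this pack had the data
        break

# The hit is recorded lazily: the next __iter__() or add() call runs
# _registerlastpackusage(), which bumps 'pack-c' to the front, so later
# lookups try it first.
```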
 class basepackstore(object):
     # Default cache size limit for the pack files.
     DEFAULTCACHESIZE = 100
 
     def __init__(self, ui, path):
         self.ui = ui
         self.path = path
 
         # lastrefresh is 0 so we'll immediately check for new packs on the first
         # failure.
         self.lastrefresh = 0
 
         packs = []
         for filepath, __, __ in self._getavailablepackfilessorted():
             try:
                 pack = self.getpack(filepath)
             except Exception as ex:
                 # An exception may be thrown if the pack file is corrupted
                 # somehow. Log a warning but keep going in this case, just
                 # skipping this pack file.
                 #
                 # If this is an ENOENT error then don't even bother logging.
                 # Someone could have removed the file since we retrieved the
                 # list of paths.
                 if getattr(ex, 'errno', None) != errno.ENOENT:
                     ui.warn(_('unable to load pack %s: %s\n') % (filepath, ex))
                 continue
             packs.append(pack)
 
         self.packs = _cachebackedpacks(packs, self.DEFAULTCACHESIZE)
 
     def _getavailablepackfiles(self):
         """For each pack file (an index/data file combo), yields:
           (full path without extension, mtime, size)
 
         mtime will be the mtime of the index/data file (whichever is newer)
         size is the combined size of index/data file
         """
         indexsuffixlen = len(self.INDEXSUFFIX)
         packsuffixlen = len(self.PACKSUFFIX)
 
         ids = set()
         sizes = collections.defaultdict(lambda: 0)
         mtimes = collections.defaultdict(lambda: [])
         try:
             for filename, type, stat in osutil.listdir(self.path, stat=True):
                 id = None
                 if filename[-indexsuffixlen:] == self.INDEXSUFFIX:
                     id = filename[:-indexsuffixlen]
                 elif filename[-packsuffixlen:] == self.PACKSUFFIX:
                     id = filename[:-packsuffixlen]
 
                 # Since we expect to have two files corresponding to each ID
                 # (the index file and the pack file), we can yield once we see
                 # it twice.
                 if id:
                     sizes[id] += stat.st_size # Sum both files' sizes together
                     mtimes[id].append(stat.st_mtime)
                     if id in ids:
                         yield (os.path.join(self.path, id), max(mtimes[id]),
                                sizes[id])
                     else:
                         ids.add(id)
         except OSError as ex:
             if ex.errno != errno.ENOENT:
                 raise
 
     def _getavailablepackfilessorted(self):
         """Like `_getavailablepackfiles`, but also sorts the files by mtime,
         yielding newest files first.
 
         This is desirable, since it is more likely newer packfiles have more
         desirable data.
         """
         files = []
         for path, mtime, size in self._getavailablepackfiles():
             files.append((mtime, size, path))
         files = sorted(files, reverse=True)
         for mtime, size, path in files:
             yield path, mtime, size
 
     def gettotalsizeandcount(self):
         """Returns the total disk size (in bytes) of all the pack files in
         this store, and the count of pack files.
 
         (This might be smaller than the total size of the ``self.path``
         directory, since this only considers fully-written pack files, and not
         temporary files or other detritus in the directory.)
         """
         totalsize = 0
         count = 0
         for __, __, size in self._getavailablepackfiles():
             totalsize += size
             count += 1
         return totalsize, count
 
     def getmetrics(self):
         """Returns metrics on the state of this store."""
         size, count = self.gettotalsizeandcount()
         return {
             'numpacks': count,
             'totalpacksize': size,
         }
 
     def getpack(self, path):
         raise NotImplementedError()
 
     def getmissing(self, keys):
         missing = keys
         for pack in self.packs:
             missing = pack.getmissing(missing)
 
             # Ensures better performance of the cache by keeping the most
             # recently accessed pack at the beginning in subsequent iterations.
             if not missing:
                 return missing
 
         if missing:
             for pack in self.refresh():
                 missing = pack.getmissing(missing)
 
         return missing
 
     def markledger(self, ledger, options=None):
         for pack in self.packs:
             pack.markledger(ledger)
 
     def markforrefresh(self):
         """Tells the store that there may be new pack files, so the next time it
         has a lookup miss it should check for new files."""
         self.lastrefresh = 0
 
     def refresh(self):
         """Checks for any new packs on disk, adds them to the main pack list,
         and returns a list of just the new packs."""
         now = time.time()
 
         # If we experience a lot of misses (like in the case of getmissing() on
         # new objects), let's only actually check disk for new stuff every once
         # in a while. Generally this code path should only ever matter when a
         # repack is going on in the background, and it should be pretty rare
         # for that to happen twice in quick succession.
         newpacks = []
         if now > self.lastrefresh + REFRESHRATE:
             self.lastrefresh = now
             previous = set(p.path for p in self.packs)
             for filepath, __, __ in self._getavailablepackfilessorted():
                 if filepath not in previous:
                     newpack = self.getpack(filepath)
                     newpacks.append(newpack)
                     self.packs.add(newpack)
 
         return newpacks
 
 class versionmixin(object):
     # Mix-in for classes with multiple supported versions
     VERSION = None
-    SUPPORTED_VERSIONS = [
+    SUPPORTED_VERSIONS = [2]
 
     def _checkversion(self, version):
         if version in self.SUPPORTED_VERSIONS:
             if self.VERSION is None:
                 # only affect this instance
                 self.VERSION = version
             elif self.VERSION != version:
                 raise RuntimeError('inconsistent version: %s' % version)
         else:
             raise RuntimeError('unsupported version: %s' % version)
 
 class basepack(versionmixin):
     # The maximum amount we should read via mmap before remapping so the old
     # pages can be released (100MB)
     MAXPAGEDIN = 100 * 1024**2
 
-    SUPPORTED_VERSIONS = [
+    SUPPORTED_VERSIONS = [2]
 
     def __init__(self, path):
         self.path = path
         self.packpath = path + self.PACKSUFFIX
         self.indexpath = path + self.INDEXSUFFIX
 
         self.indexsize = os.stat(self.indexpath).st_size
         self.datasize = os.stat(self.packpath).st_size
 
         self._index = None
         self._data = None
         self.freememory() # initialize the mmap
 
         version = struct.unpack('!B', self._data[:PACKVERSIONSIZE])[0]
         self._checkversion(version)
 
         version, config = struct.unpack('!BB', self._index[:INDEXVERSIONSIZE])
         self._checkversion(version)
 
         if 0b10000000 & config:
             self.params = indexparams(LARGEFANOUTPREFIX, version)
         else:
             self.params = indexparams(SMALLFANOUTPREFIX, version)
 
     @util.propertycache
     def _fanouttable(self):
         params = self.params
         rawfanout = self._index[FANOUTSTART:FANOUTSTART + params.fanoutsize]
         fanouttable = []
         for i in pycompat.xrange(0, params.fanoutcount):
             loc = i * 4
             fanoutentry = struct.unpack('!I', rawfanout[loc:loc + 4])[0]
             fanouttable.append(fanoutentry)
         return fanouttable
 
     @util.propertycache
     def _indexend(self):
-        if self.VERSION == 0:
-            return self.indexsize
-        else:
-            nodecount = struct.unpack_from('!Q', self._index,
-                                           self.params.indexstart - 8)[0]
-            return self.params.indexstart + nodecount * self.INDEXENTRYLENGTH
+        nodecount = struct.unpack_from('!Q', self._index,
+                                       self.params.indexstart - 8)[0]
+        return self.params.indexstart + nodecount * self.INDEXENTRYLENGTH
 
     def freememory(self):
         """Unmap and remap the memory to free it up after known expensive
         operations. Return True if self._data and self._index were reloaded.
         """
         if self._index:
             if self._pagedin < self.MAXPAGEDIN:
                 return False
 
             self._index.close()
             self._data.close()
 
         # TODO: use an opener/vfs to access these paths
         with open(self.indexpath, PACKOPENMODE) as indexfp:
             # memory-map the file, size 0 means whole file
             self._index = mmap.mmap(indexfp.fileno(), 0,
                                     access=mmap.ACCESS_READ)
         with open(self.packpath, PACKOPENMODE) as datafp:
             self._data = mmap.mmap(datafp.fileno(), 0, access=mmap.ACCESS_READ)
 
         self._pagedin = 0
         return True
 
     def getmissing(self, keys):
         raise NotImplementedError()
 
     def markledger(self, ledger, options=None):
         raise NotImplementedError()
 
     def cleanup(self, ledger):
         raise NotImplementedError()
 
     def __iter__(self):
         raise NotImplementedError()
 
     def iterentries(self):
         raise NotImplementedError()
 
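For reference while reviewing basepack.__init__ above, the two file headers decode like this (a sketch with made-up values, not part of the patch):

```python
import struct

# The pack file starts with a 1-byte version (PACKVERSIONSIZE == 1).
packheader = struct.pack('!B', 2)
version = struct.unpack('!B', packheader[:1])[0]          # 2

# The index file starts with <version><config> (INDEXVERSIONSIZE == 2).
idxheader = struct.pack('!BB', 2, 0b10000000)
version, config = struct.unpack('!BB', idxheader[:2])
uselargefanout = bool(config & 0b10000000)                # True
```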
 class mutablebasepack(versionmixin):
 
-    def __init__(self, ui, packdir, version=
+    def __init__(self, ui, packdir, version=2):
         self._checkversion(version)
+        # TODO(augie): make this configurable
+        self._compressor = 'GZ'
         opener = vfsmod.vfs(packdir)
         opener.createmode = 0o444
         self.opener = opener
 
         self.entries = {}
 
         shallowutil.mkstickygroupdir(ui, packdir)
         self.packfp, self.packpath = opener.mkstemp(
             suffix=self.PACKSUFFIX + '-tmp')
         self.idxfp, self.idxpath = opener.mkstemp(
             suffix=self.INDEXSUFFIX + '-tmp')
         self.packfp = os.fdopen(self.packfp, 'w+')
         self.idxfp = os.fdopen(self.idxfp, 'w+')
         self.sha = hashlib.sha1()
         self._closed = False
 
         # The opener provides no way of doing permission fixup on files created
         # via mkstemp, so we must fix it ourselves. We can probably fix this
         # upstream in vfs.mkstemp so we don't need to use the private method.
         opener._fixfilemode(opener.join(self.packpath))
         opener._fixfilemode(opener.join(self.idxpath))
 
         # Write header
         # TODO: make it extensible (ex: allow specifying compression algorithm,
         # a flexible key/value header, delta algorithm, fanout size, etc)
         versionbuf = struct.pack('!B', self.VERSION) # unsigned 1 byte int
         self.writeraw(versionbuf)
 
     def __enter__(self):
         return self
 
     def __exit__(self, exc_type, exc_value, traceback):
         if exc_type is None:
             self.close()
         else:
             self.abort()
 
     def abort(self):
         # Unclean exit
         self._cleantemppacks()
 
     def writeraw(self, data):
         self.packfp.write(data)
         self.sha.update(data)
 
     def close(self, ledger=None):
         if self._closed:
             return
 
         try:
             sha = self.sha.hexdigest()
             self.packfp.close()
             self.writeindex()
 
             if len(self.entries) == 0:
                 # Empty pack
                 self._cleantemppacks()
                 self._closed = True
                 return None
 
             self.opener.rename(self.packpath, sha + self.PACKSUFFIX)
             try:
                 self.opener.rename(self.idxpath, sha + self.INDEXSUFFIX)
             except Exception as ex:
                 try:
                     self.opener.unlink(sha + self.PACKSUFFIX)
                 except Exception:
                     pass
                 # Throw exception 'ex' explicitly since a normal 'raise' would
                 # potentially throw an exception from the unlink cleanup.
                 raise ex
         except Exception:
             # Clean up temp packs in all exception cases
             self._cleantemppacks()
             raise
 
         self._closed = True
         result = self.opener.join(sha)
         if ledger:
             ledger.addcreated(result)
         return result
 
     def _cleantemppacks(self):
         try:
             self.opener.unlink(self.packpath)
         except Exception:
             pass
         try:
             self.opener.unlink(self.idxpath)
         except Exception:
             pass
 
     def writeindex(self):
         rawindex = ''
 
         largefanout = len(self.entries) > SMALLFANOUTCUTOFF
         if largefanout:
             params = indexparams(LARGEFANOUTPREFIX, self.VERSION)
         else:
             params = indexparams(SMALLFANOUTPREFIX, self.VERSION)
 
         fanouttable = [EMPTYFANOUT] * params.fanoutcount
 
         # Precompute the location of each entry
         locations = {}
         count = 0
         for node in sorted(self.entries.iterkeys()):
             location = count * self.INDEXENTRYLENGTH
             locations[node] = location
             count += 1
 
             # Must use [0] on the unpack result since it's always a tuple.
             fanoutkey = struct.unpack(params.fanoutstruct,
                                       node[:params.fanoutprefix])[0]
             if fanouttable[fanoutkey] == EMPTYFANOUT:
                 fanouttable[fanoutkey] = location
 
         rawfanouttable = ''
         last = 0
         for offset in fanouttable:
             offset = offset if offset != EMPTYFANOUT else last
             last = offset
             rawfanouttable += struct.pack('!I', offset)
 
         rawentrieslength = struct.pack('!Q', len(self.entries))
 
         # The index offset is its location in the file. So it comes after the
         # 2 byte header and the fanouttable.
         rawindex = self.createindex(locations, 2 + len(rawfanouttable))
 
         self._writeheader(params)
         self.idxfp.write(rawfanouttable)
-        if self.VERSION == 1:
-            self.idxfp.write(rawentrieslength)
+        self.idxfp.write(rawentrieslength)
         self.idxfp.write(rawindex)
         self.idxfp.close()
 
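The fanout fill rule in writeindex above is subtle: empty slots inherit the previous slot's offset, so every slot remains a usable lower bound for the scan in datapack._find. A toy run (illustration only):

```python
import struct

EMPTYFANOUT = -1
fanouttable = [0, EMPTYFANOUT, 64, EMPTYFANOUT]   # toy 4-slot table

rawfanouttable = b''
last = 0
for offset in fanouttable:
    offset = offset if offset != EMPTYFANOUT else last
    last = offset
    rawfanouttable += struct.pack('!I', offset)

# Serialized offsets: 0, 0, 64, 64 -- monotonically non-decreasing.
```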
     def createindex(self, nodelocations):
         raise NotImplementedError()
 
     def _writeheader(self, indexparams):
         # Index header
         #    <version: 1 byte>
         #    <large fanout: 1 bit> # 1 means 2^16, 0 means 2^8
         #    <unused: 7 bit> # future use (compression, delta format, etc)
         config = 0
         if indexparams.fanoutprefix == LARGEFANOUTPREFIX:
             config = 0b10000000
         self.idxfp.write(struct.pack('!BB', self.VERSION, config))
 
 class indexparams(object):
     __slots__ = ('fanoutprefix', 'fanoutstruct', 'fanoutcount', 'fanoutsize',
                  'indexstart')
 
     def __init__(self, prefixsize, version):
         self.fanoutprefix = prefixsize
 
         # The struct pack format for fanout table location (i.e. the format that
         # converts the node prefix into an integer location in the fanout
         # table).
         if prefixsize == SMALLFANOUTPREFIX:
             self.fanoutstruct = '!B'
         elif prefixsize == LARGEFANOUTPREFIX:
             self.fanoutstruct = '!H'
         else:
             raise ValueError("invalid fanout prefix size: %s" % prefixsize)
 
         # The number of fanout table entries
         self.fanoutcount = 2**(prefixsize * 8)
 
         # The total bytes used by the fanout table
         self.fanoutsize = self.fanoutcount * 4
 
         self.indexstart = FANOUTSTART + self.fanoutsize
-        if version == 1:
-            # Skip the index length
-            self.indexstart += 8
+        # Skip the index length
+        self.indexstart += 8
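Putting _writeheader and indexparams together, the version-2 index layout (small fanout case) works out as follows; the assertions are a sanity sketch against the code above, not normative documentation:

```python
# offset 0: '!BB' header (version, config)     -> 2 bytes (FANOUTSTART)
# offset 2: fanout table, 2**8 entries of '!I' -> 1024 bytes
# offset 1026: entry count as '!Q'             -> 8 bytes
# offset 1034: fixed-width index entries       -> params.indexstart
params = indexparams(SMALLFANOUTPREFIX, 2)
assert params.fanoutcount == 2**8
assert params.fanoutsize == 2**8 * 4
assert params.indexstart == 2 + 1024 + 8       # 1034
```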
@@ -1,470 +1,456 b'' | |||||
1 | from __future__ import absolute_import |
|
1 | from __future__ import absolute_import | |
2 |
|
2 | |||
3 | import struct |
|
3 | import struct | |
4 |
|
4 | |||
5 | from mercurial.node import hex, nullid |
|
5 | from mercurial.node import hex, nullid | |
6 | from mercurial.i18n import _ |
|
6 | from mercurial.i18n import _ | |
7 | from mercurial import ( |
|
7 | from mercurial import ( | |
8 | error, |
|
|||
9 | pycompat, |
|
8 | pycompat, | |
10 | util, |
|
9 | util, | |
11 | ) |
|
10 | ) | |
12 | from . import ( |
|
11 | from . import ( | |
13 | basepack, |
|
12 | basepack, | |
14 | constants, |
|
13 | constants, | |
15 | lz4wrapper, |
|
14 | lz4wrapper, | |
16 | shallowutil, |
|
15 | shallowutil, | |
17 | ) |
|
16 | ) | |
18 |
|
17 | |||
19 | NODELENGTH = 20 |
|
18 | NODELENGTH = 20 | |
20 |
|
19 | |||
21 | # The indicator value in the index for a fulltext entry. |
|
20 | # The indicator value in the index for a fulltext entry. | |
22 | FULLTEXTINDEXMARK = -1 |
|
21 | FULLTEXTINDEXMARK = -1 | |
23 | NOBASEINDEXMARK = -2 |
|
22 | NOBASEINDEXMARK = -2 | |
24 |
|
23 | |||
25 | INDEXSUFFIX = '.dataidx' |
|
24 | INDEXSUFFIX = '.dataidx' | |
26 | PACKSUFFIX = '.datapack' |
|
25 | PACKSUFFIX = '.datapack' | |
27 |
|
26 | |||
28 | class datapackstore(basepack.basepackstore): |
|
27 | class datapackstore(basepack.basepackstore): | |
29 | INDEXSUFFIX = INDEXSUFFIX |
|
28 | INDEXSUFFIX = INDEXSUFFIX | |
30 | PACKSUFFIX = PACKSUFFIX |
|
29 | PACKSUFFIX = PACKSUFFIX | |
31 |
|
30 | |||
32 | def __init__(self, ui, path): |
|
31 | def __init__(self, ui, path): | |
33 | super(datapackstore, self).__init__(ui, path) |
|
32 | super(datapackstore, self).__init__(ui, path) | |
34 |
|
33 | |||
35 | def getpack(self, path): |
|
34 | def getpack(self, path): | |
36 | return datapack(path) |
|
35 | return datapack(path) | |
37 |
|
36 | |||
38 | def get(self, name, node): |
|
37 | def get(self, name, node): | |
39 | raise RuntimeError("must use getdeltachain with datapackstore") |
|
38 | raise RuntimeError("must use getdeltachain with datapackstore") | |
40 |
|
39 | |||
41 | def getmeta(self, name, node): |
|
40 | def getmeta(self, name, node): | |
42 | for pack in self.packs: |
|
41 | for pack in self.packs: | |
43 | try: |
|
42 | try: | |
44 | return pack.getmeta(name, node) |
|
43 | return pack.getmeta(name, node) | |
45 | except KeyError: |
|
44 | except KeyError: | |
46 | pass |
|
45 | pass | |
47 |
|
46 | |||
48 | for pack in self.refresh(): |
|
47 | for pack in self.refresh(): | |
49 | try: |
|
48 | try: | |
50 | return pack.getmeta(name, node) |
|
49 | return pack.getmeta(name, node) | |
51 | except KeyError: |
|
50 | except KeyError: | |
52 | pass |
|
51 | pass | |
53 |
|
52 | |||
54 | raise KeyError((name, hex(node))) |
|
53 | raise KeyError((name, hex(node))) | |
55 |
|
54 | |||
56 | def getdelta(self, name, node): |
|
55 | def getdelta(self, name, node): | |
57 | for pack in self.packs: |
|
56 | for pack in self.packs: | |
58 | try: |
|
57 | try: | |
59 | return pack.getdelta(name, node) |
|
58 | return pack.getdelta(name, node) | |
60 | except KeyError: |
|
59 | except KeyError: | |
61 | pass |
|
60 | pass | |
62 |
|
61 | |||
63 | for pack in self.refresh(): |
|
62 | for pack in self.refresh(): | |
64 | try: |
|
63 | try: | |
65 | return pack.getdelta(name, node) |
|
64 | return pack.getdelta(name, node) | |
66 | except KeyError: |
|
65 | except KeyError: | |
67 | pass |
|
66 | pass | |
68 |
|
67 | |||
69 | raise KeyError((name, hex(node))) |
|
68 | raise KeyError((name, hex(node))) | |
70 |
|
69 | |||
71 | def getdeltachain(self, name, node): |
|
70 | def getdeltachain(self, name, node): | |
72 | for pack in self.packs: |
|
71 | for pack in self.packs: | |
73 | try: |
|
72 | try: | |
74 | return pack.getdeltachain(name, node) |
|
73 | return pack.getdeltachain(name, node) | |
75 | except KeyError: |
|
74 | except KeyError: | |
76 | pass |
|
75 | pass | |
77 |
|
76 | |||
78 | for pack in self.refresh(): |
|
77 | for pack in self.refresh(): | |
79 | try: |
|
78 | try: | |
80 | return pack.getdeltachain(name, node) |
|
79 | return pack.getdeltachain(name, node) | |
81 | except KeyError: |
|
80 | except KeyError: | |
82 | pass |
|
81 | pass | |
83 |
|
82 | |||
84 | raise KeyError((name, hex(node))) |
|
83 | raise KeyError((name, hex(node))) | |
85 |
|
84 | |||
86 | def add(self, name, node, data): |
|
85 | def add(self, name, node, data): | |
87 | raise RuntimeError("cannot add to datapackstore") |
|
86 | raise RuntimeError("cannot add to datapackstore") | |
88 |
|
87 | |||
89 | class datapack(basepack.basepack): |
|
88 | class datapack(basepack.basepack): | |
90 | INDEXSUFFIX = INDEXSUFFIX |
|
89 | INDEXSUFFIX = INDEXSUFFIX | |
91 | PACKSUFFIX = PACKSUFFIX |
|
90 | PACKSUFFIX = PACKSUFFIX | |
92 |
|
91 | |||
93 | # Format is <node><delta offset><pack data offset><pack data size> |
|
92 | # Format is <node><delta offset><pack data offset><pack data size> | |
94 | # See the mutabledatapack doccomment for more details. |
|
93 | # See the mutabledatapack doccomment for more details. | |
95 | INDEXFORMAT = '!20siQQ' |
|
94 | INDEXFORMAT = '!20siQQ' | |
96 | INDEXENTRYLENGTH = 40 |
|
95 | INDEXENTRYLENGTH = 40 | |
97 |
|
96 | |||
98 |
SUPPORTED_VERSIONS = [ |
|
97 | SUPPORTED_VERSIONS = [2] | |
99 |
|
98 | |||
100 | def getmissing(self, keys): |
|
99 | def getmissing(self, keys): | |
101 | missing = [] |
|
100 | missing = [] | |
102 | for name, node in keys: |
|
101 | for name, node in keys: | |
103 | value = self._find(node) |
|
102 | value = self._find(node) | |
104 | if not value: |
|
103 | if not value: | |
105 | missing.append((name, node)) |
|
104 | missing.append((name, node)) | |
106 |
|
105 | |||
107 | return missing |
|
106 | return missing | |
108 |
|
107 | |||
109 | def get(self, name, node): |
|
108 | def get(self, name, node): | |
110 | raise RuntimeError("must use getdeltachain with datapack (%s:%s)" |
|
109 | raise RuntimeError("must use getdeltachain with datapack (%s:%s)" | |
111 | % (name, hex(node))) |
|
110 | % (name, hex(node))) | |
112 |
|
111 | |||
113 | def getmeta(self, name, node): |
|
112 | def getmeta(self, name, node): | |
114 | value = self._find(node) |
|
113 | value = self._find(node) | |
115 | if value is None: |
|
114 | if value is None: | |
116 | raise KeyError((name, hex(node))) |
|
115 | raise KeyError((name, hex(node))) | |
117 |
|
116 | |||
118 | # version 0 does not support metadata |
|
|||
119 | if self.VERSION == 0: |
|
|||
120 | return {} |
|
|||
121 |
|
||||
122 | node, deltabaseoffset, offset, size = value |
|
117 | node, deltabaseoffset, offset, size = value | |
123 | rawentry = self._data[offset:offset + size] |
|
118 | rawentry = self._data[offset:offset + size] | |
124 |
|
119 | |||
125 | # see docstring of mutabledatapack for the format |
|
120 | # see docstring of mutabledatapack for the format | |
126 | offset = 0 |
|
121 | offset = 0 | |
127 | offset += struct.unpack_from('!H', rawentry, offset)[0] + 2 # filename |
|
122 | offset += struct.unpack_from('!H', rawentry, offset)[0] + 2 # filename | |
128 | offset += 40 # node, deltabase node |
|
123 | offset += 40 # node, deltabase node | |
129 | offset += struct.unpack_from('!Q', rawentry, offset)[0] + 8 # delta |
|
124 | offset += struct.unpack_from('!Q', rawentry, offset)[0] + 8 # delta | |
130 |
|
125 | |||
131 | metalen = struct.unpack_from('!I', rawentry, offset)[0] |
|
126 | metalen = struct.unpack_from('!I', rawentry, offset)[0] | |
132 | offset += 4 |
|
127 | offset += 4 | |
133 |
|
128 | |||
134 | meta = shallowutil.parsepackmeta(rawentry[offset:offset + metalen]) |
|
129 | meta = shallowutil.parsepackmeta(rawentry[offset:offset + metalen]) | |
135 |
|
130 | |||
136 | return meta |
|
131 | return meta | |
137 |
|
132 | |||
138 | def getdelta(self, name, node): |
|
133 | def getdelta(self, name, node): | |
139 | value = self._find(node) |
|
134 | value = self._find(node) | |
140 | if value is None: |
|
135 | if value is None: | |
141 | raise KeyError((name, hex(node))) |
|
136 | raise KeyError((name, hex(node))) | |
142 |
|
137 | |||
143 | node, deltabaseoffset, offset, size = value |
|
138 | node, deltabaseoffset, offset, size = value | |
144 | entry = self._readentry(offset, size, getmeta=True) |
|
139 | entry = self._readentry(offset, size, getmeta=True) | |
145 | filename, node, deltabasenode, delta, meta = entry |
|
140 | filename, node, deltabasenode, delta, meta = entry | |
146 |
|
141 | |||
147 | # If we've read a lot of data from the mmap, free some memory. |
|
142 | # If we've read a lot of data from the mmap, free some memory. | |
148 | self.freememory() |
|
143 | self.freememory() | |
149 |
|
144 | |||
150 | return delta, filename, deltabasenode, meta |
|
145 | return delta, filename, deltabasenode, meta | |
151 |
|
146 | |||
152 | def getdeltachain(self, name, node): |
|
147 | def getdeltachain(self, name, node): | |
153 | value = self._find(node) |
|
148 | value = self._find(node) | |
154 | if value is None: |
|
149 | if value is None: | |
155 | raise KeyError((name, hex(node))) |
|
150 | raise KeyError((name, hex(node))) | |
156 |
|
151 | |||
157 | params = self.params |
|
152 | params = self.params | |
158 |
|
153 | |||
159 | # Precompute chains |
|
154 | # Precompute chains | |
160 | chain = [value] |
|
155 | chain = [value] | |
161 | deltabaseoffset = value[1] |
|
156 | deltabaseoffset = value[1] | |
162 | entrylen = self.INDEXENTRYLENGTH |
|
157 | entrylen = self.INDEXENTRYLENGTH | |
163 | while (deltabaseoffset != FULLTEXTINDEXMARK |
|
158 | while (deltabaseoffset != FULLTEXTINDEXMARK | |
164 | and deltabaseoffset != NOBASEINDEXMARK): |
|
159 | and deltabaseoffset != NOBASEINDEXMARK): | |
165 | loc = params.indexstart + deltabaseoffset |
|
160 | loc = params.indexstart + deltabaseoffset | |
166 | value = struct.unpack(self.INDEXFORMAT, |
|
161 | value = struct.unpack(self.INDEXFORMAT, | |
167 | self._index[loc:loc + entrylen]) |
|
162 | self._index[loc:loc + entrylen]) | |
168 | deltabaseoffset = value[1] |
|
163 | deltabaseoffset = value[1] | |
169 | chain.append(value) |
|
164 | chain.append(value) | |
170 |
|
165 | |||
171 | # Read chain data |
|
166 | # Read chain data | |
172 | deltachain = [] |
|
167 | deltachain = [] | |
173 | for node, deltabaseoffset, offset, size in chain: |
|
168 | for node, deltabaseoffset, offset, size in chain: | |
174 | filename, node, deltabasenode, delta = self._readentry(offset, size) |
|
169 | filename, node, deltabasenode, delta = self._readentry(offset, size) | |
175 | deltachain.append((filename, node, filename, deltabasenode, delta)) |
|
170 | deltachain.append((filename, node, filename, deltabasenode, delta)) | |
176 |
|
171 | |||
177 | # If we've read a lot of data from the mmap, free some memory. |
|
172 | # If we've read a lot of data from the mmap, free some memory. | |
178 | self.freememory() |
|
173 | self.freememory() | |
179 |
|
174 | |||
180 | return deltachain |
|
175 | return deltachain | |
181 |
|
176 | |||
182 | def _readentry(self, offset, size, getmeta=False): |
|
177 | def _readentry(self, offset, size, getmeta=False): | |
183 | rawentry = self._data[offset:offset + size] |
|
178 | rawentry = self._data[offset:offset + size] | |
184 | self._pagedin += len(rawentry) |
|
179 | self._pagedin += len(rawentry) | |
185 |
|
180 | |||
186 | # <2 byte len> + <filename> |
|
181 | # <2 byte len> + <filename> | |
187 | lengthsize = 2 |
|
182 | lengthsize = 2 | |
188 | filenamelen = struct.unpack('!H', rawentry[:2])[0] |
|
183 | filenamelen = struct.unpack('!H', rawentry[:2])[0] | |
189 | filename = rawentry[lengthsize:lengthsize + filenamelen] |
|
184 | filename = rawentry[lengthsize:lengthsize + filenamelen] | |
190 |
|
185 | |||
191 | # <20 byte node> + <20 byte deltabase> |
|
186 | # <20 byte node> + <20 byte deltabase> | |
192 | nodestart = lengthsize + filenamelen |
|
187 | nodestart = lengthsize + filenamelen | |
193 | deltabasestart = nodestart + NODELENGTH |
|
188 | deltabasestart = nodestart + NODELENGTH | |
194 | node = rawentry[nodestart:deltabasestart] |
|
189 | node = rawentry[nodestart:deltabasestart] | |
195 | deltabasenode = rawentry[deltabasestart:deltabasestart + NODELENGTH] |
|
190 | deltabasenode = rawentry[deltabasestart:deltabasestart + NODELENGTH] | |
196 |
|
191 | |||
197 | # <8 byte len> + <delta> |
|
192 | # <8 byte len> + <delta> | |
198 | deltastart = deltabasestart + NODELENGTH |
|
193 | deltastart = deltabasestart + NODELENGTH | |
199 | rawdeltalen = rawentry[deltastart:deltastart + 8] |
|
194 | rawdeltalen = rawentry[deltastart:deltastart + 8] | |
200 | deltalen = struct.unpack('!Q', rawdeltalen)[0] |
|
195 | deltalen = struct.unpack('!Q', rawdeltalen)[0] | |
201 |
|
196 | |||
202 | delta = rawentry[deltastart + 8:deltastart + 8 + deltalen] |
|
197 | delta = rawentry[deltastart + 8:deltastart + 8 + deltalen] | |
203 | delta = lz4wrapper.lz4decompress(delta) |
|
198 | delta = lz4wrapper.lz4decompress(delta) | |
204 |
|
199 | |||
205 | if getmeta: |
|
200 | if getmeta: | |
206 | if self.VERSION == 0: |
|
201 | metastart = deltastart + 8 + deltalen | |
207 | meta = {} |
|
202 | metalen = struct.unpack_from('!I', rawentry, metastart)[0] | |
208 | else: |
|
|||
209 | metastart = deltastart + 8 + deltalen |
|
|||
210 | metalen = struct.unpack_from('!I', rawentry, metastart)[0] |
|
|||
211 |
|
203 | |||
212 |
|
             rawmeta = rawentry[metastart + 4:metastart + 4 + metalen]
             meta = shallowutil.parsepackmeta(rawmeta)
             return filename, node, deltabasenode, delta, meta
         else:
             return filename, node, deltabasenode, delta

     def add(self, name, node, data):
         raise RuntimeError("cannot add to datapack (%s:%s)" % (name, node))

     def _find(self, node):
         params = self.params
         fanoutkey = struct.unpack(params.fanoutstruct,
                                   node[:params.fanoutprefix])[0]
         fanout = self._fanouttable

         start = fanout[fanoutkey] + params.indexstart
         indexend = self._indexend

         # Scan forward to find the first non-same entry, which is the upper
         # bound.
         for i in pycompat.xrange(fanoutkey + 1, params.fanoutcount):
             end = fanout[i] + params.indexstart
             if end != start:
                 break
         else:
             end = indexend

         # Bisect between start and end to find node
         index = self._index
         startnode = index[start:start + NODELENGTH]
         endnode = index[end:end + NODELENGTH]
         entrylen = self.INDEXENTRYLENGTH
         if startnode == node:
             entry = index[start:start + entrylen]
         elif endnode == node:
             entry = index[end:end + entrylen]
         else:
             while start < end - entrylen:
                 mid = start + (end - start) / 2
                 mid = mid - ((mid - params.indexstart) % entrylen)
                 midnode = index[mid:mid + NODELENGTH]
                 if midnode == node:
                     entry = index[mid:mid + entrylen]
                     break
                 if node > midnode:
                     start = mid
                     startnode = midnode
                 elif node < midnode:
                     end = mid
                     endnode = midnode
             else:
                 return None

         return struct.unpack(self.INDEXFORMAT, entry)

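The lookup above is the core read path: the fanout table turns the node's first byte(s) into a narrow [start, end) window of the sorted index, and a bisection over fixed-width entries finishes the search. Note the `while`/`else`: `return None` runs only when the loop exhausts the window without a hit. Also, `(end - start) / 2` relies on Python 2 integer division; a Python 3 port would need `//`. A minimal standalone sketch of the same bisection, assuming a flat bytes buffer of fixed-width entries that each begin with a 20-byte node (names here are illustrative, not the module's API):

    NODELENGTH = 20

    def bisectentries(index, node, start, end, entrylen):
        # Keep probes aligned to entry boundaries (the code above aligns
        # relative to params.indexstart; here we use the window origin).
        origstart = start
        while start < end - entrylen:
            mid = start + (end - start) // 2
            mid -= (mid - origstart) % entrylen
            midnode = index[mid:mid + NODELENGTH]
            if midnode == node:
                return index[mid:mid + entrylen]
            if node > midnode:
                start = mid
            else:
                end = mid
        return None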
     def markledger(self, ledger, options=None):
         for filename, node in self:
             ledger.markdataentry(self, filename, node)

     def cleanup(self, ledger):
         entries = ledger.sources.get(self, [])
         allkeys = set(self)
         repackedkeys = set((e.filename, e.node) for e in entries if
                            e.datarepacked or e.gced)

         if len(allkeys - repackedkeys) == 0:
             if self.path not in ledger.created:
                 util.unlinkpath(self.indexpath, ignoremissing=True)
                 util.unlinkpath(self.packpath, ignoremissing=True)

     def __iter__(self):
         for f, n, deltabase, deltalen in self.iterentries():
             yield f, n

     def iterentries(self):
         # Start at 1 to skip the header
         offset = 1
         data = self._data
         while offset < self.datasize:
             oldoffset = offset

             # <2 byte len> + <filename>
             filenamelen = struct.unpack('!H', data[offset:offset + 2])[0]
             offset += 2
             filename = data[offset:offset + filenamelen]
             offset += filenamelen

             # <20 byte node>
             node = data[offset:offset + constants.NODESIZE]
             offset += constants.NODESIZE
             # <20 byte deltabase>
             deltabase = data[offset:offset + constants.NODESIZE]
             offset += constants.NODESIZE

             # <8 byte len> + <delta>
             rawdeltalen = data[offset:offset + 8]
             deltalen = struct.unpack('!Q', rawdeltalen)[0]
             offset += 8

             # it has to be at least long enough for the lz4 header.
             assert deltalen >= 4

             # python-lz4 stores the length of the uncompressed field as a
             # little-endian 32-bit integer at the start of the data.
             uncompressedlen = struct.unpack('<I', data[offset:offset + 4])[0]
             offset += deltalen

-            if self.VERSION == 1:
-                # <4 byte len> + <metadata-list>
-                metalen = struct.unpack_from('!I', data, offset)[0]
-                offset += 4 + metalen
+            # <4 byte len> + <metadata-list>
+            metalen = struct.unpack_from('!I', data, offset)[0]
+            offset += 4 + metalen

             yield (filename, node, deltabase, uncompressedlen)

             # If we've read a lot of data from the mmap, free some memory.
             self._pagedin += offset - oldoffset
             if self.freememory():
                 data = self._data

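The byte walk in iterentries can be read back as a small standalone decoder. A sketch, assuming a plain bytes buffer rather than the class's mmap, with the 20-byte node size inlined and the (now unconditional) metadata skipping folded in:

    import struct

    NODESIZE = 20

    def readrecord(data, offset):
        # <2 byte len> + <filename>
        (namelen,) = struct.unpack_from('!H', data, offset)
        offset += 2
        filename = data[offset:offset + namelen]
        offset += namelen
        # <20 byte node> + <20 byte deltabase>
        node = data[offset:offset + NODESIZE]
        deltabase = data[offset + NODESIZE:offset + 2 * NODESIZE]
        offset += 2 * NODESIZE
        # <8 byte len> + <lz4-compressed delta>
        (deltalen,) = struct.unpack_from('!Q', data, offset)
        offset += 8
        delta = data[offset:offset + deltalen]
        offset += deltalen
        # <4 byte len> + <metadata-list>, skipped here
        (metalen,) = struct.unpack_from('!I', data, offset)
        offset += 4 + metalen
        return filename, node, deltabase, delta, offset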
 class mutabledatapack(basepack.mutablebasepack):
     """A class for constructing and serializing a datapack file and index.

     A datapack is a pair of files that contain the revision contents for
     various file revisions in Mercurial. It contains only revision contents
     (like file contents), not any history information.

     It consists of two files, with the following format. All bytes are in
     network byte order (big endian).

     .datapack
         The pack itself is a series of revision deltas with some basic header
         information on each. A revision delta may be a fulltext, represented
         by a deltabasenode equal to the nullid.

         datapack = <version: 1 byte>
                    [<revision>,...]
         revision = <filename len: 2 byte unsigned int>
                    <filename>
                    <node: 20 byte>
                    <deltabasenode: 20 byte>
                    <delta len: 8 byte unsigned int>
                    <delta>
                    <metadata-list len: 4 byte unsigned int> [1]
                    <metadata-list> [1]
         metadata-list = [<metadata-item>, ...]
         metadata-item = <metadata-key: 1 byte>
                         <metadata-value len: 2 byte unsigned>
                         <metadata-value>

         metadata-key could be METAKEYFLAG or METAKEYSIZE or other single byte
         value in the future.

     .dataidx
         The index file consists of two parts, the fanout and the index.

         The index is a list of index entries, sorted by node (one per revision
         in the pack). Each entry has:

         - node (The 20 byte node of the entry; i.e. the commit hash, file node
                 hash, etc)
         - deltabase index offset (The location in the index of the deltabase
                                   for this entry. The deltabase is the next
                                   delta in the chain, with the chain
                                   eventually terminating in a full-text,
                                   represented by a deltabase offset of -1.
                                   This lets us compute delta chains from the
                                   index, then do sequential reads from the
                                   pack if the revisions are nearby on disk.)
         - pack entry offset (The location of this entry in the datapack)
         - pack content size (The on-disk length of this entry's pack data)

         The fanout is a quick lookup table to reduce the number of steps for
         bisecting the index. It is a series of 4 byte pointers to positions
         within the index. It has 2^16 entries, which corresponds to hash
         prefixes [0000, 0001,..., FFFE, FFFF]. Example: the pointer in slot
         4F0A points to the index position of the first revision whose node
         starts with 4F0A. This saves log(2^16)=16 bisect steps.

         dataidx = <fanouttable>
                   <index>
         fanouttable = [<index offset: 4 byte unsigned int>,...] (2^16 entries)
         index = [<index entry>,...]
         indexentry = <node: 20 byte>
                      <deltabase location: 4 byte signed int>
                      <pack entry offset: 8 byte unsigned int>
                      <pack entry size: 8 byte unsigned int>

     [1]: new in version 1.
     """
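The grammar above maps directly onto struct calls. A sketch of serializing one <revision> record, assuming `delta` is already lz4-compressed and `rawmeta` is an already-encoded <metadata-list> (both produced elsewhere in this module):

    import struct

    def packrevision(filename, node, deltabasenode, delta, rawmeta):
        assert len(node) == 20 and len(deltabasenode) == 20
        return b''.join([
            struct.pack('!H', len(filename)),  # <filename len>
            filename,
            node,                              # <node>
            deltabasenode,                     # <deltabasenode>
            struct.pack('!Q', len(delta)),     # <delta len>
            delta,                             # <delta>
            struct.pack('!I', len(rawmeta)),   # <metadata-list len> [1]
            rawmeta,                           # <metadata-list> [1]
        ])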
     INDEXSUFFIX = INDEXSUFFIX
     PACKSUFFIX = PACKSUFFIX

     # v[01] index format: <node><delta offset><pack data offset><pack data size>
     INDEXFORMAT = datapack.INDEXFORMAT
     INDEXENTRYLENGTH = datapack.INDEXENTRYLENGTH

     # v1 has metadata support
-    SUPPORTED_VERSIONS = [
+    SUPPORTED_VERSIONS = [2]

     def add(self, name, node, deltabasenode, delta, metadata=None):
         # metadata is a dict, ex. {METAKEYFLAG: flag}
         if len(name) > 2**16:
             raise RuntimeError(_("name too long %s") % name)
         if len(node) != 20:
             raise RuntimeError(_("node should be 20 bytes %s") % node)

         if node in self.entries:
             # The revision has already been added
             return

         # TODO: allow configurable compression
         delta = lz4wrapper.lz4compress(delta)

         rawdata = ''.join((
             struct.pack('!H', len(name)), # unsigned 2 byte int
             name,
             node,
             deltabasenode,
             struct.pack('!Q', len(delta)), # unsigned 8 byte int
             delta,
         ))

-        if self.VERSION == 1:
-            # v1 support metadata
-            rawmeta = shallowutil.buildpackmeta(metadata)
-            rawdata += struct.pack('!I', len(rawmeta)) # unsigned 4 byte
-            rawdata += rawmeta
-        else:
-            # v0 cannot store metadata, raise if metadata contains flag
-            if metadata and metadata.get(constants.METAKEYFLAG, 0) != 0:
-                raise error.ProgrammingError('v0 pack cannot store flags')
+        # v1 support metadata
+        rawmeta = shallowutil.buildpackmeta(metadata)
+        rawdata += struct.pack('!I', len(rawmeta)) # unsigned 4 byte
+        rawdata += rawmeta

         offset = self.packfp.tell()

         size = len(rawdata)

         self.entries[node] = (deltabasenode, offset, size)

         self.writeraw(rawdata)

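shallowutil.buildpackmeta produces the <metadata-list> bytes appended above. Its byte layout follows the docstring grammar; a sketch of that layout only (illustrative — the real helper also handles non-bytes values such as integer sizes and flags):

    import struct

    def encodemetalist(metadict):
        # metadict maps a 1-byte key (e.g. METAKEYFLAG) to a bytes value.
        raw = b''
        for key, value in sorted(metadict.items()):
            assert len(key) == 1 and len(value) < 2 ** 16
            raw += key                             # <metadata-key: 1 byte>
            raw += struct.pack('!H', len(value))   # <metadata-value len>
            raw += value                           # <metadata-value>
        return raw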
     def createindex(self, nodelocations, indexoffset):
         entries = sorted((n, db, o, s) for n, (db, o, s)
                          in self.entries.iteritems())

         rawindex = ''
         fmt = self.INDEXFORMAT
         for node, deltabase, offset, size in entries:
             if deltabase == nullid:
                 deltabaselocation = FULLTEXTINDEXMARK
             else:
                 # Instead of storing the deltabase node in the index, let's
                 # store a pointer directly to the index entry for the deltabase.
                 deltabaselocation = nodelocations.get(deltabase,
                                                       NOBASEINDEXMARK)

             entry = struct.pack(fmt, node, deltabaselocation, offset, size)
             rawindex += entry

         return rawindex
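A useful property of this index: deltabase pointers refer to other index entries, not nodes, so a whole delta chain can be resolved without touching the data file. A sketch of chain resolution under that assumption; `readentryat` is a hypothetical helper that unpacks the INDEXFORMAT entry at a given byte offset, and the sentinel values are illustrative (FULLTEXTINDEXMARK is -1 per the docstring; NOBASEINDEXMARK is the module's other marker):

    FULLTEXTINDEXMARK = -1   # per the docstring: chain ends at a fulltext
    NOBASEINDEXMARK = -2     # illustrative value for a missing base

    def deltachain(readentryat, location):
        # Follow deltabase pointers through the index, collecting the
        # (offset, size) pairs to read sequentially from the pack.
        chain = []
        while location not in (FULLTEXTINDEXMARK, NOBASEINDEXMARK):
            node, location, offset, size = readentryat(location)
            chain.append((offset, size))
        return chain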
@@ -1,545 +1,520 @@
 from __future__ import absolute_import

 import hashlib
 import struct

 from mercurial.node import hex, nullid
 from mercurial import (
     pycompat,
     util,
 )
 from . import (
     basepack,
     constants,
     shallowutil,
 )

 # (filename hash, offset, size)
-INDEXFORMAT0 = '!20sQQ'
-INDEXENTRYLENGTH0 = struct.calcsize(INDEXFORMAT0)
-INDEXFORMAT1 = '!20sQQII'
-INDEXENTRYLENGTH1 = struct.calcsize(INDEXFORMAT1)
+INDEXFORMAT2 = '!20sQQII'
+INDEXENTRYLENGTH2 = struct.calcsize(INDEXFORMAT2)
 NODELENGTH = 20

 NODEINDEXFORMAT = '!20sQ'
 NODEINDEXENTRYLENGTH = struct.calcsize(NODEINDEXFORMAT)

 # (node, p1, p2, linknode)
 PACKFORMAT = "!20s20s20s20sH"
 PACKENTRYLENGTH = 82

 ENTRYCOUNTSIZE = 4

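The hardcoded PACKENTRYLENGTH agrees with PACKFORMAT: four 20-byte hashes plus a 2-byte copyfrom length, with '!' disabling struct padding:

    import struct
    assert struct.calcsize("!20s20s20s20sH") == 4 * 20 + 2 == 82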
 INDEXSUFFIX = '.histidx'
 PACKSUFFIX = '.histpack'

 ANC_NODE = 0
 ANC_P1NODE = 1
 ANC_P2NODE = 2
 ANC_LINKNODE = 3
 ANC_COPYFROM = 4

 class historypackstore(basepack.basepackstore):
     INDEXSUFFIX = INDEXSUFFIX
     PACKSUFFIX = PACKSUFFIX

     def getpack(self, path):
         return historypack(path)

     def getancestors(self, name, node, known=None):
         for pack in self.packs:
             try:
                 return pack.getancestors(name, node, known=known)
             except KeyError:
                 pass

         for pack in self.refresh():
             try:
                 return pack.getancestors(name, node, known=known)
             except KeyError:
                 pass

         raise KeyError((name, node))

     def getnodeinfo(self, name, node):
         for pack in self.packs:
             try:
                 return pack.getnodeinfo(name, node)
             except KeyError:
                 pass

         for pack in self.refresh():
             try:
                 return pack.getnodeinfo(name, node)
             except KeyError:
                 pass

         raise KeyError((name, node))

     def add(self, filename, node, p1, p2, linknode, copyfrom):
         raise RuntimeError("cannot add to historypackstore (%s:%s)"
                            % (filename, hex(node)))

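The store is a read-only union: each query walks the known packs, then refreshes the pack list once (to pick up packs written since the store was opened) before giving up. A hypothetical caller that prefers an empty result to the final KeyError:

    def lookupancestors(store, name, node):
        # Returns {} instead of raising when no pack knows the key.
        try:
            return store.getancestors(name, node)
        except KeyError:
            return {}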
 class historypack(basepack.basepack):
     INDEXSUFFIX = INDEXSUFFIX
     PACKSUFFIX = PACKSUFFIX

-    SUPPORTED_VERSIONS = [
+    SUPPORTED_VERSIONS = [2]

     def __init__(self, path):
         super(historypack, self).__init__(path)
-
-        if self.VERSION == 0:
-            self.INDEXFORMAT = INDEXFORMAT0
-            self.INDEXENTRYLENGTH = INDEXENTRYLENGTH0
-        else:
-            self.INDEXFORMAT = INDEXFORMAT1
-            self.INDEXENTRYLENGTH = INDEXENTRYLENGTH1
+        self.INDEXFORMAT = INDEXFORMAT2
+        self.INDEXENTRYLENGTH = INDEXENTRYLENGTH2

     def getmissing(self, keys):
         missing = []
         for name, node in keys:
             try:
                 self._findnode(name, node)
             except KeyError:
                 missing.append((name, node))

         return missing

     def getancestors(self, name, node, known=None):
         """Returns as many ancestors as we're aware of.

         return value: {
             node: (p1, p2, linknode, copyfrom),
             ...
         }
         """
         if known and node in known:
             return []

         ancestors = self._getancestors(name, node, known=known)
         results = {}
         for ancnode, p1, p2, linknode, copyfrom in ancestors:
             results[ancnode] = (p1, p2, linknode, copyfrom)

         if not results:
             raise KeyError((name, node))
         return results

     def getnodeinfo(self, name, node):
         # Drop the node from the tuple before returning, since the result should
         # just be (p1, p2, linknode, copyfrom)
         return self._findnode(name, node)[1:]

     def _getancestors(self, name, node, known=None):
         if known is None:
             known = set()
         section = self._findsection(name)
         filename, offset, size, nodeindexoffset, nodeindexsize = section
         pending = set((node,))
         o = 0
         while o < size:
             if not pending:
                 break
             entry, copyfrom = self._readentry(offset + o)
             o += PACKENTRYLENGTH
             if copyfrom:
                 o += len(copyfrom)

             ancnode = entry[ANC_NODE]
             if ancnode in pending:
                 pending.remove(ancnode)
                 p1node = entry[ANC_P1NODE]
                 p2node = entry[ANC_P2NODE]
                 if p1node != nullid and p1node not in known:
                     pending.add(p1node)
                 if p2node != nullid and p2node not in known:
                     pending.add(p2node)

                 yield (ancnode, p1node, p2node, entry[ANC_LINKNODE], copyfrom)

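Because each file section stores revisions newest-first and parents appear later in the stream, the single forward scan above visits each needed entry once and can stop as soon as `pending` drains. Consuming it through the public wrapper looks like this (usage sketch):

    def collectlinknodes(pack, name, node, known=None):
        # Map each reachable ancestor of (name, node) to its linknode.
        ancestors = pack.getancestors(name, node, known=known)
        return dict(
            (ancnode, linknode)
            for ancnode, (p1, p2, linknode, copyfrom) in ancestors.items())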
     def _readentry(self, offset):
         data = self._data
         entry = struct.unpack(PACKFORMAT, data[offset:offset + PACKENTRYLENGTH])
         copyfrom = None
         copyfromlen = entry[ANC_COPYFROM]
         if copyfromlen != 0:
             offset += PACKENTRYLENGTH
             copyfrom = data[offset:offset + copyfromlen]
         return entry, copyfrom

     def add(self, filename, node, p1, p2, linknode, copyfrom):
         raise RuntimeError("cannot add to historypack (%s:%s)" %
                            (filename, hex(node)))

     def _findnode(self, name, node):
         if self.VERSION == 0:
             ancestors = self._getancestors(name, node)
             for ancnode, p1node, p2node, linknode, copyfrom in ancestors:
                 if ancnode == node:
                     return (ancnode, p1node, p2node, linknode, copyfrom)
         else:
             section = self._findsection(name)
             nodeindexoffset, nodeindexsize = section[3:]
             entry = self._bisect(node, nodeindexoffset,
                                  nodeindexoffset + nodeindexsize,
                                  NODEINDEXENTRYLENGTH)
             if entry is not None:
                 node, offset = struct.unpack(NODEINDEXFORMAT, entry)
                 entry, copyfrom = self._readentry(offset)
                 # Drop the copyfromlen from the end of entry, and replace it
                 # with the copyfrom string.
                 return entry[:4] + (copyfrom,)

         raise KeyError("unable to find history for %s:%s" % (name, hex(node)))

     def _findsection(self, name):
         params = self.params
         namehash = hashlib.sha1(name).digest()
         fanoutkey = struct.unpack(params.fanoutstruct,
                                   namehash[:params.fanoutprefix])[0]
         fanout = self._fanouttable

         start = fanout[fanoutkey] + params.indexstart
         indexend = self._indexend

         for i in pycompat.xrange(fanoutkey + 1, params.fanoutcount):
             end = fanout[i] + params.indexstart
             if end != start:
                 break
         else:
             end = indexend

         entry = self._bisect(namehash, start, end, self.INDEXENTRYLENGTH)
         if not entry:
             raise KeyError(name)

         rawentry = struct.unpack(self.INDEXFORMAT, entry)
-        if self.VERSION == 0:
-            x, offset, size = rawentry
-            nodeindexoffset = None
-            nodeindexsize = None
-        else:
-            x, offset, size, nodeindexoffset, nodeindexsize = rawentry
-            rawnamelen = self._index[nodeindexoffset:nodeindexoffset +
-                                     constants.FILENAMESIZE]
-            actualnamelen = struct.unpack('!H', rawnamelen)[0]
-            nodeindexoffset += constants.FILENAMESIZE
-            actualname = self._index[nodeindexoffset:nodeindexoffset +
-                                     actualnamelen]
-            if actualname != name:
-                raise KeyError("found file name %s when looking for %s" %
-                               (actualname, name))
-            nodeindexoffset += actualnamelen
+        x, offset, size, nodeindexoffset, nodeindexsize = rawentry
+        rawnamelen = self._index[nodeindexoffset:nodeindexoffset +
+                                 constants.FILENAMESIZE]
+        actualnamelen = struct.unpack('!H', rawnamelen)[0]
+        nodeindexoffset += constants.FILENAMESIZE
+        actualname = self._index[nodeindexoffset:nodeindexoffset +
+                                 actualnamelen]
+        if actualname != name:
+            raise KeyError("found file name %s when looking for %s" %
+                           (actualname, name))
+        nodeindexoffset += actualnamelen

         filenamelength = struct.unpack('!H', self._data[offset:offset +
                                                 constants.FILENAMESIZE])[0]
         offset += constants.FILENAMESIZE

         actualname = self._data[offset:offset + filenamelength]
         offset += filenamelength

         if name != actualname:
             raise KeyError("found file name %s when looking for %s" %
                            (actualname, name))

         # Skip entry list size
         offset += ENTRYCOUNTSIZE

         nodelistoffset = offset
         nodelistsize = (size - constants.FILENAMESIZE - filenamelength -
                         ENTRYCOUNTSIZE)
         return (name, nodelistoffset, nodelistsize,
                 nodeindexoffset, nodeindexsize)

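Note how nodelistsize is derived: the file-index entry records the whole section's size, so the revision-list length is that size minus the section header (the 2-byte name length, the name itself, and the 4-byte entry count). For a 9-byte filename and a 1000-byte section:

    FILENAMESIZE = 2
    ENTRYCOUNTSIZE = 4
    size, filenamelength = 1000, 9
    nodelistsize = size - FILENAMESIZE - filenamelength - ENTRYCOUNTSIZE
    assert nodelistsize == 985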
     def _bisect(self, node, start, end, entrylen):
         # Bisect between start and end to find node
         origstart = start
         startnode = self._index[start:start + NODELENGTH]
         endnode = self._index[end:end + NODELENGTH]

         if startnode == node:
             return self._index[start:start + entrylen]
         elif endnode == node:
             return self._index[end:end + entrylen]
         else:
             while start < end - entrylen:
                 mid = start + (end - start) / 2
                 mid = mid - ((mid - origstart) % entrylen)
                 midnode = self._index[mid:mid + NODELENGTH]
                 if midnode == node:
                     return self._index[mid:mid + entrylen]
                 if node > midnode:
                     start = mid
                     startnode = midnode
                 elif node < midnode:
                     end = mid
                     endnode = midnode
         return None

     def markledger(self, ledger, options=None):
         for filename, node in self:
             ledger.markhistoryentry(self, filename, node)

     def cleanup(self, ledger):
         entries = ledger.sources.get(self, [])
         allkeys = set(self)
         repackedkeys = set((e.filename, e.node) for e in entries if
                            e.historyrepacked)

         if len(allkeys - repackedkeys) == 0:
             if self.path not in ledger.created:
                 util.unlinkpath(self.indexpath, ignoremissing=True)
                 util.unlinkpath(self.packpath, ignoremissing=True)

     def __iter__(self):
         for f, n, x, x, x, x in self.iterentries():
             yield f, n

     def iterentries(self):
         # Start at 1 to skip the header
         offset = 1
         while offset < self.datasize:
             data = self._data
             # <2 byte len> + <filename>
             filenamelen = struct.unpack('!H', data[offset:offset +
                                                    constants.FILENAMESIZE])[0]
             offset += constants.FILENAMESIZE
             filename = data[offset:offset + filenamelen]
             offset += filenamelen

             revcount = struct.unpack('!I', data[offset:offset +
                                                 ENTRYCOUNTSIZE])[0]
             offset += ENTRYCOUNTSIZE

             for i in pycompat.xrange(revcount):
                 entry = struct.unpack(PACKFORMAT, data[offset:offset +
                                                        PACKENTRYLENGTH])
                 offset += PACKENTRYLENGTH

                 copyfrom = data[offset:offset + entry[ANC_COPYFROM]]
                 offset += entry[ANC_COPYFROM]

                 yield (filename, entry[ANC_NODE], entry[ANC_P1NODE],
                        entry[ANC_P2NODE], entry[ANC_LINKNODE], copyfrom)

                 self._pagedin += PACKENTRYLENGTH

             # If we've read a lot of data from the mmap, free some memory.
             self.freememory()

 class mutablehistorypack(basepack.mutablebasepack):
     """A class for constructing and serializing a histpack file and index.

     A history pack is a pair of files that contain the revision history for
     various file revisions in Mercurial. It contains only revision history
     (like parent pointers and linknodes), not any revision content
     information.

     It consists of two files, with the following format:

     .histpack
         The pack itself is a series of file revisions with some basic header
         information on each.

         histpack = <version: 1 byte>
                    [<filesection>,...]
         filesection = <filename len: 2 byte unsigned int>
                       <filename>
                       <revision count: 4 byte unsigned int>
                       [<revision>,...]
         revision = <node: 20 byte>
                    <p1node: 20 byte>
                    <p2node: 20 byte>
                    <linknode: 20 byte>
                    <copyfromlen: 2 byte>
                    <copyfrom>

         The revisions within each filesection are stored in topological order
         (newest first). If a given entry has a parent from another file (a
         copy) then p1node is the node from the other file, and copyfrom is
         the filepath of the other file.

     .histidx
         The index file provides a mapping from filename to the file section
         in the histpack. In V1 it also contains sub-indexes for specific
         nodes within each file. It consists of three parts, the fanout, the
         file index and the node indexes.

         The file index is a list of index entries, sorted by filename hash
         (one per file section in the pack). Each entry has:

         - node (The 20 byte hash of the filename)
         - pack entry offset (The location of this file section in the
                              histpack)
         - pack content size (The on-disk length of this file section's pack
                              data)
         - node index offset (The location of the file's node index in the
                              index file) [1]
         - node index size (the on-disk length of this file's node index) [1]

         The fanout is a quick lookup table to reduce the number of steps for
         bisecting the index. It is a series of 4 byte pointers to positions
         within the index. It has 2^16 entries, which corresponds to hash
         prefixes [00, 01, 02,..., FD, FE, FF]. Example: the pointer in slot
         4F points to the index position of the first revision whose node
         starts with 4F. This saves log(2^16) bisect steps.

         dataidx = <fanouttable>
                   <file count: 8 byte unsigned> [1]
                   <fileindex>
                   <node count: 8 byte unsigned> [1]
                   [<nodeindex>,...] [1]
         fanouttable = [<index offset: 4 byte unsigned int>,...] (2^16 entries)

         fileindex = [<file index entry>,...]
         fileindexentry = <node: 20 byte>
                          <pack file section offset: 8 byte unsigned int>
                          <pack file section size: 8 byte unsigned int>
                          <node index offset: 4 byte unsigned int> [1]
                          <node index size: 4 byte unsigned int> [1]
         nodeindex = <filename>[<node index entry>,...] [1]
         filename = <filename len: 2 byte unsigned int><filename value> [1]
         nodeindexentry = <node: 20 byte> [1]
                          <pack file node offset: 8 byte unsigned int> [1]

     [1]: new in version 1.
     """
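A decoder for one <filesection> header per the grammar above, as a sketch over a bytes buffer (the constants mirror the FILENAMESIZE/ENTRYCOUNTSIZE values used elsewhere in this module). The revision entries that follow are fixed-width plus a variable copyfrom tail, so they must be walked in order:

    import struct

    FILENAMESIZE = 2
    ENTRYCOUNTSIZE = 4

    def readsectionheader(data, offset):
        (namelen,) = struct.unpack_from('!H', data, offset)
        offset += FILENAMESIZE
        filename = data[offset:offset + namelen]
        offset += namelen
        (revcount,) = struct.unpack_from('!I', data, offset)
        offset += ENTRYCOUNTSIZE
        return filename, revcount, offset  # offset of the first revision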
     INDEXSUFFIX = INDEXSUFFIX
     PACKSUFFIX = PACKSUFFIX

-    SUPPORTED_VERSIONS = [
+    SUPPORTED_VERSIONS = [2]

-    def __init__(self, ui, packpath, version=
-        # internal config: remotefilelog.historypackv1
-        if version == 0 and ui.configbool('remotefilelog', 'historypackv1'):
-            version = 1
-
+    def __init__(self, ui, packpath, version=2):
         super(mutablehistorypack, self).__init__(ui, packpath, version=version)
         self.files = {}
         self.entrylocations = {}
         self.fileentries = {}

-        if version == 0:
-            self.INDEXFORMAT = INDEXFORMAT0
-            self.INDEXENTRYLENGTH = INDEXENTRYLENGTH0
-        else:
-            self.INDEXFORMAT = INDEXFORMAT1
-            self.INDEXENTRYLENGTH = INDEXENTRYLENGTH1
+        self.INDEXFORMAT = INDEXFORMAT2
+        self.INDEXENTRYLENGTH = INDEXENTRYLENGTH2

         self.NODEINDEXFORMAT = NODEINDEXFORMAT
         self.NODEINDEXENTRYLENGTH = NODEINDEXENTRYLENGTH

     def add(self, filename, node, p1, p2, linknode, copyfrom):
         copyfrom = copyfrom or ''
         copyfromlen = struct.pack('!H', len(copyfrom))
         self.fileentries.setdefault(filename, []).append((node, p1, p2,
                                                           linknode,
                                                           copyfromlen,
                                                           copyfrom))

     def _write(self):
         for filename in sorted(self.fileentries):
             entries = self.fileentries[filename]
             sectionstart = self.packfp.tell()

             # Write the file section content
             entrymap = dict((e[0], e) for e in entries)
             def parentfunc(node):
                 x, p1, p2, x, x, x = entrymap[node]
                 parents = []
                 if p1 != nullid:
                     parents.append(p1)
                 if p2 != nullid:
                     parents.append(p2)
                 return parents

             sortednodes = list(reversed(shallowutil.sortnodes(
                 (e[0] for e in entries),
                 parentfunc)))

             # Write the file section header
             self.writeraw("%s%s%s" % (
                 struct.pack('!H', len(filename)),
                 filename,
                 struct.pack('!I', len(sortednodes)),
             ))

             sectionlen = constants.FILENAMESIZE + len(filename) + 4

             rawstrings = []

             # Record the node locations for the index
             locations = self.entrylocations.setdefault(filename, {})
             offset = sectionstart + sectionlen
             for node in sortednodes:
                 locations[node] = offset
                 raw = '%s%s%s%s%s%s' % entrymap[node]
                 rawstrings.append(raw)
                 offset += len(raw)

             rawdata = ''.join(rawstrings)
             sectionlen += len(rawdata)

             self.writeraw(rawdata)

             # Record metadata for the index
             self.files[filename] = (sectionstart, sectionlen)
             node = hashlib.sha1(filename).digest()
             self.entries[node] = node

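_write relies on shallowutil.sortnodes returning an ordering in which parents precede children, then reverses it to satisfy the newest-first layout required by the .histpack docstring. A sketch of that ordering contract (not sortnodes itself, which presumably also breaks ties deterministically):

    def newestfirst(nodes, parentfunc):
        nodes = list(nodes)
        nodeset, seen, out = set(nodes), set(), []
        def visit(n):
            if n in seen:
                return
            seen.add(n)
            for p in parentfunc(n):
                if p in nodeset:     # only order nodes in this section
                    visit(p)
            out.append(n)            # parents land before children
        for n in nodes:
            visit(n)
        return list(reversed(out))   # newest (childmost) first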
     def close(self, ledger=None):
         if self._closed:
             return

         self._write()

         return super(mutablehistorypack, self).close(ledger=ledger)

     def createindex(self, nodelocations, indexoffset):
         fileindexformat = self.INDEXFORMAT
         fileindexlength = self.INDEXENTRYLENGTH
         nodeindexformat = self.NODEINDEXFORMAT
         nodeindexlength = self.NODEINDEXENTRYLENGTH
-        version = self.VERSION

         files = ((hashlib.sha1(filename).digest(), filename, offset, size)
                  for filename, (offset, size) in self.files.iteritems())
         files = sorted(files)

         # node index is after file index size, file index, and node index size
         indexlensize = struct.calcsize('!Q')
         nodeindexoffset = (indexoffset + indexlensize +
                            (len(files) * fileindexlength) + indexlensize)

         fileindexentries = []
         nodeindexentries = []
         nodecount = 0
         for namehash, filename, offset, size in files:
             # File section index
-            if version == 0:
-                rawentry = struct.pack(fileindexformat, namehash, offset, size)
-            else:
-                nodelocations = self.entrylocations[filename]
-
-                nodeindexsize = len(nodelocations) * nodeindexlength
-
-                rawentry = struct.pack(fileindexformat, namehash, offset, size,
-                                       nodeindexoffset, nodeindexsize)
-                # Node index
-                nodeindexentries.append(struct.pack(constants.FILENAMESTRUCT,
-                                                    len(filename)) + filename)
-                nodeindexoffset += constants.FILENAMESIZE + len(filename)
-
-                for node, location in sorted(nodelocations.iteritems()):
-                    nodeindexentries.append(struct.pack(nodeindexformat, node,
-                                                        location))
-                    nodecount += 1
-
-                nodeindexoffset += len(nodelocations) * nodeindexlength
+            nodelocations = self.entrylocations[filename]
+
+            nodeindexsize = len(nodelocations) * nodeindexlength
+
+            rawentry = struct.pack(fileindexformat, namehash, offset, size,
+                                   nodeindexoffset, nodeindexsize)
+            # Node index
+            nodeindexentries.append(struct.pack(constants.FILENAMESTRUCT,
+                                                len(filename)) + filename)
+            nodeindexoffset += constants.FILENAMESIZE + len(filename)
+
+            for node, location in sorted(nodelocations.iteritems()):
+                nodeindexentries.append(struct.pack(nodeindexformat, node,
+                                                    location))
+                nodecount += 1
+
+            nodeindexoffset += len(nodelocations) * nodeindexlength

             fileindexentries.append(rawentry)

         nodecountraw = ''
-        if version == 1:
-            nodecountraw = struct.pack('!Q', nodecount)
+        nodecountraw = struct.pack('!Q', nodecount)
         return (''.join(fileindexentries) + nodecountraw +
                 ''.join(nodeindexentries))
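Bookkeeping check for this layout: after a file's section is indexed, nodeindexoffset must advance past that file's whole node-index block, i.e. its length-prefixed name plus one fixed-width entry per node, so the next fileindexentry points at the right place. For example:

    import struct

    NODEINDEXENTRYLENGTH = struct.calcsize('!20sQ')  # 28 bytes per node
    FILENAMESIZE = 2

    filename, numnodes = b'foo/bar.c', 3
    consumed = FILENAMESIZE + len(filename) + numnodes * NODEINDEXENTRYLENGTH
    assert consumed == 2 + 9 + 3 * 28 == 95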
@@ -1,784 +1,781 @@
from __future__ import absolute_import

import os
import time

from mercurial.i18n import _
from mercurial.node import (
    nullid,
    short,
)
from mercurial import (
    encoding,
    error,
    mdiff,
    policy,
    pycompat,
    scmutil,
    util,
    vfs,
)
from mercurial.utils import procutil
from . import (
    constants,
    contentstore,
    datapack,
    extutil,
    historypack,
    metadatastore,
    shallowutil,
)

osutil = policy.importmod(r'osutil')

class RepackAlreadyRunning(error.Abort):
    pass

if util.safehasattr(util, '_hgexecutable'):
    # Before 5be286db
    _hgexecutable = util.hgexecutable
else:
    from mercurial.utils import procutil
    _hgexecutable = procutil.hgexecutable

def backgroundrepack(repo, incremental=True, packsonly=False):
    cmd = [_hgexecutable(), '-R', repo.origroot, 'repack']
    msg = _("(running background repack)\n")
    if incremental:
        cmd.append('--incremental')
        msg = _("(running background incremental repack)\n")
    if packsonly:
        cmd.append('--packsonly')
    repo.ui.warn(msg)
    procutil.runbgcommand(cmd, encoding.environ)
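
# For example, backgroundrepack(repo, incremental=True, packsonly=True) spawns
# "hg -R <repo.origroot> repack --incremental --packsonly" detached in the
# background, so the foreground command that triggered it is not blocked.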

def fullrepack(repo, options=None):
    """If ``packsonly`` is True, stores that create only loose objects are
    skipped.
    """
    if util.safehasattr(repo, 'shareddatastores'):
        datasource = contentstore.unioncontentstore(
            *repo.shareddatastores)
        historysource = metadatastore.unionmetadatastore(
            *repo.sharedhistorystores,
            allowincomplete=True)

        packpath = shallowutil.getcachepackpath(
            repo,
            constants.FILEPACK_CATEGORY)
        _runrepack(repo, datasource, historysource, packpath,
                   constants.FILEPACK_CATEGORY, options=options)

    if util.safehasattr(repo.manifestlog, 'datastore'):
        localdata, shareddata = _getmanifeststores(repo)
        lpackpath, ldstores, lhstores = localdata
        spackpath, sdstores, shstores = shareddata

        # Repack the shared manifest store
        datasource = contentstore.unioncontentstore(*sdstores)
        historysource = metadatastore.unionmetadatastore(
            *shstores,
            allowincomplete=True)
        _runrepack(repo, datasource, historysource, spackpath,
                   constants.TREEPACK_CATEGORY, options=options)

        # Repack the local manifest store
        datasource = contentstore.unioncontentstore(
            *ldstores,
            allowincomplete=True)
        historysource = metadatastore.unionmetadatastore(
            *lhstores,
            allowincomplete=True)
        _runrepack(repo, datasource, historysource, lpackpath,
                   constants.TREEPACK_CATEGORY, options=options)
def incrementalrepack(repo, options=None):
    """This repacks the repo by looking at the distribution of pack files in
    the repo and performing the most minimal repack to keep the repo in good
    shape.
    """
    if util.safehasattr(repo, 'shareddatastores'):
        packpath = shallowutil.getcachepackpath(
            repo,
            constants.FILEPACK_CATEGORY)
        _incrementalrepack(repo,
                           repo.shareddatastores,
                           repo.sharedhistorystores,
                           packpath,
                           constants.FILEPACK_CATEGORY,
                           options=options)

    if util.safehasattr(repo.manifestlog, 'datastore'):
        localdata, shareddata = _getmanifeststores(repo)
        lpackpath, ldstores, lhstores = localdata
        spackpath, sdstores, shstores = shareddata

        # Repack the shared manifest store
        _incrementalrepack(repo,
                           sdstores,
                           shstores,
                           spackpath,
                           constants.TREEPACK_CATEGORY,
                           options=options)

        # Repack the local manifest store
        _incrementalrepack(repo,
                           ldstores,
                           lhstores,
                           lpackpath,
                           constants.TREEPACK_CATEGORY,
                           allowincompletedata=True,
                           options=options)

def _getmanifeststores(repo):
    shareddatastores = repo.manifestlog.shareddatastores
    localdatastores = repo.manifestlog.localdatastores
    sharedhistorystores = repo.manifestlog.sharedhistorystores
    localhistorystores = repo.manifestlog.localhistorystores

    sharedpackpath = shallowutil.getcachepackpath(repo,
                                                  constants.TREEPACK_CATEGORY)
    localpackpath = shallowutil.getlocalpackpath(repo.svfs.vfs.base,
                                                 constants.TREEPACK_CATEGORY)

    return ((localpackpath, localdatastores, localhistorystores),
            (sharedpackpath, shareddatastores, sharedhistorystores))

def _topacks(packpath, files, constructor):
    paths = list(os.path.join(packpath, p) for p in files)
    packs = list(constructor(p) for p in paths)
    return packs

def _deletebigpacks(repo, folder, files):
    """Deletes packfiles that are bigger than ``packs.maxpacksize``.

    Returns ``files`` with the removed files omitted."""
    maxsize = repo.ui.configbytes("packs", "maxpacksize")
    if maxsize <= 0:
        return files

    # This only considers datapacks today, but we could broaden it to include
    # historypacks.
    VALIDEXTS = [".datapack", ".dataidx"]

    # Either an oversize index or datapack will trigger cleanup of the whole
    # pack:
    oversized = set([os.path.splitext(path)[0] for path, ftype, stat in files
        if (stat.st_size > maxsize and (os.path.splitext(path)[1]
                                        in VALIDEXTS))])

    for rootfname in oversized:
        rootpath = os.path.join(folder, rootfname)
        for ext in VALIDEXTS:
            path = rootpath + ext
            repo.ui.debug('removing oversize packfile %s (%s)\n' %
                          (path, util.bytecount(os.stat(path).st_size)))
            os.unlink(path)
    return [row for row in files if os.path.basename(row[0]) not in oversized]
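
# Rough illustration of the knob read above (the value is made up):
#
#   [packs]
#   maxpacksize = 100MB
#
# An oversize foo.datapack (or its foo.dataidx) then causes both files to be
# unlinked and filtered out of ``files``, so the incremental repack below
# never considers them.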

def _incrementalrepack(repo, datastore, historystore, packpath, category,
                       allowincompletedata=False, options=None):
    shallowutil.mkstickygroupdir(repo.ui, packpath)

    files = osutil.listdir(packpath, stat=True)
    files = _deletebigpacks(repo, packpath, files)
    datapacks = _topacks(packpath,
                         _computeincrementaldatapack(repo.ui, files),
                         datapack.datapack)
    datapacks.extend(s for s in datastore
                     if not isinstance(s, datapack.datapackstore))

    historypacks = _topacks(packpath,
                            _computeincrementalhistorypack(repo.ui, files),
                            historypack.historypack)
    historypacks.extend(s for s in historystore
                        if not isinstance(s, historypack.historypackstore))

    # ``allhistory{files,packs}`` contains all known history packs, even ones
    # we don't plan to repack. They are used during the datapack repack to
    # ensure good ordering of nodes.
    allhistoryfiles = _allpackfileswithsuffix(files, historypack.PACKSUFFIX,
                                              historypack.INDEXSUFFIX)
    allhistorypacks = _topacks(packpath,
                               (f for f, mode, stat in allhistoryfiles),
                               historypack.historypack)
    allhistorypacks.extend(s for s in historystore
                           if not isinstance(s, historypack.historypackstore))
    _runrepack(repo,
               contentstore.unioncontentstore(
                   *datapacks,
                   allowincomplete=allowincompletedata),
               metadatastore.unionmetadatastore(
                   *historypacks,
                   allowincomplete=True),
               packpath, category,
               fullhistory=metadatastore.unionmetadatastore(
                   *allhistorypacks,
                   allowincomplete=True),
               options=options)

def _computeincrementaldatapack(ui, files):
    opts = {
        'gencountlimit' : ui.configint(
            'remotefilelog', 'data.gencountlimit'),
        'generations' : ui.configlist(
            'remotefilelog', 'data.generations'),
        'maxrepackpacks' : ui.configint(
            'remotefilelog', 'data.maxrepackpacks'),
        'repackmaxpacksize' : ui.configbytes(
            'remotefilelog', 'data.repackmaxpacksize'),
        'repacksizelimit' : ui.configbytes(
            'remotefilelog', 'data.repacksizelimit'),
    }

    packfiles = _allpackfileswithsuffix(
        files, datapack.PACKSUFFIX, datapack.INDEXSUFFIX)
    return _computeincrementalpack(packfiles, opts)

def _computeincrementalhistorypack(ui, files):
    opts = {
        'gencountlimit' : ui.configint(
            'remotefilelog', 'history.gencountlimit'),
        'generations' : ui.configlist(
            'remotefilelog', 'history.generations', ['100MB']),
        'maxrepackpacks' : ui.configint(
            'remotefilelog', 'history.maxrepackpacks'),
        'repackmaxpacksize' : ui.configbytes(
            'remotefilelog', 'history.repackmaxpacksize', '400MB'),
        'repacksizelimit' : ui.configbytes(
            'remotefilelog', 'history.repacksizelimit'),
    }

    packfiles = _allpackfileswithsuffix(
        files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX)
    return _computeincrementalpack(packfiles, opts)
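
# The option names above are the actual lookups; the values below are a
# made-up sketch of what an hgrc tuning them might look like:
#
#   [remotefilelog]
#   data.gencountlimit = 2
#   data.generations = 1GB,100MB,1MB
#   data.maxrepackpacks = 50
#   data.repackmaxpacksize = 4GB
#   data.repacksizelimit = 100MB
#
# The history.* options play the same roles for history packs.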

def _allpackfileswithsuffix(files, packsuffix, indexsuffix):
    result = []
    fileset = set(fn for fn, mode, stat in files)
    for filename, mode, stat in files:
        if not filename.endswith(packsuffix):
            continue

        prefix = filename[:-len(packsuffix)]

        # Don't process a pack if it doesn't have an index.
        if (prefix + indexsuffix) not in fileset:
            continue
        result.append((prefix, mode, stat))

    return result

def _computeincrementalpack(files, opts):
    """Given a set of pack files along with the configuration options, this
    function computes the list of files that should be packed as part of an
    incremental repack.

    It tries to strike a balance between keeping incremental repacks cheap
    (packing small things when possible) and rolling the packs up into the
    big ones over time.
    """

    limits = list(sorted((util.sizetoint(s) for s in opts['generations']),
                  reverse=True))
    limits.append(0)

    # Group the packs by generation (i.e. by size)
    generations = []
    for i in pycompat.xrange(len(limits)):
        generations.append([])

    sizes = {}
    for prefix, mode, stat in files:
        size = stat.st_size
        if size > opts['repackmaxpacksize']:
            continue

        sizes[prefix] = size
        for i, limit in enumerate(limits):
            if size > limit:
                generations[i].append(prefix)
                break

    # Steps for picking what packs to repack:
    # 1. Pick the largest generation with > gencountlimit pack files.
    # 2. Take the smallest three packs.
    # 3. While total-size-of-packs < repacksizelimit: add another pack

    # Find the largest generation with more than gencountlimit packs
    genpacks = []
    for i, limit in enumerate(limits):
        if len(generations[i]) > opts['gencountlimit']:
            # Sort to be smallest last, for easy popping later
            genpacks.extend(sorted(generations[i], reverse=True,
                                   key=lambda x: sizes[x]))
            break

    # Take as many packs from the generation as we can
    chosenpacks = genpacks[-3:]
    genpacks = genpacks[:-3]
    repacksize = sum(sizes[n] for n in chosenpacks)
    while (repacksize < opts['repacksizelimit'] and genpacks and
           len(chosenpacks) < opts['maxrepackpacks']):
        chosenpacks.append(genpacks.pop())
        repacksize += sizes[chosenpacks[-1]]

    return chosenpacks
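
# Worked example with made-up numbers: generations = 1GB,100MB,1MB yields
# size buckets >1GB, >100MB, >1MB, >0. Packs of 2MB, 3MB, 5MB, 8MB and 600MB
# with gencountlimit = 2 make the >1MB bucket the largest generation over the
# limit, so the three smallest packs (5MB, 3MB, 2MB) are chosen first, then
# 8MB is appended while the running total stays under repacksizelimit and
# maxrepackpacks; the lone 600MB pack is left untouched.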

def _runrepack(repo, data, history, packpath, category, fullhistory=None,
               options=None):
    shallowutil.mkstickygroupdir(repo.ui, packpath)

    def isold(repo, filename, node):
        """Check if the file node is older than a limit.
        Unless a limit is specified in the config the default limit is taken.
        """
        filectx = repo.filectx(filename, fileid=node)
        filetime = repo[filectx.linkrev()].date()

        ttl = repo.ui.configint('remotefilelog', 'nodettl')

        limit = time.time() - ttl
        return filetime[0] < limit

    garbagecollect = repo.ui.configbool('remotefilelog', 'gcrepack')
    if not fullhistory:
        fullhistory = history
    packer = repacker(repo, data, history, fullhistory, category,
                      gc=garbagecollect, isold=isold, options=options)

-   # internal config: remotefilelog.datapackversion
-   dv = repo.ui.configint('remotefilelog', 'datapackversion', 0)
-
-   with datapack.mutabledatapack(repo.ui, packpath, version=dv) as dpack:
+   with datapack.mutabledatapack(repo.ui, packpath, version=2) as dpack:
        with historypack.mutablehistorypack(repo.ui, packpath) as hpack:
            try:
                packer.run(dpack, hpack)
            except error.LockHeld:
                raise RepackAlreadyRunning(_("skipping repack - another repack "
                                             "is already running"))

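# Garbage collection during repack is driven by the two options read above;
# an illustrative (not default-asserting) configuration:
#
#   [remotefilelog]
#   gcrepack = True
#   nodettl = 2592000
#
# With this, file nodes whose linkrev commit is older than ``nodettl``
# seconds and which fall outside the keepset computed below are dropped
# instead of being copied into the new pack.
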
def keepset(repo, keyfn, lastkeepkeys=None):
    """Computes a keepset which is not garbage collected.
    'keyfn' is a function that maps filename, node to a unique key.
    'lastkeepkeys' is an optional argument and if provided the keepset
    function updates lastkeepkeys with more keys and returns the result.
    """
    if not lastkeepkeys:
        keepkeys = set()
    else:
        keepkeys = lastkeepkeys

    # We want to keep:
    # 1. Working copy parent
    # 2. Draft commits
    # 3. Parents of draft commits
    # 4. Pullprefetch and bgprefetchrevs revsets if specified
    revs = ['.', 'draft()', 'parents(draft())']
    prefetchrevs = repo.ui.config('remotefilelog', 'pullprefetch', None)
    if prefetchrevs:
        revs.append('(%s)' % prefetchrevs)
    prefetchrevs = repo.ui.config('remotefilelog', 'bgprefetchrevs', None)
    if prefetchrevs:
        revs.append('(%s)' % prefetchrevs)
    revs = '+'.join(revs)

    revs = ['sort((%s), "topo")' % revs]
    keep = scmutil.revrange(repo, revs)

    processed = set()
    lastmanifest = None

    # process the commits in toposorted order starting from the oldest
    for r in reversed(keep._list):
        if repo[r].p1().rev() in processed:
            # if the direct parent has already been processed
            # then we only need to process the delta
            m = repo[r].manifestctx().readdelta()
        else:
            # otherwise take the manifest and diff it
            # with the previous manifest if one exists
            if lastmanifest:
                m = repo[r].manifest().diff(lastmanifest)
            else:
                m = repo[r].manifest()
        lastmanifest = repo[r].manifest()
        processed.add(r)

        # populate keepkeys with keys from the current manifest
        if type(m) is dict:
            # m is a result of diff of two manifests and is a dictionary that
            # maps filename to ((newnode, newflag), (oldnode, oldflag)) tuple
            for filename, diff in m.iteritems():
                if diff[0][0] is not None:
                    keepkeys.add(keyfn(filename, diff[0][0]))
        else:
            # m is a manifest object
            for filename, filenode in m.iteritems():
                keepkeys.add(keyfn(filename, filenode))

    return keepkeys
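
# For example, the repacker below builds its keepset with
# keepset(repo, lambda f, n: (f, n)), so membership tests during garbage
# collection are simple (filename, filenode) lookups.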

class repacker(object):
    """Class for orchestrating the repack of data and history information
    into a new format.
    """
    def __init__(self, repo, data, history, fullhistory, category, gc=False,
                 isold=None, options=None):
        self.repo = repo
        self.data = data
        self.history = history
        self.fullhistory = fullhistory
        self.unit = constants.getunits(category)
        self.garbagecollect = gc
        self.options = options
        if self.garbagecollect:
            if not isold:
                raise ValueError("Function 'isold' is not properly specified")
            # use (filename, node) tuple as a keepset key
            self.keepkeys = keepset(repo, lambda f, n : (f, n))
            self.isold = isold

    def run(self, targetdata, targethistory):
        ledger = repackledger()

        with extutil.flock(repacklockvfs(self.repo).join("repacklock"),
                           _('repacking %s') % self.repo.origroot, timeout=0):
            self.repo.hook('prerepack')

            # Populate ledger from source
            self.data.markledger(ledger, options=self.options)
            self.history.markledger(ledger, options=self.options)

            # Run repack
            self.repackdata(ledger, targetdata)
            self.repackhistory(ledger, targethistory)

            # Call cleanup on each source
            for source in ledger.sources:
                source.cleanup(ledger)

    def _chainorphans(self, ui, filename, nodes, orphans, deltabases):
        """Reorders ``orphans`` into a single chain inside ``nodes`` and
        ``deltabases``.

        We often have orphan entries (nodes without a base that aren't
        referenced by other nodes -- i.e., part of a chain) due to gaps in
        history. Rather than store them as individual fulltexts, we prefer to
        insert them as one chain sorted by size.
        """
        if not orphans:
            return nodes

        def getsize(node, default=0):
            meta = self.data.getmeta(filename, node)
            if constants.METAKEYSIZE in meta:
                return meta[constants.METAKEYSIZE]
            else:
                return default

        # Sort orphans by size; biggest first is preferred, since it's more
        # likely to be the newest version assuming files grow over time.
        # (Sort by node first to ensure the sort is stable.)
        orphans = sorted(orphans)
        orphans = list(sorted(orphans, key=getsize, reverse=True))
        if ui.debugflag:
            ui.debug("%s: orphan chain: %s\n" % (filename,
                ", ".join([short(s) for s in orphans])))

        # Create one contiguous chain and reassign deltabases.
        for i, node in enumerate(orphans):
            if i == 0:
                deltabases[node] = (nullid, 0)
            else:
                parent = orphans[i - 1]
                deltabases[node] = (parent, deltabases[parent][1] + 1)
        nodes = filter(lambda node: node not in orphans, nodes)
        nodes += orphans
        return nodes
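
    # Sketch with made-up nodes a, b, c of sizes 30, 20, 10: after chaining,
    # deltabases is {a: (nullid, 0), b: (a, 1), c: (b, 2)} and a, b, c move
    # to the end of the node order, so each delta base is written before the
    # nodes that delta against it.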

    def repackdata(self, ledger, target):
        ui = self.repo.ui
        maxchainlen = ui.configint('packs', 'maxchainlen', 1000)

        byfile = {}
        for entry in ledger.entries.itervalues():
            if entry.datasource:
                byfile.setdefault(entry.filename, {})[entry.node] = entry

        count = 0
        for filename, entries in sorted(byfile.iteritems()):
            ui.progress(_("repacking data"), count, unit=self.unit,
                        total=len(byfile))

            ancestors = {}
            nodes = list(node for node in entries.iterkeys())
            nohistory = []
            for i, node in enumerate(nodes):
                if node in ancestors:
                    continue
                ui.progress(_("building history"), i, unit='nodes',
                            total=len(nodes))
                try:
                    ancestors.update(self.fullhistory.getancestors(filename,
                        node, known=ancestors))
                except KeyError:
                    # Since we're packing data entries, we may not have the
                    # corresponding history entries for them. It's not a big
                    # deal, but the entries won't be delta'd perfectly.
                    nohistory.append(node)
            ui.progress(_("building history"), None)

            # Order the nodes children first, so we can produce reverse deltas
            orderednodes = list(reversed(self._toposort(ancestors)))
            if len(nohistory) > 0:
                ui.debug('repackdata: %d nodes without history\n' %
                         len(nohistory))
                orderednodes.extend(sorted(nohistory))

            # Filter orderednodes to just the nodes we want to serialize (it
            # currently also has the edge nodes' ancestors).
            orderednodes = filter(lambda node: node in nodes, orderednodes)

            # Garbage collect old nodes:
            if self.garbagecollect:
                neworderednodes = []
                for node in orderednodes:
                    # If the node is old and is not in the keepset, we skip
                    # it, and mark as garbage collected
                    if ((filename, node) not in self.keepkeys and
                        self.isold(self.repo, filename, node)):
                        entries[node].gced = True
                        continue
                    neworderednodes.append(node)
                orderednodes = neworderednodes

            # Compute delta bases for nodes:
            deltabases = {}
            nobase = set()
            referenced = set()
            nodes = set(nodes)
            for i, node in enumerate(orderednodes):
                ui.progress(_("processing nodes"), i, unit='nodes',
                            total=len(orderednodes))
                # Find delta base
                # TODO: allow delta'ing against most recent descendant instead
                # of immediate child
                deltatuple = deltabases.get(node, None)
                if deltatuple is None:
                    deltabase, chainlen = nullid, 0
                    deltabases[node] = (nullid, 0)
                    nobase.add(node)
                else:
                    deltabase, chainlen = deltatuple
                    referenced.add(deltabase)

                # Use available ancestor information to inform our delta
                # choices
                ancestorinfo = ancestors.get(node)
                if ancestorinfo:
                    p1, p2, linknode, copyfrom = ancestorinfo

                    # The presence of copyfrom means we're at a point where
                    # the file was copied from elsewhere. So don't attempt to
                    # do any deltas with the other file.
                    if copyfrom:
                        p1 = nullid

                    if chainlen < maxchainlen:
                        # Record this child as the delta base for its parents.
                        # This may be non-optimal, since the parents may have
                        # many children, and this will only choose the last
                        # one.
                        # TODO: record all children and try all deltas to find
                        # best
                        if p1 != nullid:
                            deltabases[p1] = (node, chainlen + 1)
                        if p2 != nullid:
                            deltabases[p2] = (node, chainlen + 1)

            # experimental config: repack.chainorphansbysize
            if ui.configbool('repack', 'chainorphansbysize'):
                orphans = nobase - referenced
                orderednodes = self._chainorphans(ui, filename, orderednodes,
                                                  orphans, deltabases)

            # Compute deltas and write to the pack
            for i, node in enumerate(orderednodes):
                deltabase, chainlen = deltabases[node]
                # Compute delta
                # TODO: Optimize the deltachain fetching. Since we're
                # iterating over the different versions of the file, we may
                # be fetching the same deltachain over and over again.
                meta = None
                if deltabase != nullid:
                    deltaentry = self.data.getdelta(filename, node)
                    delta, deltabasename, origdeltabase, meta = deltaentry
                    size = meta.get(constants.METAKEYSIZE)
                    if (deltabasename != filename or origdeltabase != deltabase
                        or size is None):
                        deltabasetext = self.data.get(filename, deltabase)
                        original = self.data.get(filename, node)
                        size = len(original)
                        delta = mdiff.textdiff(deltabasetext, original)
                else:
                    delta = self.data.get(filename, node)
                    size = len(delta)
                    meta = self.data.getmeta(filename, node)

                # TODO: don't use the delta if it's larger than the fulltext
                if constants.METAKEYSIZE not in meta:
                    meta[constants.METAKEYSIZE] = size
                target.add(filename, node, deltabase, delta, meta)

                entries[node].datarepacked = True

            ui.progress(_("processing nodes"), None)
            count += 1

        ui.progress(_("repacking data"), None)
        target.close(ledger=ledger)

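    # Reverse-delta sketch (hypothetical linear history p -> c): orderednodes
    # is children-first, so c is visited first, gets nullid as its base (a
    # fulltext), and registers itself as the delta base for p; p is then
    # stored as a delta against c. The newest version stays cheap to read,
    # while older versions pay the chain walk.
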
    def repackhistory(self, ledger, target):
        ui = self.repo.ui

        byfile = {}
        for entry in ledger.entries.itervalues():
            if entry.historysource:
                byfile.setdefault(entry.filename, {})[entry.node] = entry

        count = 0
        for filename, entries in sorted(byfile.iteritems()):
            ancestors = {}
            nodes = list(node for node in entries.iterkeys())

            for node in nodes:
                if node in ancestors:
                    continue
                ancestors.update(self.history.getancestors(filename, node,
                                                           known=ancestors))

            # Order the nodes children first
            orderednodes = reversed(self._toposort(ancestors))

            # Write to the pack
            dontprocess = set()
            for node in orderednodes:
                p1, p2, linknode, copyfrom = ancestors[node]

                # If the node is marked dontprocess, but it's also in the
                # explicit entries set, that means the node exists both in
                # this file and in another file that was copied to this file.
                # Usually this happens if the file was copied to another
                # file, then the copy was deleted, then reintroduced without
                # copy metadata. The original add and the new add have the
                # same hash since the content is identical and the parents
                # are null.
                if node in dontprocess and node not in entries:
                    # If copyfrom == filename, it means the copy history
                    # went to some other file, then came back to this one,
                    # so we should continue processing it.
                    if p1 != nullid and copyfrom != filename:
                        dontprocess.add(p1)
                    if p2 != nullid:
                        dontprocess.add(p2)
                    continue

                if copyfrom:
                    dontprocess.add(p1)

                target.add(filename, node, p1, p2, linknode, copyfrom)

                if node in entries:
                    entries[node].historyrepacked = True

            count += 1
            ui.progress(_("repacking history"), count, unit=self.unit,
                        total=len(byfile))

        ui.progress(_("repacking history"), None)
        target.close(ledger=ledger)

    def _toposort(self, ancestors):
        def parentfunc(node):
            p1, p2, linknode, copyfrom = ancestors[node]
            parents = []
            if p1 != nullid:
                parents.append(p1)
            if p2 != nullid:
                parents.append(p2)
            return parents

        sortednodes = shallowutil.sortnodes(ancestors.keys(), parentfunc)
        return sortednodes
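
    # shallowutil.sortnodes produces a parents-first topological order, which
    # is why repackdata and repackhistory both wrap _toposort in reversed()
    # to get the children-first order their write loops depend on.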
709 |
|
706 | |||
710 | class repackledger(object): |
|
707 | class repackledger(object): | |
711 | """Storage for all the bookkeeping that happens during a repack. It contains |
|
708 | """Storage for all the bookkeeping that happens during a repack. It contains | |
712 | the list of revisions being repacked, what happened to each revision, and |
|
709 | the list of revisions being repacked, what happened to each revision, and | |
713 | which source store contained which revision originally (for later cleanup). |
|
710 | which source store contained which revision originally (for later cleanup). | |
714 | """ |
|
711 | """ | |
715 | def __init__(self): |
|
712 | def __init__(self): | |
716 | self.entries = {} |
|
713 | self.entries = {} | |
717 | self.sources = {} |
|
714 | self.sources = {} | |
718 | self.created = set() |
|
715 | self.created = set() | |
719 |
|
716 | |||
720 | def markdataentry(self, source, filename, node): |
|
717 | def markdataentry(self, source, filename, node): | |
721 | """Mark the given filename+node revision as having a data rev in the |
|
718 | """Mark the given filename+node revision as having a data rev in the | |
722 | given source. |
|
719 | given source. | |
723 | """ |
|
720 | """ | |
724 | entry = self._getorcreateentry(filename, node) |
|
721 | entry = self._getorcreateentry(filename, node) | |
725 | entry.datasource = True |
|
722 | entry.datasource = True | |
726 | entries = self.sources.get(source) |
|
723 | entries = self.sources.get(source) | |
727 | if not entries: |
|
724 | if not entries: | |
728 | entries = set() |
|
725 | entries = set() | |
729 | self.sources[source] = entries |
|
726 | self.sources[source] = entries | |
730 | entries.add(entry) |
|
727 | entries.add(entry) | |
731 |
|
728 | |||
732 | def markhistoryentry(self, source, filename, node): |
|
729 | def markhistoryentry(self, source, filename, node): | |
733 | """Mark the given filename+node revision as having a history rev in the |
|
730 | """Mark the given filename+node revision as having a history rev in the | |
734 | given source. |
|
731 | given source. | |
735 | """ |
|
732 | """ | |
736 | entry = self._getorcreateentry(filename, node) |
|
733 | entry = self._getorcreateentry(filename, node) | |
737 | entry.historysource = True |
|
734 | entry.historysource = True | |
738 | entries = self.sources.get(source) |
|
735 | entries = self.sources.get(source) | |
739 | if not entries: |
|
736 | if not entries: | |
740 | entries = set() |
|
737 | entries = set() | |
741 | self.sources[source] = entries |
|
738 | self.sources[source] = entries | |
742 | entries.add(entry) |
|
739 | entries.add(entry) | |
743 |
|
740 | |||
744 | def _getorcreateentry(self, filename, node): |
|
741 | def _getorcreateentry(self, filename, node): | |
745 | key = (filename, node) |
|
742 | key = (filename, node) | |
746 | value = self.entries.get(key) |
|
743 | value = self.entries.get(key) | |
747 | if not value: |
|
744 | if not value: | |
748 | value = repackentry(filename, node) |
|
745 | value = repackentry(filename, node) | |
749 | self.entries[key] = value |
|
746 | self.entries[key] = value | |
750 |
|
747 | |||
751 | return value |
|
748 | return value | |
752 |
|
749 | |||
753 | def addcreated(self, value): |
|
750 | def addcreated(self, value): | |
754 | self.created.add(value) |
|
751 | self.created.add(value) | |
755 |
|
752 | |||
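As a rough illustration of the bookkeeping above: a repack driver would walk its source stores, mark every revision it sees, and record the packs it writes. A minimal sketch, not the extension's actual driver -- `datastore`, `historystore`, `iterentries` and `newpackpath` are hypothetical stand-ins:

    # Hypothetical sketch of feeding a repackledger during a repack pass.
    ledger = repackledger()
    for filename, node in datastore.iterentries():       # assumed iterator
        ledger.markdataentry(datastore, filename, node)
    for filename, node in historystore.iterentries():    # assumed iterator
        ledger.markhistoryentry(historystore, filename, node)
    # After the new pack files are written, record them so the later cleanup
    # of the source stores knows to leave them alone:
    ledger.addcreated(newpackpath)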
756 | class repackentry(object): |
|
753 | class repackentry(object): | |
757 | """Simple class representing a single revision entry in the repackledger. |
|
754 | """Simple class representing a single revision entry in the repackledger. | |
758 | """ |
|
755 | """ | |
759 | __slots__ = ['filename', 'node', 'datasource', 'historysource', |
|
756 | __slots__ = ['filename', 'node', 'datasource', 'historysource', | |
760 | 'datarepacked', 'historyrepacked', 'gced'] |
|
757 | 'datarepacked', 'historyrepacked', 'gced'] | |
761 | def __init__(self, filename, node): |
|
758 | def __init__(self, filename, node): | |
762 | self.filename = filename |
|
759 | self.filename = filename | |
763 | self.node = node |
|
760 | self.node = node | |
764 | # If the revision has a data entry in the source |
|
761 | # If the revision has a data entry in the source | |
765 | self.datasource = False |
|
762 | self.datasource = False | |
766 | # If the revision has a history entry in the source |
|
763 | # If the revision has a history entry in the source | |
767 | self.historysource = False |
|
764 | self.historysource = False | |
768 | # If the revision's data entry was repacked into the repack target |
|
765 | # If the revision's data entry was repacked into the repack target | |
769 | self.datarepacked = False |
|
766 | self.datarepacked = False | |
770 | # If the revision's history entry was repacked into the repack target |
|
767 | # If the revision's history entry was repacked into the repack target | |
771 | self.historyrepacked = False |
|
768 | self.historyrepacked = False | |
772 | # If garbage collected |
|
769 | # If garbage collected | |
773 | self.gced = False |
|
770 | self.gced = False | |
774 |
|
771 | |||
775 | def repacklockvfs(repo): |
|
772 | def repacklockvfs(repo): | |
776 | if util.safehasattr(repo, 'name'): |
|
773 | if util.safehasattr(repo, 'name'): | |
777 | # Lock in the shared cache so repacks across multiple copies of the same |
|
774 | # Lock in the shared cache so repacks across multiple copies of the same | |
778 | # repo are coordinated. |
|
775 | # repo are coordinated. | |
779 | sharedcachepath = shallowutil.getcachepackpath( |
|
776 | sharedcachepath = shallowutil.getcachepackpath( | |
780 | repo, |
|
777 | repo, | |
781 | constants.FILEPACK_CATEGORY) |
|
778 | constants.FILEPACK_CATEGORY) | |
782 | return vfs.vfs(sharedcachepath) |
|
779 | return vfs.vfs(sharedcachepath) | |
783 | else: |
|
780 | else: | |
784 | return repo.svfs |
|
781 | return repo.svfs |
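A caller is expected to take the actual repack lock through the vfs returned here, so that repacks of one shared cache serialize across repo copies. A minimal sketch, assuming Mercurial's lock module; the zero timeout and the helper itself are illustrative, and 'repacklock' matches the file name that appears under the pack directory in the tests below:

    # Hypothetical caller-side sketch, not the extension's actual code.
    from mercurial import lock as lockmod

    def withrepacklock(repo, fn):
        packlock = lockmod.lock(repacklockvfs(repo), 'repacklock', timeout=0)
        try:
            return fn()
        finally:
            packlock.release()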
@@ -1,370 +1,370 b'' | |||||
1 | $ PYTHONPATH=$TESTDIR/..:$PYTHONPATH |
|
1 | $ PYTHONPATH=$TESTDIR/..:$PYTHONPATH | |
2 | $ export PYTHONPATH |
|
2 | $ export PYTHONPATH | |
3 |
|
3 | |||
4 | $ . "$TESTDIR/remotefilelog-library.sh" |
|
4 | $ . "$TESTDIR/remotefilelog-library.sh" | |
5 |
|
5 | |||
6 | $ hginit master |
|
6 | $ hginit master | |
7 | $ cd master |
|
7 | $ cd master | |
8 | $ cat >> .hg/hgrc <<EOF |
|
8 | $ cat >> .hg/hgrc <<EOF | |
9 | > [remotefilelog] |
|
9 | > [remotefilelog] | |
10 | > server=True |
|
10 | > server=True | |
11 | > EOF |
|
11 | > EOF | |
12 | $ echo x > x |
|
12 | $ echo x > x | |
13 | $ echo z > z |
|
13 | $ echo z > z | |
14 | $ hg commit -qAm x |
|
14 | $ hg commit -qAm x | |
15 | $ echo x2 > x |
|
15 | $ echo x2 > x | |
16 | $ echo y > y |
|
16 | $ echo y > y | |
17 | $ hg commit -qAm y |
|
17 | $ hg commit -qAm y | |
18 | $ echo w > w |
|
18 | $ echo w > w | |
19 | $ rm z |
|
19 | $ rm z | |
20 | $ hg commit -qAm w |
|
20 | $ hg commit -qAm w | |
21 | $ hg bookmark foo |
|
21 | $ hg bookmark foo | |
22 |
|
22 | |||
23 | $ cd .. |
|
23 | $ cd .. | |
24 |
|
24 | |||
25 | # clone the repo |
|
25 | # clone the repo | |
26 |
|
26 | |||
27 | $ hgcloneshallow ssh://user@dummy/master shallow --noupdate |
|
27 | $ hgcloneshallow ssh://user@dummy/master shallow --noupdate | |
28 | streaming all changes |
|
28 | streaming all changes | |
29 | 2 files to transfer, 776 bytes of data |
|
29 | 2 files to transfer, 776 bytes of data | |
30 | transferred 776 bytes in * seconds (*/sec) (glob) |
|
30 | transferred 776 bytes in * seconds (*/sec) (glob) | |
31 | searching for changes |
|
31 | searching for changes | |
32 | no changes found |
|
32 | no changes found | |
33 |
|
33 | |||
34 | # Set the prefetchdays config to zero so that all commits are prefetched |
|
34 | # Set the prefetchdays config to zero so that all commits are prefetched | |
35 | # no matter what their creation date is. Also set prefetchdelay config |
|
35 | # no matter what their creation date is. Also set prefetchdelay config | |
36 | # to zero so that there is no delay between prefetches. |
|
36 | # to zero so that there is no delay between prefetches. | |
37 | $ cd shallow |
|
37 | $ cd shallow | |
38 | $ cat >> .hg/hgrc <<EOF |
|
38 | $ cat >> .hg/hgrc <<EOF | |
39 | > [remotefilelog] |
|
39 | > [remotefilelog] | |
40 | > prefetchdays=0 |
|
40 | > prefetchdays=0 | |
41 | > prefetchdelay=0 |
|
41 | > prefetchdelay=0 | |
42 | > EOF |
|
42 | > EOF | |
43 | $ cd .. |
|
43 | $ cd .. | |
44 |
|
44 | |||
45 | # prefetch a revision |
|
45 | # prefetch a revision | |
46 | $ cd shallow |
|
46 | $ cd shallow | |
47 |
|
47 | |||
48 | $ hg prefetch -r 0 |
|
48 | $ hg prefetch -r 0 | |
49 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob) |
|
49 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob) | |
50 |
|
50 | |||
51 | $ hg cat -r 0 x |
|
51 | $ hg cat -r 0 x | |
52 | x |
|
52 | x | |
53 |
|
53 | |||
54 | # background prefetch on pull when configured |
|
54 | # background prefetch on pull when configured | |
55 |
|
55 | |||
56 | $ cat >> .hg/hgrc <<EOF |
|
56 | $ cat >> .hg/hgrc <<EOF | |
57 | > [remotefilelog] |
|
57 | > [remotefilelog] | |
58 | > pullprefetch=bookmark() |
|
58 | > pullprefetch=bookmark() | |
59 | > backgroundprefetch=True |
|
59 | > backgroundprefetch=True | |
60 | > EOF |
|
60 | > EOF | |
61 | $ hg strip tip |
|
61 | $ hg strip tip | |
62 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/6b4b6f66ef8c-b4b8bdaf-backup.hg (glob) |
|
62 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/6b4b6f66ef8c-b4b8bdaf-backup.hg (glob) | |
63 |
|
63 | |||
64 | $ clearcache |
|
64 | $ clearcache | |
65 | $ hg pull |
|
65 | $ hg pull | |
66 | pulling from ssh://user@dummy/master |
|
66 | pulling from ssh://user@dummy/master | |
67 | searching for changes |
|
67 | searching for changes | |
68 | adding changesets |
|
68 | adding changesets | |
69 | adding manifests |
|
69 | adding manifests | |
70 | adding file changes |
|
70 | adding file changes | |
71 | added 1 changesets with 0 changes to 0 files |
|
71 | added 1 changesets with 0 changes to 0 files | |
72 | updating bookmark foo |
|
72 | updating bookmark foo | |
73 | new changesets 6b4b6f66ef8c |
|
73 | new changesets 6b4b6f66ef8c | |
74 | (run 'hg update' to get a working copy) |
|
74 | (run 'hg update' to get a working copy) | |
75 | prefetching file contents |
|
75 | prefetching file contents | |
76 | $ sleep 0.5 |
|
76 | $ sleep 0.5 | |
77 | $ hg debugwaitonprefetch >/dev/null 2>&1 |
|
77 | $ hg debugwaitonprefetch >/dev/null 2>&1 | |
78 | $ find $CACHEDIR -type f | sort |
|
78 | $ find $CACHEDIR -type f | sort | |
79 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/ef95c5376f34698742fe34f315fd82136f8f68c0 |
|
79 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/ef95c5376f34698742fe34f315fd82136f8f68c0 | |
80 | $TESTTMP/hgcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca |
|
80 | $TESTTMP/hgcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca | |
81 | $TESTTMP/hgcache/master/af/f024fe4ab0fece4091de044c58c9ae4233383a/bb6ccd5dceaa5e9dc220e0dad65e051b94f69a2c |
|
81 | $TESTTMP/hgcache/master/af/f024fe4ab0fece4091de044c58c9ae4233383a/bb6ccd5dceaa5e9dc220e0dad65e051b94f69a2c | |
82 | $TESTTMP/hgcache/repos |
|
82 | $TESTTMP/hgcache/repos | |
83 |
|
83 | |||
84 | # background prefetch with repack on pull when configured |
|
84 | # background prefetch with repack on pull when configured | |
85 |
|
85 | |||
86 | $ cat >> .hg/hgrc <<EOF |
|
86 | $ cat >> .hg/hgrc <<EOF | |
87 | > [remotefilelog] |
|
87 | > [remotefilelog] | |
88 | > backgroundrepack=True |
|
88 | > backgroundrepack=True | |
89 | > EOF |
|
89 | > EOF | |
90 | $ hg strip tip |
|
90 | $ hg strip tip | |
91 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/6b4b6f66ef8c-b4b8bdaf-backup.hg (glob) |
|
91 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/6b4b6f66ef8c-b4b8bdaf-backup.hg (glob) | |
92 |
|
92 | |||
93 | $ clearcache |
|
93 | $ clearcache | |
94 | $ hg pull |
|
94 | $ hg pull | |
95 | pulling from ssh://user@dummy/master |
|
95 | pulling from ssh://user@dummy/master | |
96 | searching for changes |
|
96 | searching for changes | |
97 | adding changesets |
|
97 | adding changesets | |
98 | adding manifests |
|
98 | adding manifests | |
99 | adding file changes |
|
99 | adding file changes | |
100 | added 1 changesets with 0 changes to 0 files |
|
100 | added 1 changesets with 0 changes to 0 files | |
101 | updating bookmark foo |
|
101 | updating bookmark foo | |
102 | new changesets 6b4b6f66ef8c |
|
102 | new changesets 6b4b6f66ef8c | |
103 | (run 'hg update' to get a working copy) |
|
103 | (run 'hg update' to get a working copy) | |
104 | prefetching file contents |
|
104 | prefetching file contents | |
105 | $ sleep 0.5 |
|
105 | $ sleep 0.5 | |
106 | $ hg debugwaitonprefetch >/dev/null 2>&1 |
|
106 | $ hg debugwaitonprefetch >/dev/null 2>&1 | |
107 | $ sleep 0.5 |
|
107 | $ sleep 0.5 | |
108 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
108 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
109 | $ find $CACHEDIR -type f | sort |
|
109 | $ find $CACHEDIR -type f | sort | |
110 | $TESTTMP/hgcache/master/packs/94d53eef9e622533aec1fc6d8053cb086e785d21.histidx |
|
110 | $TESTTMP/hgcache/master/packs/3616094d229ed39e2593f79c772676d4ec00253a.dataidx | |
111 | $TESTTMP/hgcache/master/packs/94d53eef9e622533aec1fc6d8053cb086e785d21.histpack |
|
111 | $TESTTMP/hgcache/master/packs/3616094d229ed39e2593f79c772676d4ec00253a.datapack | |
112 | $TESTTMP/hgcache/master/packs/ |
|
112 | $TESTTMP/hgcache/master/packs/6e8633deba6e544e5f8edbd7b996d6e31a2c42ae.histidx | |
113 | $TESTTMP/hgcache/master/packs/ |
|
113 | $TESTTMP/hgcache/master/packs/6e8633deba6e544e5f8edbd7b996d6e31a2c42ae.histpack | |
114 | $TESTTMP/hgcache/master/packs/repacklock |
|
114 | $TESTTMP/hgcache/master/packs/repacklock | |
115 | $TESTTMP/hgcache/repos |
|
115 | $TESTTMP/hgcache/repos | |
116 |
|
116 | |||
117 | # background prefetch with repack on update when wcprevset configured |
|
117 | # background prefetch with repack on update when wcprevset configured | |
118 |
|
118 | |||
119 | $ clearcache |
|
119 | $ clearcache | |
120 | $ hg up -r 0 |
|
120 | $ hg up -r 0 | |
121 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
121 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
122 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob) |
|
122 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob) | |
123 | $ find $CACHEDIR -type f | sort |
|
123 | $ find $CACHEDIR -type f | sort | |
124 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 |
|
124 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 | |
125 | $TESTTMP/hgcache/master/39/5df8f7c51f007019cb30201c49e884b46b92fa/69a1b67522704ec122181c0890bd16e9d3e7516a |
|
125 | $TESTTMP/hgcache/master/39/5df8f7c51f007019cb30201c49e884b46b92fa/69a1b67522704ec122181c0890bd16e9d3e7516a | |
126 | $TESTTMP/hgcache/repos |
|
126 | $TESTTMP/hgcache/repos | |
127 |
|
127 | |||
128 | $ hg up -r 1 |
|
128 | $ hg up -r 1 | |
129 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
129 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
130 | 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over *s (glob) |
|
130 | 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over *s (glob) | |
131 |
|
131 | |||
132 | $ cat >> .hg/hgrc <<EOF |
|
132 | $ cat >> .hg/hgrc <<EOF | |
133 | > [remotefilelog] |
|
133 | > [remotefilelog] | |
134 | > bgprefetchrevs=.:: |
|
134 | > bgprefetchrevs=.:: | |
135 | > EOF |
|
135 | > EOF | |
136 |
|
136 | |||
137 | $ clearcache |
|
137 | $ clearcache | |
138 | $ hg up -r 0 |
|
138 | $ hg up -r 0 | |
139 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
139 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved | |
140 | * files fetched over * fetches - (* misses, 0.00% hit ratio) over *s (glob) |
|
140 | * files fetched over * fetches - (* misses, 0.00% hit ratio) over *s (glob) | |
141 | $ sleep 1 |
|
141 | $ sleep 1 | |
142 | $ hg debugwaitonprefetch >/dev/null 2>&1 |
|
142 | $ hg debugwaitonprefetch >/dev/null 2>&1 | |
143 | $ sleep 1 |
|
143 | $ sleep 1 | |
144 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
144 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
145 | $ find $CACHEDIR -type f | sort |
|
145 | $ find $CACHEDIR -type f | sort | |
146 | $TESTTMP/hgcache/master/packs/ |
|
146 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx | |
147 | $TESTTMP/hgcache/master/packs/ |
|
147 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack | |
148 | $TESTTMP/hgcache/master/packs/ |
|
148 | $TESTTMP/hgcache/master/packs/ac19aff076286bebe3ff108c96c6445a0fe27c46.dataidx | |
149 | $TESTTMP/hgcache/master/packs/ |
|
149 | $TESTTMP/hgcache/master/packs/ac19aff076286bebe3ff108c96c6445a0fe27c46.datapack | |
150 | $TESTTMP/hgcache/master/packs/repacklock |
|
150 | $TESTTMP/hgcache/master/packs/repacklock | |
151 | $TESTTMP/hgcache/repos |
|
151 | $TESTTMP/hgcache/repos | |
152 |
|
152 | |||
153 | # Ensure that file 'w' was prefetched - it was not part of the update operation and therefore |
|
153 | # Ensure that file 'w' was prefetched - it was not part of the update operation and therefore | |
154 | # could only be downloaded by the background prefetch |
|
154 | # could only be downloaded by the background prefetch | |
155 |
|
155 | |||
156 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` |
|
156 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` | |
157 | $TESTTMP/hgcache/master/packs/ |
|
157 | $TESTTMP/hgcache/master/packs/ac19aff076286bebe3ff108c96c6445a0fe27c46: | |
158 | w: |
|
158 | w: | |
159 | Node Delta Base Delta Length Blob Size |
|
159 | Node Delta Base Delta Length Blob Size | |
160 | bb6ccd5dceaa 000000000000 2 2 |
|
160 | bb6ccd5dceaa 000000000000 2 2 | |
161 |
|
161 | |||
162 | Total: 2 2 (0.0% bigger) |
|
162 | Total: 2 2 (0.0% bigger) | |
163 | x: |
|
163 | x: | |
164 | Node Delta Base Delta Length Blob Size |
|
164 | Node Delta Base Delta Length Blob Size | |
165 | ef95c5376f34 000000000000 3 3 |
|
165 | ef95c5376f34 000000000000 3 3 | |
166 | 1406e7411862 ef95c5376f34 14 2 |
|
166 | 1406e7411862 ef95c5376f34 14 2 | |
167 |
|
167 | |||
168 | Total: 17 5 (240.0% bigger) |
|
168 | Total: 17 5 (240.0% bigger) | |
169 | y: |
|
169 | y: | |
170 | Node Delta Base Delta Length Blob Size |
|
170 | Node Delta Base Delta Length Blob Size | |
171 | 076f5e2225b3 000000000000 2 2 |
|
171 | 076f5e2225b3 000000000000 2 2 | |
172 |
|
172 | |||
173 | Total: 2 2 (0.0% bigger) |
|
173 | Total: 2 2 (0.0% bigger) | |
174 | z: |
|
174 | z: | |
175 | Node Delta Base Delta Length Blob Size |
|
175 | Node Delta Base Delta Length Blob Size | |
176 | 69a1b6752270 000000000000 2 2 |
|
176 | 69a1b6752270 000000000000 2 2 | |
177 |
|
177 | |||
178 | Total: 2 2 (0.0% bigger) |
|
178 | Total: 2 2 (0.0% bigger) | |
179 |
|
179 | |||
180 | # background prefetch with repack on commit when wcprevset configured |
|
180 | # background prefetch with repack on commit when wcprevset configured | |
181 |
|
181 | |||
182 | $ cat >> .hg/hgrc <<EOF |
|
182 | $ cat >> .hg/hgrc <<EOF | |
183 | > [remotefilelog] |
|
183 | > [remotefilelog] | |
184 | > bgprefetchrevs=0:: |
|
184 | > bgprefetchrevs=0:: | |
185 | > EOF |
|
185 | > EOF | |
186 |
|
186 | |||
187 | $ clearcache |
|
187 | $ clearcache | |
188 | $ find $CACHEDIR -type f | sort |
|
188 | $ find $CACHEDIR -type f | sort | |
189 | $ echo b > b |
|
189 | $ echo b > b | |
190 | $ hg commit -qAm b |
|
190 | $ hg commit -qAm b | |
191 | * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) |
|
191 | * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) | |
192 | $ hg bookmark temporary |
|
192 | $ hg bookmark temporary | |
193 | $ sleep 1 |
|
193 | $ sleep 1 | |
194 | $ hg debugwaitonprefetch >/dev/null 2>&1 |
|
194 | $ hg debugwaitonprefetch >/dev/null 2>&1 | |
195 | $ sleep 1 |
|
195 | $ sleep 1 | |
196 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
196 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
197 | $ find $CACHEDIR -type f | sort |
|
197 | $ find $CACHEDIR -type f | sort | |
198 | $TESTTMP/hgcache/master/packs/ |
|
198 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx | |
199 | $TESTTMP/hgcache/master/packs/ |
|
199 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack | |
200 | $TESTTMP/hgcache/master/packs/ |
|
200 | $TESTTMP/hgcache/master/packs/ac19aff076286bebe3ff108c96c6445a0fe27c46.dataidx | |
201 | $TESTTMP/hgcache/master/packs/ |
|
201 | $TESTTMP/hgcache/master/packs/ac19aff076286bebe3ff108c96c6445a0fe27c46.datapack | |
202 | $TESTTMP/hgcache/master/packs/repacklock |
|
202 | $TESTTMP/hgcache/master/packs/repacklock | |
203 | $TESTTMP/hgcache/repos |
|
203 | $TESTTMP/hgcache/repos | |
204 |
|
204 | |||
205 | # Ensure that file 'w' was prefetched - it was not part of the commit operation and therefore |
|
205 | # Ensure that file 'w' was prefetched - it was not part of the commit operation and therefore | |
206 | # could only be downloaded by the background prefetch |
|
206 | # could only be downloaded by the background prefetch | |
207 |
|
207 | |||
208 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` |
|
208 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` | |
209 | $TESTTMP/hgcache/master/packs/ |
|
209 | $TESTTMP/hgcache/master/packs/ac19aff076286bebe3ff108c96c6445a0fe27c46: | |
210 | w: |
|
210 | w: | |
211 | Node Delta Base Delta Length Blob Size |
|
211 | Node Delta Base Delta Length Blob Size | |
212 | bb6ccd5dceaa 000000000000 2 2 |
|
212 | bb6ccd5dceaa 000000000000 2 2 | |
213 |
|
213 | |||
214 | Total: 2 2 (0.0% bigger) |
|
214 | Total: 2 2 (0.0% bigger) | |
215 | x: |
|
215 | x: | |
216 | Node Delta Base Delta Length Blob Size |
|
216 | Node Delta Base Delta Length Blob Size | |
217 | ef95c5376f34 000000000000 3 3 |
|
217 | ef95c5376f34 000000000000 3 3 | |
218 | 1406e7411862 ef95c5376f34 14 2 |
|
218 | 1406e7411862 ef95c5376f34 14 2 | |
219 |
|
219 | |||
220 | Total: 17 5 (240.0% bigger) |
|
220 | Total: 17 5 (240.0% bigger) | |
221 | y: |
|
221 | y: | |
222 | Node Delta Base Delta Length Blob Size |
|
222 | Node Delta Base Delta Length Blob Size | |
223 | 076f5e2225b3 000000000000 2 2 |
|
223 | 076f5e2225b3 000000000000 2 2 | |
224 |
|
224 | |||
225 | Total: 2 2 (0.0% bigger) |
|
225 | Total: 2 2 (0.0% bigger) | |
226 | z: |
|
226 | z: | |
227 | Node Delta Base Delta Length Blob Size |
|
227 | Node Delta Base Delta Length Blob Size | |
228 | 69a1b6752270 000000000000 2 2 |
|
228 | 69a1b6752270 000000000000 2 2 | |
229 |
|
229 | |||
230 | Total: 2 2 (0.0% bigger) |
|
230 | Total: 2 2 (0.0% bigger) | |
231 |
|
231 | |||
232 | # background prefetch with repack on rebase when wcprevset configured |
|
232 | # background prefetch with repack on rebase when wcprevset configured | |
233 |
|
233 | |||
234 | $ hg up -r 2 |
|
234 | $ hg up -r 2 | |
235 | 3 files updated, 0 files merged, 3 files removed, 0 files unresolved |
|
235 | 3 files updated, 0 files merged, 3 files removed, 0 files unresolved | |
236 | (leaving bookmark temporary) |
|
236 | (leaving bookmark temporary) | |
237 | $ clearcache |
|
237 | $ clearcache | |
238 | $ find $CACHEDIR -type f | sort |
|
238 | $ find $CACHEDIR -type f | sort | |
239 | $ hg rebase -s temporary -d foo |
|
239 | $ hg rebase -s temporary -d foo | |
240 | rebasing 3:58147a5b5242 "b" (temporary tip) |
|
240 | rebasing 3:58147a5b5242 "b" (temporary tip) | |
241 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/58147a5b5242-c3678817-rebase.hg (glob) |
|
241 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/58147a5b5242-c3678817-rebase.hg (glob) | |
242 | 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob) |
|
242 | 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob) | |
243 | $ sleep 1 |
|
243 | $ sleep 1 | |
244 | $ hg debugwaitonprefetch >/dev/null 2>&1 |
|
244 | $ hg debugwaitonprefetch >/dev/null 2>&1 | |
245 | $ sleep 1 |
|
245 | $ sleep 1 | |
246 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
246 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
247 |
|
247 | |||
248 | # Ensure that file 'y' was prefetched - it was not part of the rebase operation and therefore |
|
248 | # Ensure that file 'y' was prefetched - it was not part of the rebase operation and therefore | |
249 | # could only be downloaded by the background prefetch |
|
249 | # could only be downloaded by the background prefetch | |
250 |
|
250 | |||
251 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` |
|
251 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` | |
252 | $TESTTMP/hgcache/master/packs/ |
|
252 | $TESTTMP/hgcache/master/packs/ac19aff076286bebe3ff108c96c6445a0fe27c46: | |
253 | w: |
|
253 | w: | |
254 | Node Delta Base Delta Length Blob Size |
|
254 | Node Delta Base Delta Length Blob Size | |
255 | bb6ccd5dceaa 000000000000 2 2 |
|
255 | bb6ccd5dceaa 000000000000 2 2 | |
256 |
|
256 | |||
257 | Total: 2 2 (0.0% bigger) |
|
257 | Total: 2 2 (0.0% bigger) | |
258 | x: |
|
258 | x: | |
259 | Node Delta Base Delta Length Blob Size |
|
259 | Node Delta Base Delta Length Blob Size | |
260 | ef95c5376f34 000000000000 3 3 |
|
260 | ef95c5376f34 000000000000 3 3 | |
261 | 1406e7411862 ef95c5376f34 14 2 |
|
261 | 1406e7411862 ef95c5376f34 14 2 | |
262 |
|
262 | |||
263 | Total: 17 5 (240.0% bigger) |
|
263 | Total: 17 5 (240.0% bigger) | |
264 | y: |
|
264 | y: | |
265 | Node Delta Base Delta Length Blob Size |
|
265 | Node Delta Base Delta Length Blob Size | |
266 | 076f5e2225b3 000000000000 2 2 |
|
266 | 076f5e2225b3 000000000000 2 2 | |
267 |
|
267 | |||
268 | Total: 2 2 (0.0% bigger) |
|
268 | Total: 2 2 (0.0% bigger) | |
269 | z: |
|
269 | z: | |
270 | Node Delta Base Delta Length Blob Size |
|
270 | Node Delta Base Delta Length Blob Size | |
271 | 69a1b6752270 000000000000 2 2 |
|
271 | 69a1b6752270 000000000000 2 2 | |
272 |
|
272 | |||
273 | Total: 2 2 (0.0% bigger) |
|
273 | Total: 2 2 (0.0% bigger) | |
274 |
|
274 | |||
275 | # Check that foreground prefetch with no arguments blocks until background prefetches finish |
|
275 | # Check that foreground prefetch with no arguments blocks until background prefetches finish | |
276 |
|
276 | |||
277 | $ hg up -r 3 |
|
277 | $ hg up -r 3 | |
278 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
278 | 2 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
279 | $ clearcache |
|
279 | $ clearcache | |
280 | $ hg prefetch --repack |
|
280 | $ hg prefetch --repack | |
281 | waiting for lock on prefetching in $TESTTMP/shallow held by process * on host * (glob) (?) |
|
281 | waiting for lock on prefetching in $TESTTMP/shallow held by process * on host * (glob) (?) | |
282 | got lock after * seconds (glob) (?) |
|
282 | got lock after * seconds (glob) (?) | |
283 | (running background incremental repack) |
|
283 | (running background incremental repack) | |
284 | * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) (?) |
|
284 | * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) (?) | |
285 |
|
285 | |||
286 | $ sleep 0.5 |
|
286 | $ sleep 0.5 | |
287 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
287 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
288 |
|
288 | |||
289 | $ find $CACHEDIR -type f | sort |
|
289 | $ find $CACHEDIR -type f | sort | |
290 | $TESTTMP/hgcache/master/packs/ |
|
290 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx | |
291 | $TESTTMP/hgcache/master/packs/ |
|
291 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack | |
292 | $TESTTMP/hgcache/master/packs/ |
|
292 | $TESTTMP/hgcache/master/packs/ac19aff076286bebe3ff108c96c6445a0fe27c46.dataidx | |
293 | $TESTTMP/hgcache/master/packs/ |
|
293 | $TESTTMP/hgcache/master/packs/ac19aff076286bebe3ff108c96c6445a0fe27c46.datapack | |
294 | $TESTTMP/hgcache/master/packs/repacklock |
|
294 | $TESTTMP/hgcache/master/packs/repacklock | |
295 | $TESTTMP/hgcache/repos |
|
295 | $TESTTMP/hgcache/repos | |
296 |
|
296 | |||
297 | # Ensure that files were prefetched |
|
297 | # Ensure that files were prefetched | |
298 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` |
|
298 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` | |
299 | $TESTTMP/hgcache/master/packs/ |
|
299 | $TESTTMP/hgcache/master/packs/ac19aff076286bebe3ff108c96c6445a0fe27c46: | |
300 | w: |
|
300 | w: | |
301 | Node Delta Base Delta Length Blob Size |
|
301 | Node Delta Base Delta Length Blob Size | |
302 | bb6ccd5dceaa 000000000000 2 2 |
|
302 | bb6ccd5dceaa 000000000000 2 2 | |
303 |
|
303 | |||
304 | Total: 2 2 (0.0% bigger) |
|
304 | Total: 2 2 (0.0% bigger) | |
305 | x: |
|
305 | x: | |
306 | Node Delta Base Delta Length Blob Size |
|
306 | Node Delta Base Delta Length Blob Size | |
307 | ef95c5376f34 000000000000 3 3 |
|
307 | ef95c5376f34 000000000000 3 3 | |
308 | 1406e7411862 ef95c5376f34 14 2 |
|
308 | 1406e7411862 ef95c5376f34 14 2 | |
309 |
|
309 | |||
310 | Total: 17 5 (240.0% bigger) |
|
310 | Total: 17 5 (240.0% bigger) | |
311 | y: |
|
311 | y: | |
312 | Node Delta Base Delta Length Blob Size |
|
312 | Node Delta Base Delta Length Blob Size | |
313 | 076f5e2225b3 000000000000 2 2 |
|
313 | 076f5e2225b3 000000000000 2 2 | |
314 |
|
314 | |||
315 | Total: 2 2 (0.0% bigger) |
|
315 | Total: 2 2 (0.0% bigger) | |
316 | z: |
|
316 | z: | |
317 | Node Delta Base Delta Length Blob Size |
|
317 | Node Delta Base Delta Length Blob Size | |
318 | 69a1b6752270 000000000000 2 2 |
|
318 | 69a1b6752270 000000000000 2 2 | |
319 |
|
319 | |||
320 | Total: 2 2 (0.0% bigger) |
|
320 | Total: 2 2 (0.0% bigger) | |
321 |
|
321 | |||
322 | # Check that foreground prefetch fetches revs specified by '. + draft() + bgprefetchrevs + pullprefetch' |
|
322 | # Check that foreground prefetch fetches revs specified by '. + draft() + bgprefetchrevs + pullprefetch' | |
323 |
|
323 | |||
324 | $ clearcache |
|
324 | $ clearcache | |
325 | $ hg prefetch --repack |
|
325 | $ hg prefetch --repack | |
326 | waiting for lock on prefetching in $TESTTMP/shallow held by process * on host * (glob) (?) |
|
326 | waiting for lock on prefetching in $TESTTMP/shallow held by process * on host * (glob) (?) | |
327 | got lock after * seconds (glob) (?) |
|
327 | got lock after * seconds (glob) (?) | |
328 | (running background incremental repack) |
|
328 | (running background incremental repack) | |
329 | * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) (?) |
|
329 | * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) (?) | |
330 | $ sleep 0.5 |
|
330 | $ sleep 0.5 | |
331 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
331 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
332 |
|
332 | |||
333 | $ find $CACHEDIR -type f | sort |
|
333 | $ find $CACHEDIR -type f | sort | |
334 | $TESTTMP/hgcache/master/packs/ |
|
334 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx | |
335 | $TESTTMP/hgcache/master/packs/ |
|
335 | $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack | |
336 | $TESTTMP/hgcache/master/packs/ |
|
336 | $TESTTMP/hgcache/master/packs/ac19aff076286bebe3ff108c96c6445a0fe27c46.dataidx | |
337 | $TESTTMP/hgcache/master/packs/ |
|
337 | $TESTTMP/hgcache/master/packs/ac19aff076286bebe3ff108c96c6445a0fe27c46.datapack | |
338 | $TESTTMP/hgcache/master/packs/repacklock |
|
338 | $TESTTMP/hgcache/master/packs/repacklock | |
339 | $TESTTMP/hgcache/repos |
|
339 | $TESTTMP/hgcache/repos | |
340 |
|
340 | |||
341 | # Ensure that files were prefetched |
|
341 | # Ensure that files were prefetched | |
342 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` |
|
342 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` | |
343 | $TESTTMP/hgcache/master/packs/ |
|
343 | $TESTTMP/hgcache/master/packs/ac19aff076286bebe3ff108c96c6445a0fe27c46: | |
344 | w: |
|
344 | w: | |
345 | Node Delta Base Delta Length Blob Size |
|
345 | Node Delta Base Delta Length Blob Size | |
346 | bb6ccd5dceaa 000000000000 2 2 |
|
346 | bb6ccd5dceaa 000000000000 2 2 | |
347 |
|
347 | |||
348 | Total: 2 2 (0.0% bigger) |
|
348 | Total: 2 2 (0.0% bigger) | |
349 | x: |
|
349 | x: | |
350 | Node Delta Base Delta Length Blob Size |
|
350 | Node Delta Base Delta Length Blob Size | |
351 | ef95c5376f34 000000000000 3 3 |
|
351 | ef95c5376f34 000000000000 3 3 | |
352 | 1406e7411862 ef95c5376f34 14 2 |
|
352 | 1406e7411862 ef95c5376f34 14 2 | |
353 |
|
353 | |||
354 | Total: 17 5 (240.0% bigger) |
|
354 | Total: 17 5 (240.0% bigger) | |
355 | y: |
|
355 | y: | |
356 | Node Delta Base Delta Length Blob Size |
|
356 | Node Delta Base Delta Length Blob Size | |
357 | 076f5e2225b3 000000000000 2 2 |
|
357 | 076f5e2225b3 000000000000 2 2 | |
358 |
|
358 | |||
359 | Total: 2 2 (0.0% bigger) |
|
359 | Total: 2 2 (0.0% bigger) | |
360 | z: |
|
360 | z: | |
361 | Node Delta Base Delta Length Blob Size |
|
361 | Node Delta Base Delta Length Blob Size | |
362 | 69a1b6752270 000000000000 2 2 |
|
362 | 69a1b6752270 000000000000 2 2 | |
363 |
|
363 | |||
364 | Total: 2 2 (0.0% bigger) |
|
364 | Total: 2 2 (0.0% bigger) | |
365 |
|
365 | |||
366 | # Test that if data was prefetched and repacked we don't need to prefetch it again |
|
366 | # Test that if data was prefetched and repacked we don't need to prefetch it again | |
367 | # It ensures that Mercurial looks not only in loose files but also in packs |
|
367 | # It ensures that Mercurial looks not only in loose files but also in packs | |
368 |
|
368 | |||
369 | $ hg prefetch --repack |
|
369 | $ hg prefetch --repack | |
370 | (running background incremental repack) |
|
370 | (running background incremental repack) |
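For reference, the remotefilelog knobs this test exercises could be combined in a single hgrc like the sketch below. The values are the ones used above, chosen to make the test deterministic, not recommendations:

    [remotefilelog]
    prefetchdays = 0          # prefetch commits regardless of their age
    prefetchdelay = 0         # no delay between background prefetches
    pullprefetch = bookmark() # revset prefetched after every pull
    backgroundprefetch = True # run that prefetch in the background
    backgroundrepack = True   # follow background prefetches with a repack
    bgprefetchrevs = 0::      # revset prefetched on update and commit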
@@ -1,388 +1,375 b'' | |||||
1 | #!/usr/bin/env python |
|
1 | #!/usr/bin/env python | |
2 | from __future__ import absolute_import, print_function |
|
2 | from __future__ import absolute_import, print_function | |
3 |
|
3 | |||
4 | import hashlib |
|
4 | import hashlib | |
5 | import os |
|
5 | import os | |
6 | import random |
|
6 | import random | |
7 | import shutil |
|
7 | import shutil | |
8 | import stat |
|
8 | import stat | |
9 | import struct |
|
9 | import struct | |
10 | import sys |
|
10 | import sys | |
11 | import tempfile |
|
11 | import tempfile | |
12 | import time |
|
12 | import time | |
13 | import unittest |
|
13 | import unittest | |
14 |
|
14 | |||
15 | import silenttestrunner |
|
15 | import silenttestrunner | |
16 |
|
16 | |||
17 | # Load the local remotefilelog, not the system one |
|
17 | # Load the local remotefilelog, not the system one | |
18 | sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')] |
|
18 | sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')] | |
19 | from mercurial.node import nullid |
|
19 | from mercurial.node import nullid | |
20 | from mercurial import ( |
|
20 | from mercurial import ( | |
21 | ui as uimod, |
|
21 | ui as uimod, | |
22 | ) |
|
22 | ) | |
23 | from hgext.remotefilelog import ( |
|
23 | from hgext.remotefilelog import ( | |
24 | basepack, |
|
24 | basepack, | |
25 | constants, |
|
25 | constants, | |
26 | datapack, |
|
26 | datapack, | |
27 | ) |
|
27 | ) | |
28 |
|
28 | |||
29 | class datapacktestsbase(object): |
|
29 | class datapacktestsbase(object): | |
30 | def __init__(self, datapackreader, paramsavailable): |
|
30 | def __init__(self, datapackreader, paramsavailable): | |
31 | self.datapackreader = datapackreader |
|
31 | self.datapackreader = datapackreader | |
32 | self.paramsavailable = paramsavailable |
|
32 | self.paramsavailable = paramsavailable | |
33 |
|
33 | |||
34 | def setUp(self): |
|
34 | def setUp(self): | |
35 | self.tempdirs = [] |
|
35 | self.tempdirs = [] | |
36 |
|
36 | |||
37 | def tearDown(self): |
|
37 | def tearDown(self): | |
38 | for d in self.tempdirs: |
|
38 | for d in self.tempdirs: | |
39 | shutil.rmtree(d) |
|
39 | shutil.rmtree(d) | |
40 |
|
40 | |||
41 | def makeTempDir(self): |
|
41 | def makeTempDir(self): | |
42 | tempdir = tempfile.mkdtemp() |
|
42 | tempdir = tempfile.mkdtemp() | |
43 | self.tempdirs.append(tempdir) |
|
43 | self.tempdirs.append(tempdir) | |
44 | return tempdir |
|
44 | return tempdir | |
45 |
|
45 | |||
46 | def getHash(self, content): |
|
46 | def getHash(self, content): | |
47 | return hashlib.sha1(content).digest() |
|
47 | return hashlib.sha1(content).digest() | |
48 |
|
48 | |||
49 | def getFakeHash(self): |
|
49 | def getFakeHash(self): | |
50 | return ''.join(chr(random.randint(0, 255)) for _ in range(20)) |
|
50 | return ''.join(chr(random.randint(0, 255)) for _ in range(20)) | |
51 |
|
51 | |||
52 | def createPack(self, revisions=None, packdir=None |
|
52 | def createPack(self, revisions=None, packdir=None): | |
53 | if revisions is None: |
|
53 | if revisions is None: | |
54 | revisions = [("filename", self.getFakeHash(), nullid, "content")] |
|
54 | revisions = [("filename", self.getFakeHash(), nullid, "content")] | |
55 |
|
55 | |||
56 | if packdir is None: |
|
56 | if packdir is None: | |
57 | packdir = self.makeTempDir() |
|
57 | packdir = self.makeTempDir() | |
58 |
|
58 | |||
59 | packer = datapack.mutabledatapack( |
|
59 | packer = datapack.mutabledatapack(uimod.ui(), packdir, version=2) | |
60 | uimod.ui(), packdir, version=version) |
|
|||
61 |
|
60 | |||
62 | for args in revisions: |
|
61 | for args in revisions: | |
63 | filename, node, base, content = args[0:4] |
|
62 | filename, node, base, content = args[0:4] | |
64 | # meta is optional |
|
63 | # meta is optional | |
65 | meta = None |
|
64 | meta = None | |
66 | if len(args) > 4: |
|
65 | if len(args) > 4: | |
67 | meta = args[4] |
|
66 | meta = args[4] | |
68 | packer.add(filename, node, base, content, metadata=meta) |
|
67 | packer.add(filename, node, base, content, metadata=meta) | |
69 |
|
68 | |||
70 | path = packer.close() |
|
69 | path = packer.close() | |
71 | return self.datapackreader(path) |
|
70 | return self.datapackreader(path) | |
72 |
|
71 | |||
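Every test below follows the round trip this helper sets up: build a list of (filename, node, deltabase, content) tuples, write them through mutabledatapack, then reopen the result with the reader under test. A minimal sketch of that round trip; the literals are arbitrary:

    # Hypothetical round trip using the helper above (inside a test method).
    content = 'some blob'
    node = self.getHash(content)
    pack = self.createPack([('foo', node, nullid, content)])
    chain = pack.getdeltachain('foo', node)
    assert chain[0][4] == content   # field 4 of a chain entry is the text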
73 | def _testAddSingle(self, content): |
|
72 | def _testAddSingle(self, content): | |
74 | """Test putting a simple blob into a pack and reading it out. |
|
73 | """Test putting a simple blob into a pack and reading it out. | |
75 | """ |
|
74 | """ | |
76 | filename = "foo" |
|
75 | filename = "foo" | |
77 | node = self.getHash(content) |
|
76 | node = self.getHash(content) | |
78 |
|
77 | |||
79 | revisions = [(filename, node, nullid, content)] |
|
78 | revisions = [(filename, node, nullid, content)] | |
80 | pack = self.createPack(revisions) |
|
79 | pack = self.createPack(revisions) | |
81 | if self.paramsavailable: |
|
80 | if self.paramsavailable: | |
82 | self.assertEquals(pack.params.fanoutprefix, |
|
81 | self.assertEquals(pack.params.fanoutprefix, | |
83 | basepack.SMALLFANOUTPREFIX) |
|
82 | basepack.SMALLFANOUTPREFIX) | |
84 |
|
83 | |||
85 | chain = pack.getdeltachain(filename, node) |
|
84 | chain = pack.getdeltachain(filename, node) | |
86 | self.assertEquals(content, chain[0][4]) |
|
85 | self.assertEquals(content, chain[0][4]) | |
87 |
|
86 | |||
88 | def testAddSingle(self): |
|
87 | def testAddSingle(self): | |
89 | self._testAddSingle('abcdef') |
|
88 | self._testAddSingle('abcdef') | |
90 |
|
89 | |||
91 | def testAddSingleEmpty(self): |
|
90 | def testAddSingleEmpty(self): | |
92 | self._testAddSingle('') |
|
91 | self._testAddSingle('') | |
93 |
|
92 | |||
94 | def testAddMultiple(self): |
|
93 | def testAddMultiple(self): | |
95 | """Test putting multiple unrelated blobs into a pack and reading them |
|
94 | """Test putting multiple unrelated blobs into a pack and reading them | |
96 | out. |
|
95 | out. | |
97 | """ |
|
96 | """ | |
98 | revisions = [] |
|
97 | revisions = [] | |
99 | for i in range(10): |
|
98 | for i in range(10): | |
100 | filename = "foo%s" % i |
|
99 | filename = "foo%s" % i | |
101 | content = "abcdef%s" % i |
|
100 | content = "abcdef%s" % i | |
102 | node = self.getHash(content) |
|
101 | node = self.getHash(content) | |
103 | revisions.append((filename, node, self.getFakeHash(), content)) |
|
102 | revisions.append((filename, node, self.getFakeHash(), content)) | |
104 |
|
103 | |||
105 | pack = self.createPack(revisions) |
|
104 | pack = self.createPack(revisions) | |
106 |
|
105 | |||
107 | for filename, node, base, content in revisions: |
|
106 | for filename, node, base, content in revisions: | |
108 | entry = pack.getdelta(filename, node) |
|
107 | entry = pack.getdelta(filename, node) | |
109 | self.assertEquals((content, filename, base, {}), entry) |
|
108 | self.assertEquals((content, filename, base, {}), entry) | |
110 |
|
109 | |||
111 | chain = pack.getdeltachain(filename, node) |
|
110 | chain = pack.getdeltachain(filename, node) | |
112 | self.assertEquals(content, chain[0][4]) |
|
111 | self.assertEquals(content, chain[0][4]) | |
113 |
|
112 | |||
114 | def testAddDeltas(self): |
|
113 | def testAddDeltas(self): | |
115 | """Test putting multiple delta blobs into a pack and read the chain. |
|
114 | """Test putting multiple delta blobs into a pack and read the chain. | |
116 | """ |
|
115 | """ | |
117 | revisions = [] |
|
116 | revisions = [] | |
118 | filename = "foo" |
|
117 | filename = "foo" | |
119 | lastnode = nullid |
|
118 | lastnode = nullid | |
120 | for i in range(10): |
|
119 | for i in range(10): | |
121 | content = "abcdef%s" % i |
|
120 | content = "abcdef%s" % i | |
122 | node = self.getHash(content) |
|
121 | node = self.getHash(content) | |
123 | revisions.append((filename, node, lastnode, content)) |
|
122 | revisions.append((filename, node, lastnode, content)) | |
124 | lastnode = node |
|
123 | lastnode = node | |
125 |
|
124 | |||
126 | pack = self.createPack(revisions) |
|
125 | pack = self.createPack(revisions) | |
127 |
|
126 | |||
128 | entry = pack.getdelta(filename, revisions[0][1]) |
|
127 | entry = pack.getdelta(filename, revisions[0][1]) | |
129 | realvalue = (revisions[0][3], filename, revisions[0][2], {}) |
|
128 | realvalue = (revisions[0][3], filename, revisions[0][2], {}) | |
130 | self.assertEquals(entry, realvalue) |
|
129 | self.assertEquals(entry, realvalue) | |
131 |
|
130 | |||
132 | # Test that the chain for the final entry has all the others |
|
131 | # Test that the chain for the final entry has all the others | |
133 | chain = pack.getdeltachain(filename, node) |
|
132 | chain = pack.getdeltachain(filename, node) | |
134 | for i in range(10): |
|
133 | for i in range(10): | |
135 | content = "abcdef%s" % i |
|
134 | content = "abcdef%s" % i | |
136 | self.assertEquals(content, chain[-i - 1][4]) |
|
135 | self.assertEquals(content, chain[-i - 1][4]) | |
137 |
|
136 | |||
138 | def testPackMany(self): |
|
137 | def testPackMany(self): | |
139 | """Pack many related and unrelated objects. |
|
138 | """Pack many related and unrelated objects. | |
140 | """ |
|
139 | """ | |
141 | # Build a random pack file |
|
140 | # Build a random pack file | |
142 | revisions = [] |
|
141 | revisions = [] | |
143 | blobs = {} |
|
142 | blobs = {} | |
144 | random.seed(0) |
|
143 | random.seed(0) | |
145 | for i in range(100): |
|
144 | for i in range(100): | |
146 | filename = "filename-%s" % i |
|
145 | filename = "filename-%s" % i | |
147 | filerevs = [] |
|
146 | filerevs = [] | |
148 | for j in range(random.randint(1, 100)): |
|
147 | for j in range(random.randint(1, 100)): | |
149 | content = "content-%s" % j |
|
148 | content = "content-%s" % j | |
150 | node = self.getHash(content) |
|
149 | node = self.getHash(content) | |
151 | lastnode = nullid |
|
150 | lastnode = nullid | |
152 | if len(filerevs) > 0: |
|
151 | if len(filerevs) > 0: | |
153 | lastnode = filerevs[random.randint(0, len(filerevs) - 1)] |
|
152 | lastnode = filerevs[random.randint(0, len(filerevs) - 1)] | |
154 | filerevs.append(node) |
|
153 | filerevs.append(node) | |
155 | blobs[(filename, node, lastnode)] = content |
|
154 | blobs[(filename, node, lastnode)] = content | |
156 | revisions.append((filename, node, lastnode, content)) |
|
155 | revisions.append((filename, node, lastnode, content)) | |
157 |
|
156 | |||
158 | pack = self.createPack(revisions) |
|
157 | pack = self.createPack(revisions) | |
159 |
|
158 | |||
160 | # Verify the pack contents |
|
159 | # Verify the pack contents | |
161 | for (filename, node, lastnode), content in sorted(blobs.iteritems()): |
|
160 | for (filename, node, lastnode), content in sorted(blobs.iteritems()): | |
162 | chain = pack.getdeltachain(filename, node) |
|
161 | chain = pack.getdeltachain(filename, node) | |
163 | for entry in chain: |
|
162 | for entry in chain: | |
164 | expectedcontent = blobs[(entry[0], entry[1], entry[3])] |
|
163 | expectedcontent = blobs[(entry[0], entry[1], entry[3])] | |
165 | self.assertEquals(entry[4], expectedcontent) |
|
164 | self.assertEquals(entry[4], expectedcontent) | |
166 |
|
165 | |||
167 | def testPackMetadata(self): |
|
166 | def testPackMetadata(self): | |
168 | revisions = [] |
|
167 | revisions = [] | |
169 | for i in range(100): |
|
168 | for i in range(100): | |
170 | filename = '%s.txt' % i |
|
169 | filename = '%s.txt' % i | |
171 | content = 'put-something-here \n' * i |
|
170 | content = 'put-something-here \n' * i | |
172 | node = self.getHash(content) |
|
171 | node = self.getHash(content) | |
173 | meta = {constants.METAKEYFLAG: i ** 4, |
|
172 | meta = {constants.METAKEYFLAG: i ** 4, | |
174 | constants.METAKEYSIZE: len(content), |
|
173 | constants.METAKEYSIZE: len(content), | |
175 | 'Z': 'random_string', |
|
174 | 'Z': 'random_string', | |
176 | '_': '\0' * i} |
|
175 | '_': '\0' * i} | |
177 | revisions.append((filename, node, nullid, content, meta)) |
|
176 | revisions.append((filename, node, nullid, content, meta)) | |
178 |
pack = self.createPack(revisions |
|
177 | pack = self.createPack(revisions) | |
179 | for name, node, x, content, origmeta in revisions: |
|
178 | for name, node, x, content, origmeta in revisions: | |
180 | parsedmeta = pack.getmeta(name, node) |
|
179 | parsedmeta = pack.getmeta(name, node) | |
181 | # flag == 0 should be optimized out |
|
180 | # flag == 0 should be optimized out | |
182 | if origmeta[constants.METAKEYFLAG] == 0: |
|
181 | if origmeta[constants.METAKEYFLAG] == 0: | |
183 | del origmeta[constants.METAKEYFLAG] |
|
182 | del origmeta[constants.METAKEYFLAG] | |
184 | self.assertEquals(parsedmeta, origmeta) |
|
183 | self.assertEquals(parsedmeta, origmeta) | |
185 |
|
184 | |||
186 | def testPackMetadataThrows(self): |
|
|||
187 | filename = '1' |
|
|||
188 | content = '2' |
|
|||
189 | node = self.getHash(content) |
|
|||
190 | meta = {constants.METAKEYFLAG: 3} |
|
|||
191 | revisions = [(filename, node, nullid, content, meta)] |
|
|||
192 | try: |
|
|||
193 | self.createPack(revisions, version=0) |
|
|||
194 | self.assertTrue(False, "should throw if metadata is not supported") |
|
|||
195 | except RuntimeError: |
|
|||
196 | pass |
|
|||
197 |
|
||||
198 | def testGetMissing(self): |
|
185 | def testGetMissing(self): | |
199 | """Test the getmissing() api. |
|
186 | """Test the getmissing() api. | |
200 | """ |
|
187 | """ | |
201 | revisions = [] |
|
188 | revisions = [] | |
202 | filename = "foo" |
|
189 | filename = "foo" | |
203 | lastnode = nullid |
|
190 | lastnode = nullid | |
204 | for i in range(10): |
|
191 | for i in range(10): | |
205 | content = "abcdef%s" % i |
|
192 | content = "abcdef%s" % i | |
206 | node = self.getHash(content) |
|
193 | node = self.getHash(content) | |
207 | revisions.append((filename, node, lastnode, content)) |
|
194 | revisions.append((filename, node, lastnode, content)) | |
208 | lastnode = node |
|
195 | lastnode = node | |
209 |
|
196 | |||
210 | pack = self.createPack(revisions) |
|
197 | pack = self.createPack(revisions) | |
211 |
|
198 | |||
212 | missing = pack.getmissing([("foo", revisions[0][1])]) |
|
199 | missing = pack.getmissing([("foo", revisions[0][1])]) | |
213 | self.assertFalse(missing) |
|
200 | self.assertFalse(missing) | |
214 |
|
201 | |||
215 | missing = pack.getmissing([("foo", revisions[0][1]), |
|
202 | missing = pack.getmissing([("foo", revisions[0][1]), | |
216 | ("foo", revisions[1][1])]) |
|
203 | ("foo", revisions[1][1])]) | |
217 | self.assertFalse(missing) |
|
204 | self.assertFalse(missing) | |
218 |
|
205 | |||
219 | fakenode = self.getFakeHash() |
|
206 | fakenode = self.getFakeHash() | |
220 | missing = pack.getmissing([("foo", revisions[0][1]), ("foo", fakenode)]) |
|
207 | missing = pack.getmissing([("foo", revisions[0][1]), ("foo", fakenode)]) | |
221 | self.assertEquals(missing, [("foo", fakenode)]) |
|
208 | self.assertEquals(missing, [("foo", fakenode)]) | |
222 |
|
209 | |||
223 | def testAddThrows(self): |
|
210 | def testAddThrows(self): | |
224 | pack = self.createPack() |
|
211 | pack = self.createPack() | |
225 |
|
212 | |||
226 | try: |
|
213 | try: | |
227 | pack.add('filename', nullid, 'contents') |
|
214 | pack.add('filename', nullid, 'contents') | |
228 | self.assertTrue(False, "datapack.add should throw") |
|
215 | self.assertTrue(False, "datapack.add should throw") | |
229 | except RuntimeError: |
|
216 | except RuntimeError: | |
230 | pass |
|
217 | pass | |
231 |
|
218 | |||
232 | def testBadVersionThrows(self): |
|
219 | def testBadVersionThrows(self): | |
233 | pack = self.createPack() |
|
220 | pack = self.createPack() | |
234 | path = pack.path + '.datapack' |
|
221 | path = pack.path + '.datapack' | |
235 | with open(path) as f: |
|
222 | with open(path) as f: | |
236 | raw = f.read() |
|
223 | raw = f.read() | |
237 | raw = struct.pack('!B', 255) + raw[1:] |
|
224 | raw = struct.pack('!B', 255) + raw[1:] | |
238 | os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE) |
|
225 | os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE) | |
239 | with open(path, 'w+') as f: |
|
226 | with open(path, 'w+') as f: | |
240 | f.write(raw) |
|
227 | f.write(raw) | |
241 |
|
228 | |||
242 | try: |
|
229 | try: | |
243 | pack = self.datapackreader(pack.path) |
|
230 | pack = self.datapackreader(pack.path) | |
244 | self.assertTrue(False, "bad version number should have thrown") |
|
231 | self.assertTrue(False, "bad version number should have thrown") | |
245 | except RuntimeError: |
|
232 | except RuntimeError: | |
246 | pass |
|
233 | pass | |
247 |
|
234 | |||
248 | def testMissingDeltabase(self): |
|
235 | def testMissingDeltabase(self): | |
249 | fakenode = self.getFakeHash() |
|
236 | fakenode = self.getFakeHash() | |
250 | revisions = [("filename", fakenode, self.getFakeHash(), "content")] |
|
237 | revisions = [("filename", fakenode, self.getFakeHash(), "content")] | |
251 | pack = self.createPack(revisions) |
|
238 | pack = self.createPack(revisions) | |
252 | chain = pack.getdeltachain("filename", fakenode) |
|
239 | chain = pack.getdeltachain("filename", fakenode) | |
253 | self.assertEquals(len(chain), 1) |
|
240 | self.assertEquals(len(chain), 1) | |
254 |
|
241 | |||
255 | def testLargePack(self): |
|
242 | def testLargePack(self): | |
256 | """Test creating and reading from a large pack with over X entries. |
|
243 | """Test creating and reading from a large pack with over X entries. | |
257 | This causes it to use a 2^16 fanout table instead.""" |
|
244 | This causes it to use a 2^16 fanout table instead.""" | |
258 | revisions = [] |
|
245 | revisions = [] | |
259 | blobs = {} |
|
246 | blobs = {} | |
260 | total = basepack.SMALLFANOUTCUTOFF + 1 |
|
247 | total = basepack.SMALLFANOUTCUTOFF + 1 | |
261 | for i in xrange(total): |
|
248 | for i in xrange(total): | |
262 | filename = "filename-%s" % i |
|
249 | filename = "filename-%s" % i | |
263 | content = filename |
|
250 | content = filename | |
264 | node = self.getHash(content) |
|
251 | node = self.getHash(content) | |
265 | blobs[(filename, node)] = content |
|
252 | blobs[(filename, node)] = content | |
266 | revisions.append((filename, node, nullid, content)) |
|
253 | revisions.append((filename, node, nullid, content)) | |
267 |
|
254 | |||
268 | pack = self.createPack(revisions) |
|
255 | pack = self.createPack(revisions) | |
269 | if self.paramsavailable: |
|
256 | if self.paramsavailable: | |
270 | self.assertEquals(pack.params.fanoutprefix, |
|
257 | self.assertEquals(pack.params.fanoutprefix, | |
271 | basepack.LARGEFANOUTPREFIX) |
|
258 | basepack.LARGEFANOUTPREFIX) | |
272 |
|
259 | |||
273 | for (filename, node), content in blobs.iteritems(): |
|
260 | for (filename, node), content in blobs.iteritems(): | |
274 | actualcontent = pack.getdeltachain(filename, node)[0][4] |
|
261 | actualcontent = pack.getdeltachain(filename, node)[0][4] | |
275 | self.assertEquals(actualcontent, content) |
|
262 | self.assertEquals(actualcontent, content) | |
276 |
|
263 | |||
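For orientation on the docstring above: a one-byte fanout prefix addresses 2^8 = 256 table entries, while the two-byte prefix exercised here addresses 2^16 = 65536, which is why pushing past SMALLFANOUTCUTOFF flips params.fanoutprefix to LARGEFANOUTPREFIX. A tiny sanity check of that arithmetic, assuming the prefix constants are byte counts:

    # Fanout table sizes implied by the prefix widths (assumed semantics).
    assert 2 ** (8 * basepack.SMALLFANOUTPREFIX) == 256
    assert 2 ** (8 * basepack.LARGEFANOUTPREFIX) == 65536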
277 | def testPacksCache(self): |
|
264 | def testPacksCache(self): | |
278 | """Test that we remember the most recent packs while fetching the delta |
|
265 | """Test that we remember the most recent packs while fetching the delta | |
279 | chain.""" |
|
266 | chain.""" | |
280 |
|
267 | |||
281 | packdir = self.makeTempDir() |
|
268 | packdir = self.makeTempDir() | |
282 | deltachains = [] |
|
269 | deltachains = [] | |
283 |
|
270 | |||
284 | numpacks = 10 |
|
271 | numpacks = 10 | |
285 | revisionsperpack = 100 |
|
272 | revisionsperpack = 100 | |
286 |
|
273 | |||
287 | for i in range(numpacks): |
|
274 | for i in range(numpacks): | |
288 | chain = [] |
|
275 | chain = [] | |
289 | revision = (str(i), self.getFakeHash(), nullid, "content") |
|
276 | revision = (str(i), self.getFakeHash(), nullid, "content") | |
290 |
|
277 | |||
291 | for _ in range(revisionsperpack): |
|
278 | for _ in range(revisionsperpack): | |
292 | chain.append(revision) |
|
279 | chain.append(revision) | |
293 | revision = ( |
|
280 | revision = ( | |
294 | str(i), |
|
281 | str(i), | |
295 | self.getFakeHash(), |
|
282 | self.getFakeHash(), | |
296 | revision[1], |
|
283 | revision[1], | |
297 | self.getFakeHash() |
|
284 | self.getFakeHash() | |
298 | ) |
|
285 | ) | |
299 |
|
286 | |||
300 | self.createPack(chain, packdir) |
|
287 | self.createPack(chain, packdir) | |
301 | deltachains.append(chain) |
|
288 | deltachains.append(chain) | |
302 |
|
289 | |||
303 | class testdatapackstore(datapack.datapackstore): |
|
290 | class testdatapackstore(datapack.datapackstore): | |
304 | # Ensures that we are not keeping everything in the cache. |
|
291 | # Ensures that we are not keeping everything in the cache. | |
305 | DEFAULTCACHESIZE = numpacks / 2 |
|
292 | DEFAULTCACHESIZE = numpacks / 2 | |
306 |
|
293 | |||
307 | store = testdatapackstore(uimod.ui(), packdir) |
|
294 | store = testdatapackstore(uimod.ui(), packdir) | |
308 |
|
295 | |||
309 | random.shuffle(deltachains) |
|
296 | random.shuffle(deltachains) | |
310 | for randomchain in deltachains: |
|
297 | for randomchain in deltachains: | |
311 | revision = random.choice(randomchain) |
|
298 | revision = random.choice(randomchain) | |
312 | chain = store.getdeltachain(revision[0], revision[1]) |
|
299 | chain = store.getdeltachain(revision[0], revision[1]) | |
313 |
|
300 | |||
314 | mostrecentpack = next(iter(store.packs), None) |
|
301 | mostrecentpack = next(iter(store.packs), None) | |
315 | self.assertEquals( |
|
302 | self.assertEquals( | |
316 | mostrecentpack.getdeltachain(revision[0], revision[1]), |
|
303 | mostrecentpack.getdeltachain(revision[0], revision[1]), | |
317 | chain |
|
304 | chain | |
318 | ) |
|
305 | ) | |
319 |
|
306 | |||
320 | self.assertEquals(randomchain.index(revision) + 1, len(chain)) |
|
307 | self.assertEquals(randomchain.index(revision) + 1, len(chain)) | |
321 |
|
308 | |||
322 | # perf test off by default since it's slow |
|
309 | # perf test off by default since it's slow | |
323 | def _testIndexPerf(self): |
|
310 | def _testIndexPerf(self): | |
324 | random.seed(0) |
|
311 | random.seed(0) | |
325 | print("Multi-get perf test") |
|
312 | print("Multi-get perf test") | |
326 | packsizes = [ |
|
313 | packsizes = [ | |
327 | 100, |
|
314 | 100, | |
328 | 10000, |
|
315 | 10000, | |
329 | 100000, |
|
316 | 100000, | |
330 | 500000, |
|
317 | 500000, | |
331 | 1000000, |
|
318 | 1000000, | |
332 | 3000000, |
|
319 | 3000000, | |
333 | ] |
|
320 | ] | |
334 | lookupsizes = [ |
|
321 | lookupsizes = [ | |
335 | 10, |
|
322 | 10, | |
336 | 100, |
|
323 | 100, | |
337 | 1000, |
|
324 | 1000, | |
338 | 10000, |
|
325 | 10000, | |
339 | 100000, |
|
326 | 100000, | |
340 | 1000000, |
|
327 | 1000000, | |
341 | ] |
|
328 | ] | |
342 | for packsize in packsizes: |
|
329 | for packsize in packsizes: | |
343 | revisions = [] |
|
330 | revisions = [] | |
344 | for i in xrange(packsize): |
|
331 | for i in xrange(packsize): | |
345 | filename = "filename-%s" % i |
|
332 | filename = "filename-%s" % i | |
346 | content = "content-%s" % i |
|
333 | content = "content-%s" % i | |
347 | node = self.getHash(content) |
|
334 | node = self.getHash(content) | |
348 | revisions.append((filename, node, nullid, content)) |
|
335 | revisions.append((filename, node, nullid, content)) | |
349 |
|
336 | |||
350 | path = self.createPack(revisions).path |
|
337 | path = self.createPack(revisions).path | |
351 |
|
338 | |||
352 | # Perf of large multi-get |
|
339 | # Perf of large multi-get | |
353 | import gc |
|
340 | import gc | |
354 | gc.disable() |
|
341 | gc.disable() | |
355 | pack = self.datapackreader(path) |
|
342 | pack = self.datapackreader(path) | |
356 | for lookupsize in lookupsizes: |
|
343 | for lookupsize in lookupsizes: | |
357 | if lookupsize > packsize: |
|
344 | if lookupsize > packsize: | |
358 | continue |
|
345 | continue | |
359 | random.shuffle(revisions) |
|
346 | random.shuffle(revisions) | |
360 | findnodes = [(rev[0], rev[1]) for rev in revisions] |
|
347 | findnodes = [(rev[0], rev[1]) for rev in revisions] | |
361 |
|
348 | |||
362 | start = time.time() |
|
349 | start = time.time() | |
363 | pack.getmissing(findnodes[:lookupsize]) |
|
350 | pack.getmissing(findnodes[:lookupsize]) | |
364 | elapsed = time.time() - start |
|
351 | elapsed = time.time() - start | |
365 | print ("%s pack %s lookups = %0.04f" % |
|
352 | print ("%s pack %s lookups = %0.04f" % | |
366 | (('%s' % packsize).rjust(7), |
|
353 | (('%s' % packsize).rjust(7), | |
367 | ('%s' % lookupsize).rjust(7), |
|
354 | ('%s' % lookupsize).rjust(7), | |
368 | elapsed)) |
|
355 | elapsed)) | |
369 |
|
356 | |||
370 | print("") |
|
357 | print("") | |
371 | gc.enable() |
|
358 | gc.enable() | |
372 |
|
359 | |||
373 | # The perf test is meant to produce output, so we always fail the test |
|
360 | # The perf test is meant to produce output, so we always fail the test | |
374 | # so the user sees the output. |
|
361 | # so the user sees the output. | |
375 | raise RuntimeError("perf test always fails") |
|
362 | raise RuntimeError("perf test always fails") | |
376 |
|
363 | |||
377 | class datapacktests(datapacktestsbase, unittest.TestCase): |
|
364 | class datapacktests(datapacktestsbase, unittest.TestCase): | |
378 | def __init__(self, *args, **kwargs): |
|
365 | def __init__(self, *args, **kwargs): | |
379 | datapacktestsbase.__init__(self, datapack.datapack, True) |
|
366 | datapacktestsbase.__init__(self, datapack.datapack, True) | |
380 | unittest.TestCase.__init__(self, *args, **kwargs) |
|
367 | unittest.TestCase.__init__(self, *args, **kwargs) | |
381 |
|
368 | |||
382 | # TODO: |
|
369 | # TODO: | |
383 | # datapack store: |
|
370 | # datapack store: | |
384 | # - getmissing |
|
371 | # - getmissing | |
385 | # - GC two packs into one |
|
372 | # - GC two packs into one | |
386 |
|
373 | |||
387 | if __name__ == '__main__': |
|
374 | if __name__ == '__main__': | |
388 | silenttestrunner.main(__name__) |
|
375 | silenttestrunner.main(__name__) |
@@ -1,113 +1,113 b'' | |||||
1 | $ PYTHONPATH=$TESTDIR/..:$PYTHONPATH |
|
1 | $ PYTHONPATH=$TESTDIR/..:$PYTHONPATH | |
2 | $ export PYTHONPATH |
|
2 | $ export PYTHONPATH | |
3 |
|
3 | |||
4 | $ . "$TESTDIR/remotefilelog-library.sh" |
|
4 | $ . "$TESTDIR/remotefilelog-library.sh" | |
5 |
|
5 | |||
6 | $ hginit master |
|
6 | $ hginit master | |
7 | $ cd master |
|
7 | $ cd master | |
8 | $ cat >> .hg/hgrc <<EOF |
|
8 | $ cat >> .hg/hgrc <<EOF | |
9 | > [remotefilelog] |
|
9 | > [remotefilelog] | |
10 | > server=True |
|
10 | > server=True | |
11 | > serverexpiration=-1 |
|
11 | > serverexpiration=-1 | |
12 | > EOF |
|
12 | > EOF | |
13 | $ echo x > x |
|
13 | $ echo x > x | |
14 | $ hg commit -qAm x |
|
14 | $ hg commit -qAm x | |
15 | $ cd .. |
|
15 | $ cd .. | |
16 |
|
16 | |||
17 | $ hgcloneshallow ssh://user@dummy/master shallow -q |
|
17 | $ hgcloneshallow ssh://user@dummy/master shallow -q | |
18 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) |
|
18 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) | |
19 |
|
19 | |||
20 | # Set the prefetchdays config to zero so that all commits are prefetched |
|
20 | # Set the prefetchdays config to zero so that all commits are prefetched | |
21 | # no matter what their creation date is. |
|
21 | # no matter what their creation date is. | |
22 | $ cd shallow |
|
22 | $ cd shallow | |
23 | $ cat >> .hg/hgrc <<EOF |
|
23 | $ cat >> .hg/hgrc <<EOF | |
24 | > [remotefilelog] |
|
24 | > [remotefilelog] | |
25 | > prefetchdays=0 |
|
25 | > prefetchdays=0 | |
26 | > EOF |
|
26 | > EOF | |
27 | $ cd .. |
|
27 | $ cd .. | |
28 |
|
28 | |||
29 | # commit a new version of x so we can gc the old one |
|
29 | # commit a new version of x so we can gc the old one | |
30 |
|
30 | |||
31 | $ cd master |
|
31 | $ cd master | |
32 | $ echo y > x |
|
32 | $ echo y > x | |
33 | $ hg commit -qAm y |
|
33 | $ hg commit -qAm y | |
34 | $ cd .. |
|
34 | $ cd .. | |
35 |
|
35 | |||
36 | $ cd shallow |
|
36 | $ cd shallow | |
37 | $ hg pull -q |
|
37 | $ hg pull -q | |
38 | $ hg update -q |
|
38 | $ hg update -q | |
39 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) |
|
39 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) | |
40 | $ cd .. |
|
40 | $ cd .. | |
41 |
|
41 | |||
42 | # gc client cache |
|
42 | # gc client cache | |
43 |
|
43 | |||
44 | $ lastweek=`$PYTHON -c 'import datetime,time; print(datetime.datetime.fromtimestamp(time.time() - (86400 * 7)).strftime("%y%m%d%H%M"))'` |
|
44 | $ lastweek=`$PYTHON -c 'import datetime,time; print(datetime.datetime.fromtimestamp(time.time() - (86400 * 7)).strftime("%y%m%d%H%M"))'` | |
45 | $ find $CACHEDIR -type f -exec touch -t $lastweek {} \; |
|
45 | $ find $CACHEDIR -type f -exec touch -t $lastweek {} \; | |
46 |
|
46 | |||
47 | $ find $CACHEDIR -type f | sort |
|
47 | $ find $CACHEDIR -type f | sort | |
48 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 (glob) |
|
48 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 (glob) | |
49 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) |
|
49 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) | |
50 | $TESTTMP/hgcache/repos (glob) |
|
50 | $TESTTMP/hgcache/repos (glob) | |
51 | $ hg gc |
|
51 | $ hg gc | |
52 | finished: removed 1 of 2 files (0.00 GB to 0.00 GB) |
|
52 | finished: removed 1 of 2 files (0.00 GB to 0.00 GB) | |
53 | $ find $CACHEDIR -type f | sort |
|
53 | $ find $CACHEDIR -type f | sort | |
54 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) |
|
54 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) | |
55 | $TESTTMP/hgcache/repos |
|
55 | $TESTTMP/hgcache/repos | |
56 |
|
56 | |||
57 | # gc server cache |
|
57 | # gc server cache | |
58 |
|
58 | |||
59 | $ find master/.hg/remotefilelogcache -type f | sort |
|
59 | $ find master/.hg/remotefilelogcache -type f | sort | |
60 | master/.hg/remotefilelogcache/x/1406e74118627694268417491f018a4a883152f0 (glob) |
|
60 | master/.hg/remotefilelogcache/x/1406e74118627694268417491f018a4a883152f0 (glob) | |
61 | master/.hg/remotefilelogcache/x/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) |
|
61 | master/.hg/remotefilelogcache/x/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) | |
62 | $ hg gc master |
|
62 | $ hg gc master | |
63 | finished: removed 0 of 1 files (0.00 GB to 0.00 GB) |
|
63 | finished: removed 0 of 1 files (0.00 GB to 0.00 GB) | |
64 | $ find master/.hg/remotefilelogcache -type f | sort |
|
64 | $ find master/.hg/remotefilelogcache -type f | sort | |
65 | master/.hg/remotefilelogcache/x/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) |
|
65 | master/.hg/remotefilelogcache/x/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) | |
66 |
|
66 | |||
67 | # Test that GC keepset includes pullprefetch revset if it is configured |
|
67 | # Test that GC keepset includes pullprefetch revset if it is configured | |
68 |
|
68 | |||
69 | $ cd shallow |
|
69 | $ cd shallow | |
70 | $ cat >> .hg/hgrc <<EOF |
|
70 | $ cat >> .hg/hgrc <<EOF | |
71 | > [remotefilelog] |
|
71 | > [remotefilelog] | |
72 | > pullprefetch=all() |
|
72 | > pullprefetch=all() | |
73 | > EOF |
|
73 | > EOF | |
74 | $ hg prefetch |
|
74 | $ hg prefetch | |
75 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) |
|
75 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) | |
76 |
|
76 | |||
77 | $ cd .. |
|
77 | $ cd .. | |
78 | $ hg gc |
|
78 | $ hg gc | |
79 | finished: removed 0 of 2 files (0.00 GB to 0.00 GB) |
|
79 | finished: removed 0 of 2 files (0.00 GB to 0.00 GB) | |
80 |
|
80 | |||
81 | # Ensure that there are 2 versions of the file in cache |
|
81 | # Ensure that there are 2 versions of the file in cache | |
82 | $ find $CACHEDIR -type f | sort |
|
82 | $ find $CACHEDIR -type f | sort | |
83 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 (glob) |
|
83 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 (glob) | |
84 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) |
|
84 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob) | |
85 | $TESTTMP/hgcache/repos (glob) |
|
85 | $TESTTMP/hgcache/repos (glob) | |
86 |
|
86 | |||
87 | # Test that if the garbage-collection-on-repack and repack-on-hg-gc flags are set, an incremental repack with garbage collection is run |
|
87 | # Test that if the garbage-collection-on-repack and repack-on-hg-gc flags are set, an incremental repack with garbage collection is run | |
88 |
|
88 | |||
89 | $ hg gc --config remotefilelog.gcrepack=True --config remotefilelog.repackonhggc=True |
|
89 | $ hg gc --config remotefilelog.gcrepack=True --config remotefilelog.repackonhggc=True | |
90 |
|
90 | |||
91 | # Ensure that loose files are repacked |
|
91 | # Ensure that loose files are repacked | |
92 | $ find $CACHEDIR -type f | sort |
|
92 | $ find $CACHEDIR -type f | sort | |
93 | $TESTTMP/hgcache/master/packs/8d3499c65d926e4f107cf03c6b0df833222025b4.histidx |
|
93 | $TESTTMP/hgcache/master/packs/173691d550fabb9d33db8da192f1c9bc62dd11a4.dataidx | |
94 | $TESTTMP/hgcache/master/packs/8d3499c65d926e4f107cf03c6b0df833222025b4.histpack |
|
94 | $TESTTMP/hgcache/master/packs/173691d550fabb9d33db8da192f1c9bc62dd11a4.datapack | |
95 | $TESTTMP/hgcache/master/packs/9c7046f8cad0417c39aa7c03ce13e0ba991306c2.dataidx |
|
95 | $TESTTMP/hgcache/master/packs/837b83c1ef6485a336eb4421ac5973c0ec130fbb.histidx | |
96 | $TESTTMP/hgcache/master/packs/9c7046f8cad0417c39aa7c03ce13e0ba991306c2.datapack |
|
96 | $TESTTMP/hgcache/master/packs/837b83c1ef6485a336eb4421ac5973c0ec130fbb.histpack | |
97 | $TESTTMP/hgcache/master/packs/repacklock |
|
97 | $TESTTMP/hgcache/master/packs/repacklock | |
98 | $TESTTMP/hgcache/repos |
|
98 | $TESTTMP/hgcache/repos | |
99 |
|
99 | |||
100 | # Test that warning is displayed when there are no valid repos in repofile |
|
100 | # Test that warning is displayed when there are no valid repos in repofile | |
101 |
|
101 | |||
102 | $ cp $CACHEDIR/repos $CACHEDIR/repos.bak |
|
102 | $ cp $CACHEDIR/repos $CACHEDIR/repos.bak | |
103 | $ echo " " > $CACHEDIR/repos |
|
103 | $ echo " " > $CACHEDIR/repos | |
104 | $ hg gc |
|
104 | $ hg gc | |
105 | warning: no valid repos in repofile |
|
105 | warning: no valid repos in repofile | |
106 | $ mv $CACHEDIR/repos.bak $CACHEDIR/repos |
|
106 | $ mv $CACHEDIR/repos.bak $CACHEDIR/repos | |
107 |
|
107 | |||
108 | # Test that warning is displayed when the repo path is malformed |
|
108 | # Test that warning is displayed when the repo path is malformed | |
109 |
|
109 | |||
110 | $ printf "asdas\0das" >> $CACHEDIR/repos |
|
110 | $ printf "asdas\0das" >> $CACHEDIR/repos | |
111 | $ hg gc 2>&1 | head -n2 |
|
111 | $ hg gc 2>&1 | head -n2 | |
112 | warning: malformed path: * (glob) |
|
112 | warning: malformed path: * (glob) | |
113 | Traceback (most recent call last): |
|
113 | Traceback (most recent call last): |
@@ -1,160 +1,160 b'' | |||||
1 | $ PYTHONPATH=$TESTDIR/..:$PYTHONPATH |
|
1 | $ PYTHONPATH=$TESTDIR/..:$PYTHONPATH | |
2 | $ export PYTHONPATH |
|
2 | $ export PYTHONPATH | |
3 |
|
3 | |||
4 | $ . "$TESTDIR/remotefilelog-library.sh" |
|
4 | $ . "$TESTDIR/remotefilelog-library.sh" | |
5 |
|
5 | |||
6 | $ hginit master |
|
6 | $ hginit master | |
7 | $ cd master |
|
7 | $ cd master | |
8 | $ cat >> .hg/hgrc <<EOF |
|
8 | $ cat >> .hg/hgrc <<EOF | |
9 | > [remotefilelog] |
|
9 | > [remotefilelog] | |
10 | > server=True |
|
10 | > server=True | |
11 | > EOF |
|
11 | > EOF | |
12 | $ echo x > x |
|
12 | $ echo x > x | |
13 | $ hg commit -qAm x |
|
13 | $ hg commit -qAm x | |
14 | $ echo y > y |
|
14 | $ echo y > y | |
15 | $ rm x |
|
15 | $ rm x | |
16 | $ hg commit -qAm DxAy |
|
16 | $ hg commit -qAm DxAy | |
17 | $ echo yy > y |
|
17 | $ echo yy > y | |
18 | $ hg commit -qAm y |
|
18 | $ hg commit -qAm y | |
19 | $ cd .. |
|
19 | $ cd .. | |
20 |
|
20 | |||
21 | $ hgcloneshallow ssh://user@dummy/master shallow -q |
|
21 | $ hgcloneshallow ssh://user@dummy/master shallow -q | |
22 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) |
|
22 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) | |
23 |
|
23 | |||
24 | # Set the prefetchdays config to zero so that all commits are prefetched |
|
24 | # Set the prefetchdays config to zero so that all commits are prefetched | |
25 | # no matter what their creation date is. |
|
25 | # no matter what their creation date is. | |
26 | $ cd shallow |
|
26 | $ cd shallow | |
27 | $ cat >> .hg/hgrc <<EOF |
|
27 | $ cat >> .hg/hgrc <<EOF | |
28 | > [remotefilelog] |
|
28 | > [remotefilelog] | |
29 | > prefetchdays=0 |
|
29 | > prefetchdays=0 | |
30 | > EOF |
|
30 | > EOF | |
31 | $ cd .. |
|
31 | $ cd .. | |
32 |
|
32 | |||
33 | # Prefetch all data and repack |
|
33 | # Prefetch all data and repack | |
34 |
|
34 | |||
35 | $ cd shallow |
|
35 | $ cd shallow | |
36 | $ cat >> .hg/hgrc <<EOF |
|
36 | $ cat >> .hg/hgrc <<EOF | |
37 | > [remotefilelog] |
|
37 | > [remotefilelog] | |
38 | > bgprefetchrevs=all() |
|
38 | > bgprefetchrevs=all() | |
39 | > EOF |
|
39 | > EOF | |
40 |
|
40 | |||
41 | $ hg prefetch |
|
41 | $ hg prefetch | |
42 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob) |
|
42 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob) | |
43 | $ hg repack |
|
43 | $ hg repack | |
44 | $ sleep 0.5 |
|
44 | $ sleep 0.5 | |
45 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
45 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
46 |
|
46 | |||
47 | $ find $CACHEDIR | sort | grep ".datapack\|.histpack" |
|
47 | $ find $CACHEDIR | sort | grep ".datapack\|.histpack" | |
48 | $TESTTMP/hgcache/master/packs/ |
|
48 | $TESTTMP/hgcache/master/packs/7bcd2d90b99395ca43172a0dd24e18860b2902f9.histpack | |
49 | $TESTTMP/hgcache/master/packs/ |
|
49 | $TESTTMP/hgcache/master/packs/be1bfed71a51645d8c3b9dc73e234e3a8ff06ac1.datapack | |
50 |
|
50 | |||
51 | # Ensure that all file versions were prefetched |
|
51 | # Ensure that all file versions were prefetched | |
52 |
|
52 | |||
53 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` |
|
53 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` | |
54 | $TESTTMP/hgcache/master/packs/f7a942a6e4673d2c7b697fdd926ca2d153831ca4: |
|
54 | $TESTTMP/hgcache/master/packs/be1bfed71a51645d8c3b9dc73e234e3a8ff06ac1: | |
55 | x: |
|
55 | x: | |
56 | Node Delta Base Delta Length Blob Size |
|
56 | Node Delta Base Delta Length Blob Size | |
57 | 1406e7411862 000000000000 2 2 |
|
57 | 1406e7411862 000000000000 2 2 | |
58 |
|
58 | |||
59 | Total: 2 2 (0.0% bigger) |
|
59 | Total: 2 2 (0.0% bigger) | |
60 | y: |
|
60 | y: | |
61 | Node Delta Base Delta Length Blob Size |
|
61 | Node Delta Base Delta Length Blob Size | |
62 | 50dbc4572b8e 000000000000 3 3 |
|
62 | 50dbc4572b8e 000000000000 3 3 | |
63 | 076f5e2225b3 50dbc4572b8e 14 2 |
|
63 | 076f5e2225b3 50dbc4572b8e 14 2 | |
64 |
|
64 | |||
65 | Total: 17 5 (240.0% bigger) |
|
65 | Total: 17 5 (240.0% bigger) | |
66 |
|
66 | |||
67 | # Test garbage collection during repack |
|
67 | # Test garbage collection during repack | |
68 |
|
68 | |||
69 | $ cat >> .hg/hgrc <<EOF |
|
69 | $ cat >> .hg/hgrc <<EOF | |
70 | > [remotefilelog] |
|
70 | > [remotefilelog] | |
71 | > bgprefetchrevs=tip |
|
71 | > bgprefetchrevs=tip | |
72 | > gcrepack=True |
|
72 | > gcrepack=True | |
73 | > nodettl=86400 |
|
73 | > nodettl=86400 | |
74 | > EOF |
|
74 | > EOF | |
75 |
|
75 | |||
76 | $ hg repack |
|
76 | $ hg repack | |
77 | $ sleep 0.5 |
|
77 | $ sleep 0.5 | |
78 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
78 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
79 |
|
79 | |||
80 | $ find $CACHEDIR | sort | grep ".datapack\|.histpack" |
|
80 | $ find $CACHEDIR | sort | grep ".datapack\|.histpack" | |
81 | $TESTTMP/hgcache/master/packs/ |
|
81 | $TESTTMP/hgcache/master/packs/7bcd2d90b99395ca43172a0dd24e18860b2902f9.histpack | |
82 | $TESTTMP/hgcache/master/packs/ |
|
82 | $TESTTMP/hgcache/master/packs/b868298fad9bf477b4e9d9455226c440b0135fe6.datapack | |
83 |
|
83 | |||
84 | # Ensure that file 'x' was garbage collected. It should be GCed because it is not in the keepset |
|
84 | # Ensure that file 'x' was garbage collected. It should be GCed because it is not in the keepset | |
85 | # and is old (commit date is 0.0 in tests). Ensure that file 'y' is present as it is in the keepset. |
|
85 | # and is old (commit date is 0.0 in tests). Ensure that file 'y' is present as it is in the keepset. | |
86 |
|
86 | |||
87 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` |
|
87 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` | |
88 | $TESTTMP/hgcache/master/packs/05baa499c6b07f2bf0ea3d2c8151da1cb86f5e33: |
|
88 | $TESTTMP/hgcache/master/packs/b868298fad9bf477b4e9d9455226c440b0135fe6: | |
89 | y: |
|
89 | y: | |
90 | Node Delta Base Delta Length Blob Size |
|
90 | Node Delta Base Delta Length Blob Size | |
91 | 50dbc4572b8e 000000000000 3 3 |
|
91 | 50dbc4572b8e 000000000000 3 3 | |
92 |
|
92 | |||
93 | Total: 3 3 (0.0% bigger) |
|
93 | Total: 3 3 (0.0% bigger) | |
94 |
|
94 | |||
95 | # Prefetch all data again and repack for later garbage collection |
|
95 | # Prefetch all data again and repack for later garbage collection | |
96 |
|
96 | |||
97 | $ cat >> .hg/hgrc <<EOF |
|
97 | $ cat >> .hg/hgrc <<EOF | |
98 | > [remotefilelog] |
|
98 | > [remotefilelog] | |
99 | > bgprefetchrevs=all() |
|
99 | > bgprefetchrevs=all() | |
100 | > EOF |
|
100 | > EOF | |
101 |
|
101 | |||
102 | $ hg prefetch |
|
102 | $ hg prefetch | |
103 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob) |
|
103 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob) | |
104 | $ hg repack |
|
104 | $ hg repack | |
105 | $ sleep 0.5 |
|
105 | $ sleep 0.5 | |
106 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
106 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
107 |
|
107 | |||
108 | $ find $CACHEDIR | sort | grep ".datapack\|.histpack" |
|
108 | $ find $CACHEDIR | sort | grep ".datapack\|.histpack" | |
109 | $TESTTMP/hgcache/master/packs/ |
|
109 | $TESTTMP/hgcache/master/packs/7bcd2d90b99395ca43172a0dd24e18860b2902f9.histpack | |
110 | $TESTTMP/hgcache/master/packs/ |
|
110 | $TESTTMP/hgcache/master/packs/be1bfed71a51645d8c3b9dc73e234e3a8ff06ac1.datapack | |
111 |
|
111 | |||
112 | # Ensure that all file versions were prefetched |
|
112 | # Ensure that all file versions were prefetched | |
113 |
|
113 | |||
114 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` |
|
114 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` | |
115 | $TESTTMP/hgcache/master/packs/f7a942a6e4673d2c7b697fdd926ca2d153831ca4: |
|
115 | $TESTTMP/hgcache/master/packs/be1bfed71a51645d8c3b9dc73e234e3a8ff06ac1: | |
116 | x: |
|
116 | x: | |
117 | Node Delta Base Delta Length Blob Size |
|
117 | Node Delta Base Delta Length Blob Size | |
118 | 1406e7411862 000000000000 2 2 |
|
118 | 1406e7411862 000000000000 2 2 | |
119 |
|
119 | |||
120 | Total: 2 2 (0.0% bigger) |
|
120 | Total: 2 2 (0.0% bigger) | |
121 | y: |
|
121 | y: | |
122 | Node Delta Base Delta Length Blob Size |
|
122 | Node Delta Base Delta Length Blob Size | |
123 | 50dbc4572b8e 000000000000 3 3 |
|
123 | 50dbc4572b8e 000000000000 3 3 | |
124 | 076f5e2225b3 50dbc4572b8e 14 2 |
|
124 | 076f5e2225b3 50dbc4572b8e 14 2 | |
125 |
|
125 | |||
126 | Total: 17 5 (240.0% bigger) |
|
126 | Total: 17 5 (240.0% bigger) | |
127 |
|
127 | |||
128 | # Test garbage collection during repack. Ensure that new files are not removed even though they are not in the keepset |
|
128 | # Test garbage collection during repack. Ensure that new files are not removed even though they are not in the keepset | |
129 | # For the purposes of this test the TTL of a file is set to current time + 100 seconds; all commits in tests have |
|
129 | # For the purposes of this test the TTL of a file is set to current time + 100 seconds; all commits in tests have | |
130 | # a date of 1970, so to prevent garbage collection we have to set nodettl farther from 1970 than the present. |
|
130 | # a date of 1970, so to prevent garbage collection we have to set nodettl farther from 1970 than the present. | |
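The comments above describe a simple keep-or-collect rule: a node survives a gc repack if it is in the keepset, or if its linked commit is newer than the nodettl cutoff. A minimal sketch of that decision, using hypothetical names rather than the extension's actual helpers:

    import time

    def shouldkeep(inkeepset, commitdate, nodettl):
        # Hypothetical illustration: keepset members (bgprefetchrevs) are
        # always kept; other nodes are kept only while their linked commit
        # is younger than the time-to-live window.
        cutoff = time.time() - nodettl
        return inkeepset or commitdate > cutoff

With the test's 1970 commit dates, commitdate is 0, so a node outside the keepset is kept only when nodettl exceeds the current epoch time, which is why the hgrc below sets nodettl to $(date +%s) + 100.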
131 |
|
131 | |||
132 | $ cat >> .hg/hgrc <<EOF |
|
132 | $ cat >> .hg/hgrc <<EOF | |
133 | > [remotefilelog] |
|
133 | > [remotefilelog] | |
134 | > bgprefetchrevs= |
|
134 | > bgprefetchrevs= | |
135 | > nodettl=$(($(date +%s) + 100)) |
|
135 | > nodettl=$(($(date +%s) + 100)) | |
136 | > EOF |
|
136 | > EOF | |
137 |
|
137 | |||
138 | $ hg repack |
|
138 | $ hg repack | |
139 | $ sleep 0.5 |
|
139 | $ sleep 0.5 | |
140 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
140 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
141 |
|
141 | |||
142 | $ find $CACHEDIR | sort | grep ".datapack\|.histpack" |
|
142 | $ find $CACHEDIR | sort | grep ".datapack\|.histpack" | |
143 | $TESTTMP/hgcache/master/packs/ |
|
143 | $TESTTMP/hgcache/master/packs/7bcd2d90b99395ca43172a0dd24e18860b2902f9.histpack | |
144 | $TESTTMP/hgcache/master/packs/ |
|
144 | $TESTTMP/hgcache/master/packs/be1bfed71a51645d8c3b9dc73e234e3a8ff06ac1.datapack | |
145 |
|
145 | |||
146 | # Ensure that all file versions were prefetched |
|
146 | # Ensure that all file versions were prefetched | |
147 |
|
147 | |||
148 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` |
|
148 | $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1` | |
149 | $TESTTMP/hgcache/master/packs/f7a942a6e4673d2c7b697fdd926ca2d153831ca4: |
|
149 | $TESTTMP/hgcache/master/packs/be1bfed71a51645d8c3b9dc73e234e3a8ff06ac1: | |
150 | x: |
|
150 | x: | |
151 | Node Delta Base Delta Length Blob Size |
|
151 | Node Delta Base Delta Length Blob Size | |
152 | 1406e7411862 000000000000 2 2 |
|
152 | 1406e7411862 000000000000 2 2 | |
153 |
|
153 | |||
154 | Total: 2 2 (0.0% bigger) |
|
154 | Total: 2 2 (0.0% bigger) | |
155 | y: |
|
155 | y: | |
156 | Node Delta Base Delta Length Blob Size |
|
156 | Node Delta Base Delta Length Blob Size | |
157 | 50dbc4572b8e 000000000000 3 3 |
|
157 | 50dbc4572b8e 000000000000 3 3 | |
158 | 076f5e2225b3 50dbc4572b8e 14 2 |
|
158 | 076f5e2225b3 50dbc4572b8e 14 2 | |
159 |
|
159 | |||
160 | Total: 17 5 (240.0% bigger) |
|
160 | Total: 17 5 (240.0% bigger) |
@@ -1,274 +1,274 b'' | |||||
1 | #!/usr/bin/env python |
|
1 | #!/usr/bin/env python | |
2 | from __future__ import absolute_import |
|
2 | from __future__ import absolute_import | |
3 |
|
3 | |||
4 | import hashlib |
|
4 | import hashlib | |
5 | import os |
|
5 | import os | |
6 | import random |
|
6 | import random | |
7 | import shutil |
|
7 | import shutil | |
8 | import stat |
|
8 | import stat | |
9 | import struct |
|
9 | import struct | |
10 | import sys |
|
10 | import sys | |
11 | import tempfile |
|
11 | import tempfile | |
12 | import unittest |
|
12 | import unittest | |
13 |
|
13 | |||
14 | import silenttestrunner |
|
14 | import silenttestrunner | |
15 |
|
15 | |||
16 | from mercurial.node import nullid |
|
16 | from mercurial.node import nullid | |
17 | from mercurial import ( |
|
17 | from mercurial import ( | |
18 | ui as uimod, |
|
18 | ui as uimod, | |
19 | ) |
|
19 | ) | |
20 | # Load the local remotefilelog, not the system one |
|
20 | # Load the local remotefilelog, not the system one | |
21 | sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')] |
|
21 | sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')] | |
22 | from hgext.remotefilelog import ( |
|
22 | from hgext.remotefilelog import ( | |
23 | basepack, |
|
23 | basepack, | |
24 | historypack, |
|
24 | historypack, | |
25 | ) |
|
25 | ) | |
26 |
|
26 | |||
27 | class histpacktests(unittest.TestCase): |
|
27 | class histpacktests(unittest.TestCase): | |
28 | def setUp(self): |
|
28 | def setUp(self): | |
29 | self.tempdirs = [] |
|
29 | self.tempdirs = [] | |
30 |
|
30 | |||
31 | def tearDown(self): |
|
31 | def tearDown(self): | |
32 | for d in self.tempdirs: |
|
32 | for d in self.tempdirs: | |
33 | shutil.rmtree(d) |
|
33 | shutil.rmtree(d) | |
34 |
|
34 | |||
35 | def makeTempDir(self): |
|
35 | def makeTempDir(self): | |
36 | tempdir = tempfile.mkdtemp() |
|
36 | tempdir = tempfile.mkdtemp() | |
37 | self.tempdirs.append(tempdir) |
|
37 | self.tempdirs.append(tempdir) | |
38 | return tempdir |
|
38 | return tempdir | |
39 |
|
39 | |||
40 | def getHash(self, content): |
|
40 | def getHash(self, content): | |
41 | return hashlib.sha1(content).digest() |
|
41 | return hashlib.sha1(content).digest() | |
42 |
|
42 | |||
43 | def getFakeHash(self): |
|
43 | def getFakeHash(self): | |
44 | return ''.join(chr(random.randint(0, 255)) for _ in range(20)) |
|
44 | return ''.join(chr(random.randint(0, 255)) for _ in range(20)) | |
45 |
|
45 | |||
46 | def createPack(self, revisions=None): |
|
46 | def createPack(self, revisions=None): | |
47 | """Creates and returns a historypack containing the specified revisions. |
|
47 | """Creates and returns a historypack containing the specified revisions. | |
48 |
|
48 | |||
49 | `revisions` is a list of tuples, where each tuple contains a filename, |
|
49 | `revisions` is a list of tuples, where each tuple contains a filename, | |
50 | node, p1node, p2node, and linknode. |
|
50 | node, p1node, p2node, and linknode. | |
51 | """ |
|
51 | """ | |
52 | if revisions is None: |
|
52 | if revisions is None: | |
53 | revisions = [("filename", self.getFakeHash(), nullid, nullid, |
|
53 | revisions = [("filename", self.getFakeHash(), nullid, nullid, | |
54 | self.getFakeHash(), None)] |
|
54 | self.getFakeHash(), None)] | |
55 |
|
55 | |||
56 | packdir = self.makeTempDir() |
|
56 | packdir = self.makeTempDir() | |
57 | packer = historypack.mutablehistorypack(uimod.ui(), packdir, |
|
57 | packer = historypack.mutablehistorypack(uimod.ui(), packdir, | |
58 | version= |
|
58 | version=2) | |
59 |
|
59 | |||
60 | for filename, node, p1, p2, linknode, copyfrom in revisions: |
|
60 | for filename, node, p1, p2, linknode, copyfrom in revisions: | |
61 | packer.add(filename, node, p1, p2, linknode, copyfrom) |
|
61 | packer.add(filename, node, p1, p2, linknode, copyfrom) | |
62 |
|
62 | |||
63 | path = packer.close() |
|
63 | path = packer.close() | |
64 | return historypack.historypack(path) |
|
64 | return historypack.historypack(path) | |
65 |
|
65 | |||
66 | def testAddSingle(self): |
|
66 | def testAddSingle(self): | |
67 | """Test putting a single entry into a pack and reading it out. |
|
67 | """Test putting a single entry into a pack and reading it out. | |
68 | """ |
|
68 | """ | |
69 | filename = "foo" |
|
69 | filename = "foo" | |
70 | node = self.getFakeHash() |
|
70 | node = self.getFakeHash() | |
71 | p1 = self.getFakeHash() |
|
71 | p1 = self.getFakeHash() | |
72 | p2 = self.getFakeHash() |
|
72 | p2 = self.getFakeHash() | |
73 | linknode = self.getFakeHash() |
|
73 | linknode = self.getFakeHash() | |
74 |
|
74 | |||
75 | revisions = [(filename, node, p1, p2, linknode, None)] |
|
75 | revisions = [(filename, node, p1, p2, linknode, None)] | |
76 | pack = self.createPack(revisions) |
|
76 | pack = self.createPack(revisions) | |
77 |
|
77 | |||
78 | actual = pack.getancestors(filename, node)[node] |
|
78 | actual = pack.getancestors(filename, node)[node] | |
79 | self.assertEquals(p1, actual[0]) |
|
79 | self.assertEquals(p1, actual[0]) | |
80 | self.assertEquals(p2, actual[1]) |
|
80 | self.assertEquals(p2, actual[1]) | |
81 | self.assertEquals(linknode, actual[2]) |
|
81 | self.assertEquals(linknode, actual[2]) | |
82 |
|
82 | |||
83 | def testAddMultiple(self): |
|
83 | def testAddMultiple(self): | |
84 | """Test putting multiple unrelated revisions into a pack and reading |
|
84 | """Test putting multiple unrelated revisions into a pack and reading | |
85 | them out. |
|
85 | them out. | |
86 | """ |
|
86 | """ | |
87 | revisions = [] |
|
87 | revisions = [] | |
88 | for i in range(10): |
|
88 | for i in range(10): | |
89 | filename = "foo-%s" % i |
|
89 | filename = "foo-%s" % i | |
90 | node = self.getFakeHash() |
|
90 | node = self.getFakeHash() | |
91 | p1 = self.getFakeHash() |
|
91 | p1 = self.getFakeHash() | |
92 | p2 = self.getFakeHash() |
|
92 | p2 = self.getFakeHash() | |
93 | linknode = self.getFakeHash() |
|
93 | linknode = self.getFakeHash() | |
94 | revisions.append((filename, node, p1, p2, linknode, None)) |
|
94 | revisions.append((filename, node, p1, p2, linknode, None)) | |
95 |
|
95 | |||
96 | pack = self.createPack(revisions) |
|
96 | pack = self.createPack(revisions) | |
97 |
|
97 | |||
98 | for filename, node, p1, p2, linknode, copyfrom in revisions: |
|
98 | for filename, node, p1, p2, linknode, copyfrom in revisions: | |
99 | actual = pack.getancestors(filename, node)[node] |
|
99 | actual = pack.getancestors(filename, node)[node] | |
100 | self.assertEquals(p1, actual[0]) |
|
100 | self.assertEquals(p1, actual[0]) | |
101 | self.assertEquals(p2, actual[1]) |
|
101 | self.assertEquals(p2, actual[1]) | |
102 | self.assertEquals(linknode, actual[2]) |
|
102 | self.assertEquals(linknode, actual[2]) | |
103 | self.assertEquals(copyfrom, actual[3]) |
|
103 | self.assertEquals(copyfrom, actual[3]) | |
104 |
|
104 | |||
105 | def testAddAncestorChain(self): |
|
105 | def testAddAncestorChain(self): | |
106 | """Test putting multiple revisions into a pack and reading the ancestor |
|
106 | """Test putting multiple revisions into a pack and reading the ancestor | |
107 | chain. |
|
107 | chain. | |
108 | """ |
|
108 | """ | |
109 | revisions = [] |
|
109 | revisions = [] | |
110 | filename = "foo" |
|
110 | filename = "foo" | |
111 | lastnode = nullid |
|
111 | lastnode = nullid | |
112 | for i in range(10): |
|
112 | for i in range(10): | |
113 | node = self.getFakeHash() |
|
113 | node = self.getFakeHash() | |
114 | revisions.append((filename, node, lastnode, nullid, nullid, None)) |
|
114 | revisions.append((filename, node, lastnode, nullid, nullid, None)) | |
115 | lastnode = node |
|
115 | lastnode = node | |
116 |
|
116 | |||
117 | # revisions must be added in topological order, newest first |
|
117 | # revisions must be added in topological order, newest first | |
118 | revisions = list(reversed(revisions)) |
|
118 | revisions = list(reversed(revisions)) | |
119 | pack = self.createPack(revisions) |
|
119 | pack = self.createPack(revisions) | |
120 |
|
120 | |||
121 | # Test that the chain has all the entries |
|
121 | # Test that the chain has all the entries | |
122 | ancestors = pack.getancestors(revisions[0][0], revisions[0][1]) |
|
122 | ancestors = pack.getancestors(revisions[0][0], revisions[0][1]) | |
123 | for filename, node, p1, p2, linknode, copyfrom in revisions: |
|
123 | for filename, node, p1, p2, linknode, copyfrom in revisions: | |
124 | ap1, ap2, alinknode, acopyfrom = ancestors[node] |
|
124 | ap1, ap2, alinknode, acopyfrom = ancestors[node] | |
125 | self.assertEquals(ap1, p1) |
|
125 | self.assertEquals(ap1, p1) | |
126 | self.assertEquals(ap2, p2) |
|
126 | self.assertEquals(ap2, p2) | |
127 | self.assertEquals(alinknode, linknode) |
|
127 | self.assertEquals(alinknode, linknode) | |
128 | self.assertEquals(acopyfrom, copyfrom) |
|
128 | self.assertEquals(acopyfrom, copyfrom) | |
129 |
|
129 | |||
130 | def testPackMany(self): |
|
130 | def testPackMany(self): | |
131 | """Pack many related and unrelated ancestors. |
|
131 | """Pack many related and unrelated ancestors. | |
132 | """ |
|
132 | """ | |
133 | # Build a random pack file |
|
133 | # Build a random pack file | |
134 | allentries = {} |
|
134 | allentries = {} | |
135 | ancestorcounts = {} |
|
135 | ancestorcounts = {} | |
136 | revisions = [] |
|
136 | revisions = [] | |
137 | random.seed(0) |
|
137 | random.seed(0) | |
138 | for i in range(100): |
|
138 | for i in range(100): | |
139 | filename = "filename-%s" % i |
|
139 | filename = "filename-%s" % i | |
140 | entries = [] |
|
140 | entries = [] | |
141 | p2 = nullid |
|
141 | p2 = nullid | |
142 | linknode = nullid |
|
142 | linknode = nullid | |
143 | for j in range(random.randint(1, 100)): |
|
143 | for j in range(random.randint(1, 100)): | |
144 | node = self.getFakeHash() |
|
144 | node = self.getFakeHash() | |
145 | p1 = nullid |
|
145 | p1 = nullid | |
146 | if len(entries) > 0: |
|
146 | if len(entries) > 0: | |
147 | p1 = entries[random.randint(0, len(entries) - 1)] |
|
147 | p1 = entries[random.randint(0, len(entries) - 1)] | |
148 | entries.append(node) |
|
148 | entries.append(node) | |
149 | revisions.append((filename, node, p1, p2, linknode, None)) |
|
149 | revisions.append((filename, node, p1, p2, linknode, None)) | |
150 | allentries[(filename, node)] = (p1, p2, linknode) |
|
150 | allentries[(filename, node)] = (p1, p2, linknode) | |
151 | if p1 == nullid: |
|
151 | if p1 == nullid: | |
152 | ancestorcounts[(filename, node)] = 1 |
|
152 | ancestorcounts[(filename, node)] = 1 | |
153 | else: |
|
153 | else: | |
154 | newcount = ancestorcounts[(filename, p1)] + 1 |
|
154 | newcount = ancestorcounts[(filename, p1)] + 1 | |
155 | ancestorcounts[(filename, node)] = newcount |
|
155 | ancestorcounts[(filename, node)] = newcount | |
156 |
|
156 | |||
157 | # Must add file entries in reverse topological order |
|
157 | # Must add file entries in reverse topological order | |
158 | revisions = list(reversed(revisions)) |
|
158 | revisions = list(reversed(revisions)) | |
159 | pack = self.createPack(revisions) |
|
159 | pack = self.createPack(revisions) | |
160 |
|
160 | |||
161 | # Verify the pack contents |
|
161 | # Verify the pack contents | |
162 | for (filename, node), (p1, p2, lastnode) in allentries.iteritems(): |
|
162 | for (filename, node), (p1, p2, lastnode) in allentries.iteritems(): | |
163 | ancestors = pack.getancestors(filename, node) |
|
163 | ancestors = pack.getancestors(filename, node) | |
164 | self.assertEquals(ancestorcounts[(filename, node)], |
|
164 | self.assertEquals(ancestorcounts[(filename, node)], | |
165 | len(ancestors)) |
|
165 | len(ancestors)) | |
166 | for anode, (ap1, ap2, alinknode, copyfrom) in ancestors.iteritems(): |
|
166 | for anode, (ap1, ap2, alinknode, copyfrom) in ancestors.iteritems(): | |
167 | ep1, ep2, elinknode = allentries[(filename, anode)] |
|
167 | ep1, ep2, elinknode = allentries[(filename, anode)] | |
168 | self.assertEquals(ap1, ep1) |
|
168 | self.assertEquals(ap1, ep1) | |
169 | self.assertEquals(ap2, ep2) |
|
169 | self.assertEquals(ap2, ep2) | |
170 | self.assertEquals(alinknode, elinknode) |
|
170 | self.assertEquals(alinknode, elinknode) | |
171 | self.assertEquals(copyfrom, None) |
|
171 | self.assertEquals(copyfrom, None) | |
172 |
|
172 | |||
173 | def testGetNodeInfo(self): |
|
173 | def testGetNodeInfo(self): | |
174 | revisions = [] |
|
174 | revisions = [] | |
175 | filename = "foo" |
|
175 | filename = "foo" | |
176 | lastnode = nullid |
|
176 | lastnode = nullid | |
177 | for i in range(10): |
|
177 | for i in range(10): | |
178 | node = self.getFakeHash() |
|
178 | node = self.getFakeHash() | |
179 | revisions.append((filename, node, lastnode, nullid, nullid, None)) |
|
179 | revisions.append((filename, node, lastnode, nullid, nullid, None)) | |
180 | lastnode = node |
|
180 | lastnode = node | |
181 |
|
181 | |||
182 | pack = self.createPack(revisions) |
|
182 | pack = self.createPack(revisions) | |
183 |
|
183 | |||
184 | # Test that getnodeinfo returns the expected results |
|
184 | # Test that getnodeinfo returns the expected results | |
185 | for filename, node, p1, p2, linknode, copyfrom in revisions: |
|
185 | for filename, node, p1, p2, linknode, copyfrom in revisions: | |
186 | ap1, ap2, alinknode, acopyfrom = pack.getnodeinfo(filename, node) |
|
186 | ap1, ap2, alinknode, acopyfrom = pack.getnodeinfo(filename, node) | |
187 | self.assertEquals(ap1, p1) |
|
187 | self.assertEquals(ap1, p1) | |
188 | self.assertEquals(ap2, p2) |
|
188 | self.assertEquals(ap2, p2) | |
189 | self.assertEquals(alinknode, linknode) |
|
189 | self.assertEquals(alinknode, linknode) | |
190 | self.assertEquals(acopyfrom, copyfrom) |
|
190 | self.assertEquals(acopyfrom, copyfrom) | |
191 |
|
191 | |||
192 | def testGetMissing(self): |
|
192 | def testGetMissing(self): | |
193 | """Test the getmissing() api. |
|
193 | """Test the getmissing() api. | |
194 | """ |
|
194 | """ | |
195 | revisions = [] |
|
195 | revisions = [] | |
196 | filename = "foo" |
|
196 | filename = "foo" | |
197 | for i in range(10): |
|
197 | for i in range(10): | |
198 | node = self.getFakeHash() |
|
198 | node = self.getFakeHash() | |
199 | p1 = self.getFakeHash() |
|
199 | p1 = self.getFakeHash() | |
200 | p2 = self.getFakeHash() |
|
200 | p2 = self.getFakeHash() | |
201 | linknode = self.getFakeHash() |
|
201 | linknode = self.getFakeHash() | |
202 | revisions.append((filename, node, p1, p2, linknode, None)) |
|
202 | revisions.append((filename, node, p1, p2, linknode, None)) | |
203 |
|
203 | |||
204 | pack = self.createPack(revisions) |
|
204 | pack = self.createPack(revisions) | |
205 |
|
205 | |||
206 | missing = pack.getmissing([(filename, revisions[0][1])]) |
|
206 | missing = pack.getmissing([(filename, revisions[0][1])]) | |
207 | self.assertFalse(missing) |
|
207 | self.assertFalse(missing) | |
208 |
|
208 | |||
209 | missing = pack.getmissing([(filename, revisions[0][1]), |
|
209 | missing = pack.getmissing([(filename, revisions[0][1]), | |
210 | (filename, revisions[1][1])]) |
|
210 | (filename, revisions[1][1])]) | |
211 | self.assertFalse(missing) |
|
211 | self.assertFalse(missing) | |
212 |
|
212 | |||
213 | fakenode = self.getFakeHash() |
|
213 | fakenode = self.getFakeHash() | |
214 | missing = pack.getmissing([(filename, revisions[0][1]), |
|
214 | missing = pack.getmissing([(filename, revisions[0][1]), | |
215 | (filename, fakenode)]) |
|
215 | (filename, fakenode)]) | |
216 | self.assertEquals(missing, [(filename, fakenode)]) |
|
216 | self.assertEquals(missing, [(filename, fakenode)]) | |
217 |
|
217 | |||
218 | # Test getmissing on a non-existent filename |
|
218 | # Test getmissing on a non-existent filename | |
219 | missing = pack.getmissing([("bar", fakenode)]) |
|
219 | missing = pack.getmissing([("bar", fakenode)]) | |
220 | self.assertEquals(missing, [("bar", fakenode)]) |
|
220 | self.assertEquals(missing, [("bar", fakenode)]) | |
221 |
|
221 | |||
222 | def testAddThrows(self): |
|
222 | def testAddThrows(self): | |
223 | pack = self.createPack() |
|
223 | pack = self.createPack() | |
224 |
|
224 | |||
225 | try: |
|
225 | try: | |
226 | pack.add('filename', nullid, nullid, nullid, nullid, None) |
|
226 | pack.add('filename', nullid, nullid, nullid, nullid, None) | |
227 | self.assertTrue(False, "historypack.add should throw") |
|
227 | self.assertTrue(False, "historypack.add should throw") | |
228 | except RuntimeError: |
|
228 | except RuntimeError: | |
229 | pass |
|
229 | pass | |
230 |
|
230 | |||
231 | def testBadVersionThrows(self): |
|
231 | def testBadVersionThrows(self): | |
232 | pack = self.createPack() |
|
232 | pack = self.createPack() | |
233 | path = pack.path + '.histpack' |
|
233 | path = pack.path + '.histpack' | |
234 | with open(path) as f: |
|
234 | with open(path) as f: | |
235 | raw = f.read() |
|
235 | raw = f.read() | |
236 | raw = struct.pack('!B', 255) + raw[1:] |
|
236 | raw = struct.pack('!B', 255) + raw[1:] | |
237 | os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE) |
|
237 | os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE) | |
238 | with open(path, 'w+') as f: |
|
238 | with open(path, 'w+') as f: | |
239 | f.write(raw) |
|
239 | f.write(raw) | |
240 |
|
240 | |||
241 | try: |
|
241 | try: | |
242 | pack = historypack.historypack(pack.path) |
|
242 | pack = historypack.historypack(pack.path) | |
243 | self.assertTrue(False, "bad version number should have thrown") |
|
243 | self.assertTrue(False, "bad version number should have thrown") | |
244 | except RuntimeError: |
|
244 | except RuntimeError: | |
245 | pass |
|
245 | pass | |
246 |
|
246 | |||
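testBadVersionThrows above relies on the reader validating the leading version byte, since the test corrupts exactly that byte with struct.pack('!B', 255). A minimal sketch of such a check, assuming the one-byte version prefix the test implies; the function name and supported-version tuple are illustrative, not the module's actual API:

    import struct

    def checkversion(raw, supported=(2,)):
        # Illustrative only: unpack the one-byte version prefix and refuse
        # formats we do not understand, as the corruption test expects.
        version = struct.unpack('!B', raw[0:1])[0]
        if version not in supported:
            raise RuntimeError('unsupported histpack version %d' % version)
        return version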
247 | def testLargePack(self): |
|
247 | def testLargePack(self): | |
248 | """Test creating and reading from a large pack with more than SMALLFANOUTCUTOFF entries. |
|
248 | """Test creating and reading from a large pack with more than SMALLFANOUTCUTOFF entries. | |
249 | This causes it to use a 2^16 fanout table instead.""" |
|
249 | This causes it to use a 2^16 fanout table instead.""" | |
250 | total = basepack.SMALLFANOUTCUTOFF + 1 |
|
250 | total = basepack.SMALLFANOUTCUTOFF + 1 | |
251 | revisions = [] |
|
251 | revisions = [] | |
252 | for i in xrange(total): |
|
252 | for i in xrange(total): | |
253 | filename = "foo-%s" % i |
|
253 | filename = "foo-%s" % i | |
254 | node = self.getFakeHash() |
|
254 | node = self.getFakeHash() | |
255 | p1 = self.getFakeHash() |
|
255 | p1 = self.getFakeHash() | |
256 | p2 = self.getFakeHash() |
|
256 | p2 = self.getFakeHash() | |
257 | linknode = self.getFakeHash() |
|
257 | linknode = self.getFakeHash() | |
258 | revisions.append((filename, node, p1, p2, linknode, None)) |
|
258 | revisions.append((filename, node, p1, p2, linknode, None)) | |
259 |
|
259 | |||
260 | pack = self.createPack(revisions) |
|
260 | pack = self.createPack(revisions) | |
261 | self.assertEquals(pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX) |
|
261 | self.assertEquals(pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX) | |
262 |
|
262 | |||
263 | for filename, node, p1, p2, linknode, copyfrom in revisions: |
|
263 | for filename, node, p1, p2, linknode, copyfrom in revisions: | |
264 | actual = pack.getancestors(filename, node)[node] |
|
264 | actual = pack.getancestors(filename, node)[node] | |
265 | self.assertEquals(p1, actual[0]) |
|
265 | self.assertEquals(p1, actual[0]) | |
266 | self.assertEquals(p2, actual[1]) |
|
266 | self.assertEquals(p2, actual[1]) | |
267 | self.assertEquals(linknode, actual[2]) |
|
267 | self.assertEquals(linknode, actual[2]) | |
268 | self.assertEquals(copyfrom, actual[3]) |
|
268 | self.assertEquals(copyfrom, actual[3]) | |
269 | # TODO: |
|
269 | # TODO: | |
270 | # histpack store: |
|
270 | # histpack store: | |
271 | # - repack two packs into one |
|
271 | # - repack two packs into one | |
272 |
|
272 | |||
273 | if __name__ == '__main__': |
|
273 | if __name__ == '__main__': | |
274 | silenttestrunner.main(__name__) |
|
274 | silenttestrunner.main(__name__) |
@@ -1,385 +1,385 b'' | |||||
1 | $ PYTHONPATH=$TESTDIR/..:$PYTHONPATH |
|
1 | $ PYTHONPATH=$TESTDIR/..:$PYTHONPATH | |
2 | $ export PYTHONPATH |
|
2 | $ export PYTHONPATH | |
3 |
|
3 | |||
4 | $ . "$TESTDIR/remotefilelog-library.sh" |
|
4 | $ . "$TESTDIR/remotefilelog-library.sh" | |
5 |
|
5 | |||
6 | $ cat >> $HGRCPATH <<EOF |
|
6 | $ cat >> $HGRCPATH <<EOF | |
7 | > [remotefilelog] |
|
7 | > [remotefilelog] | |
8 | > fastdatapack=True |
|
8 | > fastdatapack=True | |
9 | > EOF |
|
9 | > EOF | |
10 |
|
10 | |||
11 | $ hginit master |
|
11 | $ hginit master | |
12 | $ cd master |
|
12 | $ cd master | |
13 | $ cat >> .hg/hgrc <<EOF |
|
13 | $ cat >> .hg/hgrc <<EOF | |
14 | > [remotefilelog] |
|
14 | > [remotefilelog] | |
15 | > server=True |
|
15 | > server=True | |
16 | > serverexpiration=-1 |
|
16 | > serverexpiration=-1 | |
17 | > EOF |
|
17 | > EOF | |
18 | $ echo x > x |
|
18 | $ echo x > x | |
19 | $ hg commit -qAm x |
|
19 | $ hg commit -qAm x | |
20 | $ echo x >> x |
|
20 | $ echo x >> x | |
21 | $ hg commit -qAm x2 |
|
21 | $ hg commit -qAm x2 | |
22 | $ cd .. |
|
22 | $ cd .. | |
23 |
|
23 | |||
24 | $ hgcloneshallow ssh://user@dummy/master shallow -q |
|
24 | $ hgcloneshallow ssh://user@dummy/master shallow -q | |
25 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) |
|
25 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) | |
26 |
|
26 | |||
27 | # Set the prefetchdays config to zero so that all commits are prefetched |
|
27 | # Set the prefetchdays config to zero so that all commits are prefetched | |
28 | # no matter what their creation date is. |
|
28 | # no matter what their creation date is. | |
29 | $ cd shallow |
|
29 | $ cd shallow | |
30 | $ cat >> .hg/hgrc <<EOF |
|
30 | $ cat >> .hg/hgrc <<EOF | |
31 | > [remotefilelog] |
|
31 | > [remotefilelog] | |
32 | > prefetchdays=0 |
|
32 | > prefetchdays=0 | |
33 | > EOF |
|
33 | > EOF | |
34 | $ cd .. |
|
34 | $ cd .. | |
35 |
|
35 | |||
36 | # Test that repack cleans up the old files and creates new packs |
|
36 | # Test that repack cleans up the old files and creates new packs | |
37 |
|
37 | |||
38 | $ cd shallow |
|
38 | $ cd shallow | |
39 | $ find $CACHEDIR | sort |
|
39 | $ find $CACHEDIR | sort | |
40 | $TESTTMP/hgcache |
|
40 | $TESTTMP/hgcache | |
41 | $TESTTMP/hgcache/master |
|
41 | $TESTTMP/hgcache/master | |
42 | $TESTTMP/hgcache/master/11 |
|
42 | $TESTTMP/hgcache/master/11 | |
43 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072 |
|
43 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072 | |
44 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/aee31534993a501858fb6dd96a065671922e7d51 |
|
44 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/aee31534993a501858fb6dd96a065671922e7d51 | |
45 | $TESTTMP/hgcache/repos |
|
45 | $TESTTMP/hgcache/repos | |
46 |
|
46 | |||
47 | $ hg repack |
|
47 | $ hg repack | |
48 |
|
48 | |||
49 | $ find $CACHEDIR | sort |
|
49 | $ find $CACHEDIR | sort | |
50 | $TESTTMP/hgcache |
|
50 | $TESTTMP/hgcache | |
51 | $TESTTMP/hgcache/master |
|
51 | $TESTTMP/hgcache/master | |
52 | $TESTTMP/hgcache/master/packs |
|
52 | $TESTTMP/hgcache/master/packs | |
53 | $TESTTMP/hgcache/master/packs/ |
|
53 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx | |
54 | $TESTTMP/hgcache/master/packs/ |
|
54 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack | |
55 | $TESTTMP/hgcache/master/packs/ |
|
55 | $TESTTMP/hgcache/master/packs/add67cb28ae0a2962111588ce49467ca9ebb9195.dataidx | |
56 | $TESTTMP/hgcache/master/packs/ |
|
56 | $TESTTMP/hgcache/master/packs/add67cb28ae0a2962111588ce49467ca9ebb9195.datapack | |
57 | $TESTTMP/hgcache/master/packs/repacklock |
|
57 | $TESTTMP/hgcache/master/packs/repacklock | |
58 | $TESTTMP/hgcache/repos |
|
58 | $TESTTMP/hgcache/repos | |
59 |
|
59 | |||
60 | # Test that the packs are readonly |
|
60 | # Test that the packs are readonly | |
61 | $ ls_l $CACHEDIR/master/packs |
|
61 | $ ls_l $CACHEDIR/master/packs | |
62 | -r--r--r-- 1145 276d308429d0303762befa376788300f0310f90e.histidx |
|
62 | -r--r--r-- 1145 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx | |
63 | -r--r--r-- 172 276d308429d0303762befa376788300f0310f90e.histpack |
|
63 | -r--r--r-- 172 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack | |
64 | -r--r--r-- 1074 8e25dec685d5e0bb1f1b39df3acebda0e0d75c6e.dataidx |
|
64 | -r--r--r-- 1074 add67cb28ae0a2962111588ce49467ca9ebb9195.dataidx | |
65 | -r--r--r-- 69 8e25dec685d5e0bb1f1b39df3acebda0e0d75c6e.datapack |
|
65 | -r--r--r-- 69 add67cb28ae0a2962111588ce49467ca9ebb9195.datapack | |
66 | -rw-r--r-- 0 repacklock |
|
66 | -rw-r--r-- 0 repacklock | |
67 |
|
67 | |||
68 | # Test that the data in the new packs is accessible |
|
68 | # Test that the data in the new packs is accessible | |
69 | $ hg cat -r . x |
|
69 | $ hg cat -r . x | |
70 | x |
|
70 | x | |
71 | x |
|
71 | x | |
72 |
|
72 | |||
73 | # Test that adding new data and repacking it results in the loose data and the |
|
73 | # Test that adding new data and repacking it results in the loose data and the | |
74 | # old packs being combined. |
|
74 | # old packs being combined. | |
75 |
|
75 | |||
76 | $ cd ../master |
|
76 | $ cd ../master | |
77 | $ echo x >> x |
|
77 | $ echo x >> x | |
78 | $ hg commit -m x3 |
|
78 | $ hg commit -m x3 | |
79 | $ cd ../shallow |
|
79 | $ cd ../shallow | |
80 | $ hg pull -q |
|
80 | $ hg pull -q | |
81 | $ hg up -q tip |
|
81 | $ hg up -q tip | |
82 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
82 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
83 |
|
83 | |||
84 | $ find $CACHEDIR -type f | sort |
|
84 | $ find $CACHEDIR -type f | sort | |
85 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216 |
|
85 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216 | |
86 | $TESTTMP/hgcache/master/packs/ |
|
86 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx | |
87 | $TESTTMP/hgcache/master/packs/ |
|
87 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack | |
88 | $TESTTMP/hgcache/master/packs/ |
|
88 | $TESTTMP/hgcache/master/packs/add67cb28ae0a2962111588ce49467ca9ebb9195.dataidx | |
89 | $TESTTMP/hgcache/master/packs/ |
|
89 | $TESTTMP/hgcache/master/packs/add67cb28ae0a2962111588ce49467ca9ebb9195.datapack | |
90 | $TESTTMP/hgcache/master/packs/repacklock |
|
90 | $TESTTMP/hgcache/master/packs/repacklock | |
91 | $TESTTMP/hgcache/repos |
|
91 | $TESTTMP/hgcache/repos | |
92 |
|
92 | |||
93 | $ hg repack --traceback |
|
93 | $ hg repack --traceback | |
94 |
|
94 | |||
95 | $ find $CACHEDIR -type f | sort |
|
95 | $ find $CACHEDIR -type f | sort | |
96 | $TESTTMP/hgcache/master/packs/ |
|
96 | $TESTTMP/hgcache/master/packs/1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.dataidx | |
97 | $TESTTMP/hgcache/master/packs/ |
|
97 | $TESTTMP/hgcache/master/packs/1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.datapack | |
98 | $TESTTMP/hgcache/master/packs/ |
|
98 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx | |
99 | $TESTTMP/hgcache/master/packs/ |
|
99 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
100 | $TESTTMP/hgcache/master/packs/repacklock |
|
100 | $TESTTMP/hgcache/master/packs/repacklock | |
101 | $TESTTMP/hgcache/repos |
|
101 | $TESTTMP/hgcache/repos | |
102 |
|
102 | |||
103 | # Verify all the file data is still available |
|
103 | # Verify all the file data is still available | |
104 | $ hg cat -r . x |
|
104 | $ hg cat -r . x | |
105 | x |
|
105 | x | |
106 | x |
|
106 | x | |
107 | x |
|
107 | x | |
108 | $ hg cat -r '.^' x |
|
108 | $ hg cat -r '.^' x | |
109 | x |
|
109 | x | |
110 | x |
|
110 | x | |
111 |
|
111 | |||
112 | # Test that repacking again without new data does not delete the pack files |
|
112 | # Test that repacking again without new data does not delete the pack files | |
113 | # and did not change the pack names |
|
113 | # and did not change the pack names | |
114 | $ hg repack |
|
114 | $ hg repack | |
115 | $ find $CACHEDIR -type f | sort |
|
115 | $ find $CACHEDIR -type f | sort | |
116 | $TESTTMP/hgcache/master/packs/ |
|
116 | $TESTTMP/hgcache/master/packs/1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.dataidx | |
117 | $TESTTMP/hgcache/master/packs/ |
|
117 | $TESTTMP/hgcache/master/packs/1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.datapack | |
118 | $TESTTMP/hgcache/master/packs/ |
|
118 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx | |
119 | $TESTTMP/hgcache/master/packs/ |
|
119 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
120 | $TESTTMP/hgcache/master/packs/repacklock |
|
120 | $TESTTMP/hgcache/master/packs/repacklock | |
121 | $TESTTMP/hgcache/repos |
|
121 | $TESTTMP/hgcache/repos | |
122 |
|
122 | |||
123 | # Run two repacks at once |
|
123 | # Run two repacks at once | |
124 | $ hg repack --config "hooks.prerepack=sleep 3" & |
|
124 | $ hg repack --config "hooks.prerepack=sleep 3" & | |
125 | $ sleep 1 |
|
125 | $ sleep 1 | |
126 | $ hg repack |
|
126 | $ hg repack | |
127 | skipping repack - another repack is already running |
|
127 | skipping repack - another repack is already running | |
128 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
128 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
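The "skipping repack" message above is driven by the repacklock file that shows up in the pack directory listings. A minimal sketch of that mutual-exclusion pattern, with a hypothetical helper name rather than the extension's actual code:

    import errno
    import os

    def trylock(packdir):
        # Illustrative only: create the lock file exclusively; if it already
        # exists, another repack is running, so skip rather than block.
        try:
            fd = os.open(os.path.join(packdir, 'repacklock'),
                         os.O_CREAT | os.O_EXCL | os.O_WRONLY)
            os.close(fd)
            return True
        except OSError as ex:
            if ex.errno == errno.EEXIST:
                return False
            raise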
129 |
|
129 | |||
130 | # Run repack in the background |
|
130 | # Run repack in the background | |
131 | $ cd ../master |
|
131 | $ cd ../master | |
132 | $ echo x >> x |
|
132 | $ echo x >> x | |
133 | $ hg commit -m x4 |
|
133 | $ hg commit -m x4 | |
134 | $ cd ../shallow |
|
134 | $ cd ../shallow | |
135 | $ hg pull -q |
|
135 | $ hg pull -q | |
136 | $ hg up -q tip |
|
136 | $ hg up -q tip | |
137 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
137 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
138 | $ find $CACHEDIR -type f | sort |
|
138 | $ find $CACHEDIR -type f | sort | |
139 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1bb2e6237e035c8f8ef508e281f1ce075bc6db72 |
|
139 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1bb2e6237e035c8f8ef508e281f1ce075bc6db72 | |
140 | $TESTTMP/hgcache/master/packs/ |
|
140 | $TESTTMP/hgcache/master/packs/1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.dataidx | |
141 | $TESTTMP/hgcache/master/packs/ |
|
141 | $TESTTMP/hgcache/master/packs/1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.datapack | |
142 | $TESTTMP/hgcache/master/packs/ |
|
142 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx | |
143 | $TESTTMP/hgcache/master/packs/ |
|
143 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
144 | $TESTTMP/hgcache/master/packs/repacklock |
|
144 | $TESTTMP/hgcache/master/packs/repacklock | |
145 | $TESTTMP/hgcache/repos |
|
145 | $TESTTMP/hgcache/repos | |
146 |
|
146 | |||
147 | $ hg repack --background |
|
147 | $ hg repack --background | |
148 | (running background repack) |
|
148 | (running background repack) | |
149 | $ sleep 0.5 |
|
149 | $ sleep 0.5 | |
150 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
150 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
151 | $ find $CACHEDIR -type f | sort |
|
151 | $ find $CACHEDIR -type f | sort | |
152 | $TESTTMP/hgcache/master/packs/094b530486dad4427a0faf6bcbc031571b99ca24.histidx |
|
152 | $TESTTMP/hgcache/master/packs/06ae46494f0e3b9beda53eae8fc0e55139f13123.dataidx | |
153 | $TESTTMP/hgcache/master/packs/094b530486dad4427a0faf6bcbc031571b99ca24.histpack |
|
153 | $TESTTMP/hgcache/master/packs/06ae46494f0e3b9beda53eae8fc0e55139f13123.datapack | |
154 | $TESTTMP/hgcache/master/packs/8fe685c56f6f7edf550bfcec74eeecc5f3c2ba15.dataidx |
|
154 | $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histidx | |
155 | $TESTTMP/hgcache/master/packs/8fe685c56f6f7edf550bfcec74eeecc5f3c2ba15.datapack |
|
155 | $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histpack | |
156 | $TESTTMP/hgcache/master/packs/repacklock |
|
156 | $TESTTMP/hgcache/master/packs/repacklock | |
157 | $TESTTMP/hgcache/repos |
|
157 | $TESTTMP/hgcache/repos | |
158 |
|
158 | |||
159 | # Test debug commands |
|
159 | # Test debug commands | |
160 |
|
160 | |||
161 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack |
|
161 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack | |
162 | $TESTTMP/hgcache/master/packs/8fe685c56f6f7edf550bfcec74eeecc5f3c2ba15: |
|
162 | $TESTTMP/hgcache/master/packs/06ae46494f0e3b9beda53eae8fc0e55139f13123: | |
163 | x: |
|
163 | x: | |
164 | Node Delta Base Delta Length Blob Size |
|
164 | Node Delta Base Delta Length Blob Size | |
165 | 1bb2e6237e03 000000000000 8 8 |
|
165 | 1bb2e6237e03 000000000000 8 8 | |
166 | d4a3ed9310e5 1bb2e6237e03 12 6 |
|
166 | d4a3ed9310e5 1bb2e6237e03 12 6 | |
167 | aee31534993a d4a3ed9310e5 12 4 |
|
167 | aee31534993a d4a3ed9310e5 12 4 | |
168 |
|
168 | |||
169 | Total: 32 18 (77.8% bigger) |
|
169 | Total: 32 18 (77.8% bigger) | |
170 | $ hg debugdatapack --long $TESTTMP/hgcache/master/packs/*.datapack |
|
170 | $ hg debugdatapack --long $TESTTMP/hgcache/master/packs/*.datapack | |
171 | $TESTTMP/hgcache/master/packs/8fe685c56f6f7edf550bfcec74eeecc5f3c2ba15: |
|
171 | $TESTTMP/hgcache/master/packs/06ae46494f0e3b9beda53eae8fc0e55139f13123: | |
172 | x: |
|
172 | x: | |
173 | Node Delta Base Delta Length Blob Size |
|
173 | Node Delta Base Delta Length Blob Size | |
174 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 8 8 |
|
174 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 8 8 | |
175 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 12 6 |
|
175 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 12 6 | |
176 | aee31534993a501858fb6dd96a065671922e7d51 d4a3ed9310e5bd9887e3bf779da5077efab28216 12 4 |
|
176 | aee31534993a501858fb6dd96a065671922e7d51 d4a3ed9310e5bd9887e3bf779da5077efab28216 12 4 | |
177 |
|
177 | |||
178 | Total: 32 18 (77.8% bigger) |
|
178 | Total: 32 18 (77.8% bigger) | |
179 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack --node d4a3ed9310e5bd9887e3bf779da5077efab28216 |
|
179 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack --node d4a3ed9310e5bd9887e3bf779da5077efab28216 | |
180 | $TESTTMP/hgcache/master/packs/8fe685c56f6f7edf550bfcec74eeecc5f3c2ba15: |
|
180 | $TESTTMP/hgcache/master/packs/06ae46494f0e3b9beda53eae8fc0e55139f13123: | |
181 |
|
181 | |||
182 | x |
|
182 | x | |
183 | Node Delta Base Delta SHA1 Delta Length |
|
183 | Node Delta Base Delta SHA1 Delta Length | |
184 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 77029ab56e83ea2115dd53ff87483682abe5d7ca 12 |
|
184 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 77029ab56e83ea2115dd53ff87483682abe5d7ca 12 | |
185 | Node Delta Base Delta SHA1 Delta Length |
|
185 | Node Delta Base Delta SHA1 Delta Length | |
186 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 7ca8c71a64f7b56380e77573da2f7a5fdd2ecdb5 8 |
|
186 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 7ca8c71a64f7b56380e77573da2f7a5fdd2ecdb5 8 | |
187 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx |
|
187 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx | |
188 |
|
188 | |||
189 | x |
|
189 | x | |
190 | Node P1 Node P2 Node Link Node Copy From |
|
190 | Node P1 Node P2 Node Link Node Copy From | |
191 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 |
|
191 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 | |
192 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 |
|
192 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 | |
193 | aee31534993a 1406e7411862 000000000000 a89d614e2364 |
|
193 | aee31534993a 1406e7411862 000000000000 a89d614e2364 | |
194 | 1406e7411862 000000000000 000000000000 b292c1e3311f |
|
194 | 1406e7411862 000000000000 000000000000 b292c1e3311f | |
195 |
|
195 | |||
196 | # Test copy tracing from a pack |
|
196 | # Test copy tracing from a pack | |
197 | $ cd ../master |
|
197 | $ cd ../master | |
198 | $ hg mv x y |
|
198 | $ hg mv x y | |
199 | $ hg commit -m 'move x to y' |
|
199 | $ hg commit -m 'move x to y' | |
200 | $ cd ../shallow |
|
200 | $ cd ../shallow | |
201 | $ hg pull -q |
|
201 | $ hg pull -q | |
202 | $ hg up -q tip |
|
202 | $ hg up -q tip | |
203 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
203 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
204 | $ hg repack |
|
204 | $ hg repack | |
205 | $ hg log -f y -T '{desc}\n' |
|
205 | $ hg log -f y -T '{desc}\n' | |
206 | move x to y |
|
206 | move x to y | |
207 | x4 |
|
207 | x4 | |
208 | x3 |
|
208 | x3 | |
209 | x2 |
|
209 | x2 | |
210 | x |
|
210 | x | |
211 |
|
211 | |||
212 | # Test copy trace across rename and back |
|
212 | # Test copy trace across rename and back | |
213 | $ cp -R $TESTTMP/hgcache/master/packs $TESTTMP/backuppacks |
|
213 | $ cp -R $TESTTMP/hgcache/master/packs $TESTTMP/backuppacks | |
214 | $ cd ../master |
|
214 | $ cd ../master | |
215 | $ hg mv y x |
|
215 | $ hg mv y x | |
216 | $ hg commit -m 'move y back to x' |
|
216 | $ hg commit -m 'move y back to x' | |
217 | $ hg revert -r 0 x |
|
217 | $ hg revert -r 0 x | |
218 | $ mv x y |
|
218 | $ mv x y | |
219 | $ hg add y |
|
219 | $ hg add y | |
220 | $ echo >> y |
|
220 | $ echo >> y | |
221 | $ hg revert x |
|
221 | $ hg revert x | |
222 | $ hg commit -m 'add y back without metadata' |
|
222 | $ hg commit -m 'add y back without metadata' | |
223 | $ cd ../shallow |
|
223 | $ cd ../shallow | |
224 | $ hg pull -q |
|
224 | $ hg pull -q | |
225 | $ hg up -q tip |
|
225 | $ hg up -q tip | |
226 | 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over * (glob) |
|
226 | 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over * (glob) | |
227 | $ hg repack |
|
227 | $ hg repack | |
228 | $ ls $TESTTMP/hgcache/master/packs |
|
228 | $ ls $TESTTMP/hgcache/master/packs | |
229 | e8fdf7ae22b772dcc291f905b9c6e5f381d28739.dataidx |
|
229 | 308a7aba9c54a0b71ae5adbbccd00c0aff20876e.dataidx | |
230 | e8fdf7ae22b772dcc291f905b9c6e5f381d28739.datapack |
|
230 | 308a7aba9c54a0b71ae5adbbccd00c0aff20876e.datapack | |
231 | ebbd7411e00456c0eec8d1150a77e2b3ef490f3f.histidx |
|
231 | bfd60adb76018bb952e27cd23fc151bf94865d7d.histidx | |
232 | ebbd7411e00456c0eec8d1150a77e2b3ef490f3f.histpack |
|
232 | bfd60adb76018bb952e27cd23fc151bf94865d7d.histpack | |
233 | repacklock |
|
233 | repacklock | |
234 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx |
|
234 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx | |
235 |
|
235 | |||
236 | x |
|
236 | x | |
237 | Node P1 Node P2 Node Link Node Copy From |
|
237 | Node P1 Node P2 Node Link Node Copy From | |
238 | cd410a44d584 577959738234 000000000000 609547eda446 y |
|
238 | cd410a44d584 577959738234 000000000000 609547eda446 y | |
239 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 |
|
239 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 | |
240 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 |
|
240 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 | |
241 | aee31534993a 1406e7411862 000000000000 a89d614e2364 |
|
241 | aee31534993a 1406e7411862 000000000000 a89d614e2364 | |
242 | 1406e7411862 000000000000 000000000000 b292c1e3311f |
|
242 | 1406e7411862 000000000000 000000000000 b292c1e3311f | |
243 |
|
243 | |||
244 | y |
|
244 | y | |
245 | Node P1 Node P2 Node Link Node Copy From |
|
245 | Node P1 Node P2 Node Link Node Copy From | |
246 | 577959738234 1bb2e6237e03 000000000000 c7faf2fc439a x |
|
246 | 577959738234 1bb2e6237e03 000000000000 c7faf2fc439a x | |
247 | 21f46f2721e7 000000000000 000000000000 d6868642b790 |
|
247 | 21f46f2721e7 000000000000 000000000000 d6868642b790 | |
248 | $ hg strip -r '.^' |
|
248 | $ hg strip -r '.^' | |
249 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
249 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved | |
250 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) |
|
250 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) | |
251 | $ hg -R ../master strip -r '.^' |
|
251 | $ hg -R ../master strip -r '.^' | |
252 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
252 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved | |
253 | saved backup bundle to $TESTTMP/master/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) |
|
253 | saved backup bundle to $TESTTMP/master/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) | |
254 |
|
254 | |||
255 | $ rm -rf $TESTTMP/hgcache/master/packs |
|
255 | $ rm -rf $TESTTMP/hgcache/master/packs | |
256 | $ cp -R $TESTTMP/backuppacks $TESTTMP/hgcache/master/packs |
|
256 | $ cp -R $TESTTMP/backuppacks $TESTTMP/hgcache/master/packs | |
257 |
|
257 | |||
258 | # Test repacking datapack without history |
|
258 | # Test repacking datapack without history | |
259 | $ rm -rf $CACHEDIR/master/packs/*hist* |
|
259 | $ rm -rf $CACHEDIR/master/packs/*hist* | |
260 | $ hg repack |
|
260 | $ hg repack | |
261 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack |
|
261 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack | |
262 | $TESTTMP/hgcache/master/packs/a8d86ff8e1a11a77a85f5fea567f56a757583eda: |
|
262 | $TESTTMP/hgcache/master/packs/ba4649b56263282b0699f9a6e7e34a4a2bac1638: | |
263 | x: |
|
263 | x: | |
264 | Node Delta Base Delta Length Blob Size |
|
264 | Node Delta Base Delta Length Blob Size | |
265 | 1bb2e6237e03 000000000000 8 8 |
|
265 | 1bb2e6237e03 000000000000 8 8 | |
266 | d4a3ed9310e5 1bb2e6237e03 12 6 |
|
266 | d4a3ed9310e5 1bb2e6237e03 12 6 | |
267 | aee31534993a d4a3ed9310e5 12 4 |
|
267 | aee31534993a d4a3ed9310e5 12 4 | |
268 |
|
268 | |||
269 | Total: 32 18 (77.8% bigger) |
|
269 | Total: 32 18 (77.8% bigger) | |
270 | y: |
|
270 | y: | |
271 | Node Delta Base Delta Length Blob Size |
|
271 | Node Delta Base Delta Length Blob Size | |
272 | 577959738234 000000000000 70 8 |
|
272 | 577959738234 000000000000 70 8 | |
273 |
|
273 | |||
274 | Total: 70 8 (775.0% bigger) |
|
274 | Total: 70 8 (775.0% bigger) | |
275 |
|
275 | |||
276 | $ hg cat -r ".^" x |
|
276 | $ hg cat -r ".^" x | |
277 | x |
|
277 | x | |
278 | x |
|
278 | x | |
279 | x |
|
279 | x | |
280 | x |
|
280 | x | |
281 |
|
281 | |||
282 | Incremental repack |
|
282 | Incremental repack | |
283 | $ rm -rf $CACHEDIR/master/packs/* |
|
283 | $ rm -rf $CACHEDIR/master/packs/* | |
284 | $ cat >> .hg/hgrc <<EOF |
|
284 | $ cat >> .hg/hgrc <<EOF | |
285 | > [remotefilelog] |
|
285 | > [remotefilelog] | |
286 | > data.generations=60 |
|
286 | > data.generations=60 | |
287 | > 150 |
|
287 | > 150 | |
288 | > EOF |
|
288 | > EOF | |
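The two values above, 60 and 150, read as generation boundaries in bytes: incremental repack groups pack files of similar size and only combines a generation once it accumulates several packs. A minimal Python sketch of how such boundaries could bucket packs; the helper name and return convention are illustrative assumptions, not the extension's actual code:

    import os

    GENERATION_BOUNDARIES = [150, 60]  # largest boundary first

    def generation_of(packpath):
        # Packs above every boundary fall in generation 0; smaller packs
        # fall into progressively higher-numbered generations.
        size = os.path.getsize(packpath)
        for gen, boundary in enumerate(GENERATION_BOUNDARIES):
            if size > boundary:
                return gen
        return len(GENERATION_BOUNDARIES)

Under this reading, the 67-byte pack fetched below lands in the middle generation and stays put until enough same-generation packs exist.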
289 |
|
289 | |||
290 | Single pack - repack does nothing |
|
290 | Single pack - repack does nothing | |
291 | $ hg prefetch -r 0 |
|
291 | $ hg prefetch -r 0 | |
292 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
292 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
293 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
293 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
294 | [1] |
|
294 | [1] | |
295 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
295 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
296 | [1] |
|
296 | [1] | |
297 | $ hg repack --incremental |
|
297 | $ hg repack --incremental | |
298 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
298 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
299 | -r--r--r-- 67 b5a62f3496ccbd2479497cdbc7345f3304735f33.datapack |
|
299 | -r--r--r-- 67 6409c5a1d61b251906689d4d1282ac44df6a7898.datapack | |
300 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
300 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
301 | -r--r--r-- 90 |
|
301 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
302 |
|
302 | |||
303 | 3 gen1 packs, 1 gen0 pack - packs 3 gen1 into 1 |
|
303 | 3 gen1 packs, 1 gen0 pack - packs 3 gen1 into 1 | |
304 | $ hg prefetch -r 1 |
|
304 | $ hg prefetch -r 1 | |
305 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
305 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
306 | $ hg prefetch -r 2 |
|
306 | $ hg prefetch -r 2 | |
307 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
307 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
308 | $ hg prefetch -r 3 |
|
308 | $ hg prefetch -r 3 | |
309 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
309 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
310 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
310 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
311 | -r--r--r-- 67 b5a62f3496ccbd2479497cdbc7345f3304735f33.datapack |
|
311 | -r--r--r-- 67 6409c5a1d61b251906689d4d1282ac44df6a7898.datapack | |
312 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
312 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
313 | -r--r--r-- 90 |
|
313 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
314 | $ hg repack --incremental |
|
314 | $ hg repack --incremental | |
315 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
315 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
316 | -r--r--r-- 225 8fe685c56f6f7edf550bfcec74eeecc5f3c2ba15.datapack |
|
316 | -r--r--r-- 225 06ae46494f0e3b9beda53eae8fc0e55139f13123.datapack | |
317 | -r--r--r-- 67 b5a62f3496ccbd2479497cdbc7345f3304735f33.datapack |
|
317 | -r--r--r-- 67 6409c5a1d61b251906689d4d1282ac44df6a7898.datapack | |
318 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
318 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
319 | -r--r--r-- 336 094b530486dad4427a0faf6bcbc031571b99ca24.histpack |
|
319 | -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack | |
320 | -r--r--r-- 90 |
|
320 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
321 |
|
321 | |||
322 | 1 gen3 pack, 1 gen0 pack - does nothing |
|
322 | 1 gen3 pack, 1 gen0 pack - does nothing | |
323 | $ hg repack --incremental |
|
323 | $ hg repack --incremental | |
324 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
324 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
325 | -r--r--r-- 225 8fe685c56f6f7edf550bfcec74eeecc5f3c2ba15.datapack |
|
325 | -r--r--r-- 225 06ae46494f0e3b9beda53eae8fc0e55139f13123.datapack | |
326 | -r--r--r-- 67 b5a62f3496ccbd2479497cdbc7345f3304735f33.datapack |
|
326 | -r--r--r-- 67 6409c5a1d61b251906689d4d1282ac44df6a7898.datapack | |
327 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
327 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
328 | -r--r--r-- 336 094b530486dad4427a0faf6bcbc031571b99ca24.histpack |
|
328 | -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack | |
329 | -r--r--r-- 90 |
|
329 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
330 |
|
330 | |||
331 | Pull should run background repack |
|
331 | Pull should run background repack | |
332 | $ cat >> .hg/hgrc <<EOF |
|
332 | $ cat >> .hg/hgrc <<EOF | |
333 | > [remotefilelog] |
|
333 | > [remotefilelog] | |
334 | > backgroundrepack=True |
|
334 | > backgroundrepack=True | |
335 | > EOF |
|
335 | > EOF | |
336 | $ clearcache |
|
336 | $ clearcache | |
337 | $ hg prefetch -r 0 |
|
337 | $ hg prefetch -r 0 | |
338 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
338 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
339 | $ hg prefetch -r 1 |
|
339 | $ hg prefetch -r 1 | |
340 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
340 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
341 | $ hg prefetch -r 2 |
|
341 | $ hg prefetch -r 2 | |
342 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
342 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
343 | $ hg prefetch -r 3 |
|
343 | $ hg prefetch -r 3 | |
344 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
344 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
345 |
|
345 | |||
346 | $ hg pull |
|
346 | $ hg pull | |
347 | pulling from ssh://user@dummy/master |
|
347 | pulling from ssh://user@dummy/master | |
348 | searching for changes |
|
348 | searching for changes | |
349 | no changes found |
|
349 | no changes found | |
350 | (running background incremental repack) |
|
350 | (running background incremental repack) | |
351 | $ sleep 0.5 |
|
351 | $ sleep 0.5 | |
352 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
352 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
353 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
353 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
354 | -r--r--r-- 301 09b8bf49256b3fc2175977ba97d6402e91a9a604.datapack |
|
354 | -r--r--r-- 301 671913bebdb7b95aae52a546662753eac7606e40.datapack | |
355 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
355 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
356 | -r--r--r-- 336 094b530486dad4427a0faf6bcbc031571b99ca24.histpack |
|
356 | -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack | |
357 |
|
357 | |||
358 | Test environment variable resolution |
|
358 | Test environment variable resolution | |
359 | $ CACHEPATH=$TESTTMP/envcache hg prefetch --config 'remotefilelog.cachepath=$CACHEPATH' |
|
359 | $ CACHEPATH=$TESTTMP/envcache hg prefetch --config 'remotefilelog.cachepath=$CACHEPATH' | |
360 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
360 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
361 | $ find $TESTTMP/envcache | sort |
|
361 | $ find $TESTTMP/envcache | sort | |
362 | $TESTTMP/envcache |
|
362 | $TESTTMP/envcache | |
363 | $TESTTMP/envcache/master |
|
363 | $TESTTMP/envcache/master | |
364 | $TESTTMP/envcache/master/95 |
|
364 | $TESTTMP/envcache/master/95 | |
365 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a |
|
365 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a | |
366 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/577959738234a1eb241ed3ed4b22a575833f56e0 |
|
366 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/577959738234a1eb241ed3ed4b22a575833f56e0 | |
367 | $TESTTMP/envcache/repos |
|
367 | $TESTTMP/envcache/repos | |
368 |
|
368 | |||
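The cachepath above is configured as the literal string '$CACHEPATH' and is expanded against the environment when the cache is opened. A rough sketch of that expansion step; the wrapper name is hypothetical, but os.path.expandvars is the standard-library call for this kind of substitution:

    import os

    def resolve_cachepath(configured):
        # '$CACHEPATH' -> '$TESTTMP/envcache' once CACHEPATH is exported
        return os.path.expandvars(os.path.expanduser(configured))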
369 | Test local remotefilelog blob is correct when based on a pack |
|
369 | Test local remotefilelog blob is correct when based on a pack | |
370 | $ hg prefetch -r . |
|
370 | $ hg prefetch -r . | |
371 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
371 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
372 | $ echo >> y |
|
372 | $ echo >> y | |
373 | $ hg commit -m y2 |
|
373 | $ hg commit -m y2 | |
374 | $ hg debugremotefilelog .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 |
|
374 | $ hg debugremotefilelog .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 | |
375 | size: 9 bytes |
|
375 | size: 9 bytes | |
376 | path: .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 |
|
376 | path: .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 | |
377 | key: b70860edba4f |
|
377 | key: b70860edba4f | |
378 |
|
378 | |||
379 | node => p1 p2 linknode copyfrom |
|
379 | node => p1 p2 linknode copyfrom | |
380 | b70860edba4f => 577959738234 000000000000 08d3fbc98c48 |
|
380 | b70860edba4f => 577959738234 000000000000 08d3fbc98c48 | |
381 | 577959738234 => 1bb2e6237e03 000000000000 c7faf2fc439a x |
|
381 | 577959738234 => 1bb2e6237e03 000000000000 c7faf2fc439a x | |
382 | 1bb2e6237e03 => d4a3ed9310e5 000000000000 0b03bbc9e1e7 |
|
382 | 1bb2e6237e03 => d4a3ed9310e5 000000000000 0b03bbc9e1e7 | |
383 | d4a3ed9310e5 => aee31534993a 000000000000 421535db10b6 |
|
383 | d4a3ed9310e5 => aee31534993a 000000000000 421535db10b6 | |
384 | aee31534993a => 1406e7411862 000000000000 a89d614e2364 |
|
384 | aee31534993a => 1406e7411862 000000000000 a89d614e2364 | |
385 | 1406e7411862 => 000000000000 000000000000 b292c1e3311f |
|
385 | 1406e7411862 => 000000000000 000000000000 b292c1e3311f |
@@ -1,463 +1,463 b'' | |||||
1 | $ PYTHONPATH=$TESTDIR/..:$PYTHONPATH |
|
1 | $ PYTHONPATH=$TESTDIR/..:$PYTHONPATH | |
2 | $ export PYTHONPATH |
|
2 | $ export PYTHONPATH | |
3 |
|
3 | |||
4 | $ . "$TESTDIR/remotefilelog-library.sh" |
|
4 | $ . "$TESTDIR/remotefilelog-library.sh" | |
5 |
|
5 | |||
6 | $ hginit master |
|
6 | $ hginit master | |
7 | $ cd master |
|
7 | $ cd master | |
8 | $ cat >> .hg/hgrc <<EOF |
|
8 | $ cat >> .hg/hgrc <<EOF | |
9 | > [remotefilelog] |
|
9 | > [remotefilelog] | |
10 | > server=True |
|
10 | > server=True | |
11 | > serverexpiration=-1 |
|
11 | > serverexpiration=-1 | |
12 | > EOF |
|
12 | > EOF | |
13 | $ echo x > x |
|
13 | $ echo x > x | |
14 | $ hg commit -qAm x |
|
14 | $ hg commit -qAm x | |
15 | $ echo x >> x |
|
15 | $ echo x >> x | |
16 | $ hg commit -qAm x2 |
|
16 | $ hg commit -qAm x2 | |
17 | $ cd .. |
|
17 | $ cd .. | |
18 |
|
18 | |||
19 | $ hgcloneshallow ssh://user@dummy/master shallow -q |
|
19 | $ hgcloneshallow ssh://user@dummy/master shallow -q | |
20 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) |
|
20 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob) | |
21 |
|
21 | |||
22 | # Set the prefetchdays config to zero so that all commits are prefetched |
|
22 | # Set the prefetchdays config to zero so that all commits are prefetched | |
23 | # no matter what their creation date is. |
|
23 | # no matter what their creation date is. | |
24 | $ cd shallow |
|
24 | $ cd shallow | |
25 | $ cat >> .hg/hgrc <<EOF |
|
25 | $ cat >> .hg/hgrc <<EOF | |
26 | > [remotefilelog] |
|
26 | > [remotefilelog] | |
27 | > prefetchdays=0 |
|
27 | > prefetchdays=0 | |
28 | > EOF |
|
28 | > EOF | |
29 | $ cd .. |
|
29 | $ cd .. | |
30 |
|
30 | |||
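A sketch of the age cutoff that prefetchdays implies; the function and argument names are assumptions, but the zero-disables-the-cutoff behavior matches the comment above:

    import time

    def wants_prefetch(commit_timestamp, prefetchdays):
        if prefetchdays <= 0:
            return True  # no cutoff: prefetch commits of any age
        cutoff = time.time() - prefetchdays * 24 * 60 * 60
        return commit_timestamp >= cutoff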
31 | # Test that repack cleans up the old files and creates new packs |
|
31 | # Test that repack cleans up the old files and creates new packs | |
32 |
|
32 | |||
33 | $ cd shallow |
|
33 | $ cd shallow | |
34 | $ find $CACHEDIR | sort |
|
34 | $ find $CACHEDIR | sort | |
35 | $TESTTMP/hgcache |
|
35 | $TESTTMP/hgcache | |
36 | $TESTTMP/hgcache/master |
|
36 | $TESTTMP/hgcache/master | |
37 | $TESTTMP/hgcache/master/11 |
|
37 | $TESTTMP/hgcache/master/11 | |
38 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072 |
|
38 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072 | |
39 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/aee31534993a501858fb6dd96a065671922e7d51 |
|
39 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/aee31534993a501858fb6dd96a065671922e7d51 | |
40 | $TESTTMP/hgcache/repos |
|
40 | $TESTTMP/hgcache/repos | |
41 |
|
41 | |||
42 | $ hg repack |
|
42 | $ hg repack | |
43 |
|
43 | |||
44 | $ find $CACHEDIR | sort |
|
44 | $ find $CACHEDIR | sort | |
45 | $TESTTMP/hgcache |
|
45 | $TESTTMP/hgcache | |
46 | $TESTTMP/hgcache/master |
|
46 | $TESTTMP/hgcache/master | |
47 | $TESTTMP/hgcache/master/packs |
|
47 | $TESTTMP/hgcache/master/packs | |
48 | $TESTTMP/hgcache/master/packs/276d308429d0303762befa376788300f0310f90e.histidx |
|
48 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx | |
49 | $TESTTMP/hgcache/master/packs/276d308429d0303762befa376788300f0310f90e.histpack |
|
49 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack | |
50 | $TESTTMP/hgcache/master/packs/8e25dec685d5e0bb1f1b39df3acebda0e0d75c6e.dataidx |
|
50 | $TESTTMP/hgcache/master/packs/add67cb28ae0a2962111588ce49467ca9ebb9195.dataidx | |
51 | $TESTTMP/hgcache/master/packs/8e25dec685d5e0bb1f1b39df3acebda0e0d75c6e.datapack |
|
51 | $TESTTMP/hgcache/master/packs/add67cb28ae0a2962111588ce49467ca9ebb9195.datapack | |
52 | $TESTTMP/hgcache/master/packs/repacklock |
|
52 | $TESTTMP/hgcache/master/packs/repacklock | |
53 | $TESTTMP/hgcache/repos |
|
53 | $TESTTMP/hgcache/repos | |
54 |
|
54 | |||
55 | # Test that the packs are readonly |
|
55 | # Test that the packs are readonly | |
56 | $ ls_l $CACHEDIR/master/packs |
|
56 | $ ls_l $CACHEDIR/master/packs | |
57 | -r--r--r-- 1145 276d308429d0303762befa376788300f0310f90e.histidx |
|
57 | -r--r--r-- 1145 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx | |
58 | -r--r--r-- 172 276d308429d0303762befa376788300f0310f90e.histpack |
|
58 | -r--r--r-- 172 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack | |
59 | -r--r--r-- 1074 8e25dec685d5e0bb1f1b39df3acebda0e0d75c6e.dataidx |
|
59 | -r--r--r-- 1074 add67cb28ae0a2962111588ce49467ca9ebb9195.dataidx | |
60 | -r--r--r-- 69 8e25dec685d5e0bb1f1b39df3acebda0e0d75c6e.datapack |
|
60 | -r--r--r-- 69 add67cb28ae0a2962111588ce49467ca9ebb9195.datapack | |
61 | -rw-r--r-- 0 repacklock |
|
61 | -rw-r--r-- 0 repacklock | |
62 |
|
62 | |||
63 | # Test that the data in the new packs is accessible |
|
63 | # Test that the data in the new packs is accessible | |
64 | $ hg cat -r . x |
|
64 | $ hg cat -r . x | |
65 | x |
|
65 | x | |
66 | x |
|
66 | x | |
67 |
|
67 | |||
68 | # Test that adding new data and repacking it results in the loose data and the |
|
68 | # Test that adding new data and repacking it results in the loose data and the | |
69 | # old packs being combined. |
|
69 | # old packs being combined. | |
70 |
|
70 | |||
71 | $ cd ../master |
|
71 | $ cd ../master | |
72 | $ echo x >> x |
|
72 | $ echo x >> x | |
73 | $ hg commit -m x3 |
|
73 | $ hg commit -m x3 | |
74 | $ cd ../shallow |
|
74 | $ cd ../shallow | |
75 | $ hg pull -q |
|
75 | $ hg pull -q | |
76 | $ hg up -q tip |
|
76 | $ hg up -q tip | |
77 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
77 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
78 |
|
78 | |||
79 | $ find $CACHEDIR -type f | sort |
|
79 | $ find $CACHEDIR -type f | sort | |
80 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216 |
|
80 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216 | |
81 | $TESTTMP/hgcache/master/packs/276d308429d0303762befa376788300f0310f90e.histidx |
|
81 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx | |
82 | $TESTTMP/hgcache/master/packs/276d308429d0303762befa376788300f0310f90e.histpack |
|
82 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack | |
83 | $TESTTMP/hgcache/master/packs/8e25dec685d5e0bb1f1b39df3acebda0e0d75c6e.dataidx |
|
83 | $TESTTMP/hgcache/master/packs/add67cb28ae0a2962111588ce49467ca9ebb9195.dataidx | |
84 | $TESTTMP/hgcache/master/packs/8e25dec685d5e0bb1f1b39df3acebda0e0d75c6e.datapack |
|
84 | $TESTTMP/hgcache/master/packs/add67cb28ae0a2962111588ce49467ca9ebb9195.datapack | |
85 | $TESTTMP/hgcache/master/packs/repacklock |
|
85 | $TESTTMP/hgcache/master/packs/repacklock | |
86 | $TESTTMP/hgcache/repos |
|
86 | $TESTTMP/hgcache/repos | |
87 |
|
87 | |||
88 | # First assert that with --packsonly, the loose object will be ignored: |
|
88 | # First assert that with --packsonly, the loose object will be ignored: | |
89 |
|
89 | |||
90 | $ hg repack --packsonly |
|
90 | $ hg repack --packsonly | |
91 |
|
91 | |||
92 | $ find $CACHEDIR -type f | sort |
|
92 | $ find $CACHEDIR -type f | sort | |
93 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216 |
|
93 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216 | |
94 | $TESTTMP/hgcache/master/packs/276d308429d0303762befa376788300f0310f90e.histidx |
|
94 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx | |
95 | $TESTTMP/hgcache/master/packs/276d308429d0303762befa376788300f0310f90e.histpack |
|
95 | $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack | |
96 | $TESTTMP/hgcache/master/packs/8e25dec685d5e0bb1f1b39df3acebda0e0d75c6e.dataidx |
|
96 | $TESTTMP/hgcache/master/packs/add67cb28ae0a2962111588ce49467ca9ebb9195.dataidx | |
97 | $TESTTMP/hgcache/master/packs/8e25dec685d5e0bb1f1b39df3acebda0e0d75c6e.datapack |
|
97 | $TESTTMP/hgcache/master/packs/add67cb28ae0a2962111588ce49467ca9ebb9195.datapack | |
98 | $TESTTMP/hgcache/master/packs/repacklock |
|
98 | $TESTTMP/hgcache/master/packs/repacklock | |
99 | $TESTTMP/hgcache/repos |
|
99 | $TESTTMP/hgcache/repos | |
100 |
|
100 | |||
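--packsonly restricts the repack to existing pack files, which is why the loose blob under 11/f6ad8ec5... survives above. A sketch of that source selection, with hypothetical names:

    PACK_SUFFIXES = ('.datapack', '.dataidx', '.histpack', '.histidx')

    def repack_sources(entries, packsonly):
        packs = [e for e in entries if e.endswith(PACK_SUFFIXES)]
        if packsonly:
            return packs  # loose blobs are ignored, as asserted above
        loose = [e for e in entries
                 if not e.endswith(PACK_SUFFIXES) and e != 'repacklock']
        return packs + loose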
101 | $ hg repack --traceback |
|
101 | $ hg repack --traceback | |
102 |
|
102 | |||
103 | $ find $CACHEDIR -type f | sort |
|
103 | $ find $CACHEDIR -type f | sort | |
104 | $TESTTMP/hgcache/master/packs/077e7ce5dfe862dc40cc8f3c9742d96a056865f2.histidx |
|
104 | $TESTTMP/hgcache/master/packs/1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.dataidx | |
105 | $TESTTMP/hgcache/master/packs/077e7ce5dfe862dc40cc8f3c9742d96a056865f2.histpack |
|
105 | $TESTTMP/hgcache/master/packs/1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.datapack | |
106 | $TESTTMP/hgcache/master/packs/935861cae0be6ce41a0d47a529e4d097e9e68a69.dataidx |
|
106 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx | |
107 | $TESTTMP/hgcache/master/packs/935861cae0be6ce41a0d47a529e4d097e9e68a69.datapack |
|
107 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
108 | $TESTTMP/hgcache/master/packs/repacklock |
|
108 | $TESTTMP/hgcache/master/packs/repacklock | |
109 | $TESTTMP/hgcache/repos |
|
109 | $TESTTMP/hgcache/repos | |
110 |
|
110 | |||
111 | # Verify all the file data is still available |
|
111 | # Verify all the file data is still available | |
112 | $ hg cat -r . x |
|
112 | $ hg cat -r . x | |
113 | x |
|
113 | x | |
114 | x |
|
114 | x | |
115 | x |
|
115 | x | |
116 | $ hg cat -r '.^' x |
|
116 | $ hg cat -r '.^' x | |
117 | x |
|
117 | x | |
118 | x |
|
118 | x | |
119 |
|
119 | |||
120 | # Test that repacking again without new data does not delete the pack files |
|
120 | # Test that repacking again without new data does not delete the pack files | |
121 | # and did not change the pack names |
|
121 | # and did not change the pack names | |
122 | $ hg repack |
|
122 | $ hg repack | |
123 | $ find $CACHEDIR -type f | sort |
|
123 | $ find $CACHEDIR -type f | sort | |
124 | $TESTTMP/hgcache/master/packs/077e7ce5dfe862dc40cc8f3c9742d96a056865f2.histidx |
|
124 | $TESTTMP/hgcache/master/packs/1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.dataidx | |
125 | $TESTTMP/hgcache/master/packs/077e7ce5dfe862dc40cc8f3c9742d96a056865f2.histpack |
|
125 | $TESTTMP/hgcache/master/packs/1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.datapack | |
126 | $TESTTMP/hgcache/master/packs/935861cae0be6ce41a0d47a529e4d097e9e68a69.dataidx |
|
126 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx | |
127 | $TESTTMP/hgcache/master/packs/935861cae0be6ce41a0d47a529e4d097e9e68a69.datapack |
|
127 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
128 | $TESTTMP/hgcache/master/packs/repacklock |
|
128 | $TESTTMP/hgcache/master/packs/repacklock | |
129 | $TESTTMP/hgcache/repos |
|
129 | $TESTTMP/hgcache/repos | |
130 |
|
130 | |||
131 | # Run two repacks at once |
|
131 | # Run two repacks at once | |
132 | $ hg repack --config "hooks.prerepack=sleep 3" & |
|
132 | $ hg repack --config "hooks.prerepack=sleep 3" & | |
133 | $ sleep 1 |
|
133 | $ sleep 1 | |
134 | $ hg repack |
|
134 | $ hg repack | |
135 | skipping repack - another repack is already running |
|
135 | skipping repack - another repack is already running | |
136 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
136 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
137 |
|
137 | |||
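The "skipping repack" message shows the two invocations excluding each other through the repacklock file seen in the listings. One plausible mechanism is an advisory file lock; this sketch assumes flock semantics and is not necessarily how the extension implements it:

    import fcntl
    import os

    def try_repack_lock(packdir):
        fd = os.open(os.path.join(packdir, 'repacklock'),
                     os.O_CREAT | os.O_WRONLY)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            return fd  # keep the descriptor open while repacking
        except (IOError, OSError):
            os.close(fd)
            print('skipping repack - another repack is already running')
            return None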
138 | # Run repack in the background |
|
138 | # Run repack in the background | |
139 | $ cd ../master |
|
139 | $ cd ../master | |
140 | $ echo x >> x |
|
140 | $ echo x >> x | |
141 | $ hg commit -m x4 |
|
141 | $ hg commit -m x4 | |
142 | $ cd ../shallow |
|
142 | $ cd ../shallow | |
143 | $ hg pull -q |
|
143 | $ hg pull -q | |
144 | $ hg up -q tip |
|
144 | $ hg up -q tip | |
145 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
145 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
146 | $ find $CACHEDIR -type f | sort |
|
146 | $ find $CACHEDIR -type f | sort | |
147 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1bb2e6237e035c8f8ef508e281f1ce075bc6db72 |
|
147 | $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1bb2e6237e035c8f8ef508e281f1ce075bc6db72 | |
148 | $TESTTMP/hgcache/master/packs/077e7ce5dfe862dc40cc8f3c9742d96a056865f2.histidx |
|
148 | $TESTTMP/hgcache/master/packs/1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.dataidx | |
149 | $TESTTMP/hgcache/master/packs/077e7ce5dfe862dc40cc8f3c9742d96a056865f2.histpack |
|
149 | $TESTTMP/hgcache/master/packs/1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.datapack | |
150 | $TESTTMP/hgcache/master/packs/935861cae0be6ce41a0d47a529e4d097e9e68a69.dataidx |
|
150 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx | |
151 | $TESTTMP/hgcache/master/packs/935861cae0be6ce41a0d47a529e4d097e9e68a69.datapack |
|
151 | $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
152 | $TESTTMP/hgcache/master/packs/repacklock |
|
152 | $TESTTMP/hgcache/master/packs/repacklock | |
153 | $TESTTMP/hgcache/repos |
|
153 | $TESTTMP/hgcache/repos | |
154 |
|
154 | |||
155 | $ hg repack --background |
|
155 | $ hg repack --background | |
156 | (running background repack) |
|
156 | (running background repack) | |
157 | $ sleep 0.5 |
|
157 | $ sleep 0.5 | |
158 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
158 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
159 | $ find $CACHEDIR -type f | sort |
|
159 | $ find $CACHEDIR -type f | sort | |
160 | $TESTTMP/hgcache/master/packs/094b530486dad4427a0faf6bcbc031571b99ca24.histidx |
|
160 | $TESTTMP/hgcache/master/packs/06ae46494f0e3b9beda53eae8fc0e55139f13123.dataidx | |
161 | $TESTTMP/hgcache/master/packs/094b530486dad4427a0faf6bcbc031571b99ca24.histpack |
|
161 | $TESTTMP/hgcache/master/packs/06ae46494f0e3b9beda53eae8fc0e55139f13123.datapack | |
162 | $TESTTMP/hgcache/master/packs/8fe685c56f6f7edf550bfcec74eeecc5f3c2ba15.dataidx |
|
162 | $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histidx | |
163 | $TESTTMP/hgcache/master/packs/8fe685c56f6f7edf550bfcec74eeecc5f3c2ba15.datapack |
|
163 | $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histpack | |
164 | $TESTTMP/hgcache/master/packs/repacklock |
|
164 | $TESTTMP/hgcache/master/packs/repacklock | |
165 | $TESTTMP/hgcache/repos |
|
165 | $TESTTMP/hgcache/repos | |
166 |
|
166 | |||
167 | # Test debug commands |
|
167 | # Test debug commands | |
168 |
|
168 | |||
169 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack |
|
169 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack | |
170 | $TESTTMP/hgcache/master/packs/8fe685c56f6f7edf550bfcec74eeecc5f3c2ba15: |
|
170 | $TESTTMP/hgcache/master/packs/06ae46494f0e3b9beda53eae8fc0e55139f13123: | |
171 | x: |
|
171 | x: | |
172 | Node Delta Base Delta Length Blob Size |
|
172 | Node Delta Base Delta Length Blob Size | |
173 | 1bb2e6237e03 000000000000 8 8 |
|
173 | 1bb2e6237e03 000000000000 8 8 | |
174 | d4a3ed9310e5 1bb2e6237e03 12 6 |
|
174 | d4a3ed9310e5 1bb2e6237e03 12 6 | |
175 | aee31534993a d4a3ed9310e5 12 4 |
|
175 | aee31534993a d4a3ed9310e5 12 4 | |
176 |
|
176 | |||
177 | Total: 32 18 (77.8% bigger) |
|
177 | Total: 32 18 (77.8% bigger) | |
178 | $ hg debugdatapack --long $TESTTMP/hgcache/master/packs/*.datapack |
|
178 | $ hg debugdatapack --long $TESTTMP/hgcache/master/packs/*.datapack | |
179 | $TESTTMP/hgcache/master/packs/8fe685c56f6f7edf550bfcec74eeecc5f3c2ba15: |
|
179 | $TESTTMP/hgcache/master/packs/06ae46494f0e3b9beda53eae8fc0e55139f13123: | |
180 | x: |
|
180 | x: | |
181 | Node Delta Base Delta Length Blob Size |
|
181 | Node Delta Base Delta Length Blob Size | |
182 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 8 8 |
|
182 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 8 8 | |
183 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 12 6 |
|
183 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 12 6 | |
184 | aee31534993a501858fb6dd96a065671922e7d51 d4a3ed9310e5bd9887e3bf779da5077efab28216 12 4 |
|
184 | aee31534993a501858fb6dd96a065671922e7d51 d4a3ed9310e5bd9887e3bf779da5077efab28216 12 4 | |
185 |
|
185 | |||
186 | Total: 32 18 (77.8% bigger) |
|
186 | Total: 32 18 (77.8% bigger) | |
187 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack --node d4a3ed9310e5bd9887e3bf779da5077efab28216 |
|
187 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack --node d4a3ed9310e5bd9887e3bf779da5077efab28216 | |
188 | $TESTTMP/hgcache/master/packs/8fe685c56f6f7edf550bfcec74eeecc5f3c2ba15: |
|
188 | $TESTTMP/hgcache/master/packs/06ae46494f0e3b9beda53eae8fc0e55139f13123: | |
189 |
|
189 | |||
190 | x |
|
190 | x | |
191 | Node Delta Base Delta SHA1 Delta Length |
|
191 | Node Delta Base Delta SHA1 Delta Length | |
192 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 77029ab56e83ea2115dd53ff87483682abe5d7ca 12 |
|
192 | d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 77029ab56e83ea2115dd53ff87483682abe5d7ca 12 | |
193 | Node Delta Base Delta SHA1 Delta Length |
|
193 | Node Delta Base Delta SHA1 Delta Length | |
194 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 7ca8c71a64f7b56380e77573da2f7a5fdd2ecdb5 8 |
|
194 | 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 7ca8c71a64f7b56380e77573da2f7a5fdd2ecdb5 8 | |
195 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx |
|
195 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx | |
196 |
|
196 | |||
197 | x |
|
197 | x | |
198 | Node P1 Node P2 Node Link Node Copy From |
|
198 | Node P1 Node P2 Node Link Node Copy From | |
199 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 |
|
199 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 | |
200 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 |
|
200 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 | |
201 | aee31534993a 1406e7411862 000000000000 a89d614e2364 |
|
201 | aee31534993a 1406e7411862 000000000000 a89d614e2364 | |
202 | 1406e7411862 000000000000 000000000000 b292c1e3311f |
|
202 | 1406e7411862 000000000000 000000000000 b292c1e3311f | |
203 |
|
203 | |||
204 | # Test copy tracing from a pack |
|
204 | # Test copy tracing from a pack | |
205 | $ cd ../master |
|
205 | $ cd ../master | |
206 | $ hg mv x y |
|
206 | $ hg mv x y | |
207 | $ hg commit -m 'move x to y' |
|
207 | $ hg commit -m 'move x to y' | |
208 | $ cd ../shallow |
|
208 | $ cd ../shallow | |
209 | $ hg pull -q |
|
209 | $ hg pull -q | |
210 | $ hg up -q tip |
|
210 | $ hg up -q tip | |
211 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
211 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
212 | $ hg repack |
|
212 | $ hg repack | |
213 | $ hg log -f y -T '{desc}\n' |
|
213 | $ hg log -f y -T '{desc}\n' | |
214 | move x to y |
|
214 | move x to y | |
215 | x4 |
|
215 | x4 | |
216 | x3 |
|
216 | x3 | |
217 | x2 |
|
217 | x2 | |
218 | x |
|
218 | x | |
219 |
|
219 | |||
220 | # Test copy trace across rename and back |
|
220 | # Test copy trace across rename and back | |
221 | $ cp -R $TESTTMP/hgcache/master/packs $TESTTMP/backuppacks |
|
221 | $ cp -R $TESTTMP/hgcache/master/packs $TESTTMP/backuppacks | |
222 | $ cd ../master |
|
222 | $ cd ../master | |
223 | $ hg mv y x |
|
223 | $ hg mv y x | |
224 | $ hg commit -m 'move y back to x' |
|
224 | $ hg commit -m 'move y back to x' | |
225 | $ hg revert -r 0 x |
|
225 | $ hg revert -r 0 x | |
226 | $ mv x y |
|
226 | $ mv x y | |
227 | $ hg add y |
|
227 | $ hg add y | |
228 | $ echo >> y |
|
228 | $ echo >> y | |
229 | $ hg revert x |
|
229 | $ hg revert x | |
230 | $ hg commit -m 'add y back without metadata' |
|
230 | $ hg commit -m 'add y back without metadata' | |
231 | $ cd ../shallow |
|
231 | $ cd ../shallow | |
232 | $ hg pull -q |
|
232 | $ hg pull -q | |
233 | $ hg up -q tip |
|
233 | $ hg up -q tip | |
234 | 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over * (glob) |
|
234 | 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over * (glob) | |
235 | $ hg repack |
|
235 | $ hg repack | |
236 | $ ls $TESTTMP/hgcache/master/packs |
|
236 | $ ls $TESTTMP/hgcache/master/packs | |
237 | e8fdf7ae22b772dcc291f905b9c6e5f381d28739.dataidx |
|
237 | 308a7aba9c54a0b71ae5adbbccd00c0aff20876e.dataidx | |
238 | e8fdf7ae22b772dcc291f905b9c6e5f381d28739.datapack |
|
238 | 308a7aba9c54a0b71ae5adbbccd00c0aff20876e.datapack | |
239 | ebbd7411e00456c0eec8d1150a77e2b3ef490f3f.histidx |
|
239 | bfd60adb76018bb952e27cd23fc151bf94865d7d.histidx | |
240 | ebbd7411e00456c0eec8d1150a77e2b3ef490f3f.histpack |
|
240 | bfd60adb76018bb952e27cd23fc151bf94865d7d.histpack | |
241 | repacklock |
|
241 | repacklock | |
242 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx |
|
242 | $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx | |
243 |
|
243 | |||
244 | x |
|
244 | x | |
245 | Node P1 Node P2 Node Link Node Copy From |
|
245 | Node P1 Node P2 Node Link Node Copy From | |
246 | cd410a44d584 577959738234 000000000000 609547eda446 y |
|
246 | cd410a44d584 577959738234 000000000000 609547eda446 y | |
247 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 |
|
247 | 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7 | |
248 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 |
|
248 | d4a3ed9310e5 aee31534993a 000000000000 421535db10b6 | |
249 | aee31534993a 1406e7411862 000000000000 a89d614e2364 |
|
249 | aee31534993a 1406e7411862 000000000000 a89d614e2364 | |
250 | 1406e7411862 000000000000 000000000000 b292c1e3311f |
|
250 | 1406e7411862 000000000000 000000000000 b292c1e3311f | |
251 |
|
251 | |||
252 | y |
|
252 | y | |
253 | Node P1 Node P2 Node Link Node Copy From |
|
253 | Node P1 Node P2 Node Link Node Copy From | |
254 | 577959738234 1bb2e6237e03 000000000000 c7faf2fc439a x |
|
254 | 577959738234 1bb2e6237e03 000000000000 c7faf2fc439a x | |
255 | 21f46f2721e7 000000000000 000000000000 d6868642b790 |
|
255 | 21f46f2721e7 000000000000 000000000000 d6868642b790 | |
256 | $ hg strip -r '.^' |
|
256 | $ hg strip -r '.^' | |
257 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
257 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved | |
258 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) |
|
258 | saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) | |
259 | $ hg -R ../master strip -r '.^' |
|
259 | $ hg -R ../master strip -r '.^' | |
260 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved |
|
260 | 1 files updated, 0 files merged, 1 files removed, 0 files unresolved | |
261 | saved backup bundle to $TESTTMP/master/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) |
|
261 | saved backup bundle to $TESTTMP/master/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob) | |
262 |
|
262 | |||
263 | $ rm -rf $TESTTMP/hgcache/master/packs |
|
263 | $ rm -rf $TESTTMP/hgcache/master/packs | |
264 | $ cp -R $TESTTMP/backuppacks $TESTTMP/hgcache/master/packs |
|
264 | $ cp -R $TESTTMP/backuppacks $TESTTMP/hgcache/master/packs | |
265 |
|
265 | |||
266 | # Test repacking datapack without history |
|
266 | # Test repacking datapack without history | |
267 | $ rm -rf $CACHEDIR/master/packs/*hist* |
|
267 | $ rm -rf $CACHEDIR/master/packs/*hist* | |
268 | $ hg repack |
|
268 | $ hg repack | |
269 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack |
|
269 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack | |
270 | $TESTTMP/hgcache/master/packs/a8d86ff8e1a11a77a85f5fea567f56a757583eda: |
|
270 | $TESTTMP/hgcache/master/packs/ba4649b56263282b0699f9a6e7e34a4a2bac1638: | |
271 | x: |
|
271 | x: | |
272 | Node Delta Base Delta Length Blob Size |
|
272 | Node Delta Base Delta Length Blob Size | |
273 | 1bb2e6237e03 000000000000 8 8 |
|
273 | 1bb2e6237e03 000000000000 8 8 | |
274 | d4a3ed9310e5 1bb2e6237e03 12 6 |
|
274 | d4a3ed9310e5 1bb2e6237e03 12 6 | |
275 | aee31534993a d4a3ed9310e5 12 4 |
|
275 | aee31534993a d4a3ed9310e5 12 4 | |
276 |
|
276 | |||
277 | Total: 32 18 (77.8% bigger) |
|
277 | Total: 32 18 (77.8% bigger) | |
278 | y: |
|
278 | y: | |
279 | Node Delta Base Delta Length Blob Size |
|
279 | Node Delta Base Delta Length Blob Size | |
280 | 577959738234 000000000000 70 8 |
|
280 | 577959738234 000000000000 70 8 | |
281 |
|
281 | |||
282 | Total: 70 8 (775.0% bigger) |
|
282 | Total: 70 8 (775.0% bigger) | |
283 |
|
283 | |||
284 | $ hg cat -r ".^" x |
|
284 | $ hg cat -r ".^" x | |
285 | x |
|
285 | x | |
286 | x |
|
286 | x | |
287 | x |
|
287 | x | |
288 | x |
|
288 | x | |
289 |
|
289 | |||
290 | Incremental repack |
|
290 | Incremental repack | |
291 | $ rm -rf $CACHEDIR/master/packs/* |
|
291 | $ rm -rf $CACHEDIR/master/packs/* | |
292 | $ cat >> .hg/hgrc <<EOF |
|
292 | $ cat >> .hg/hgrc <<EOF | |
293 | > [remotefilelog] |
|
293 | > [remotefilelog] | |
294 | > data.generations=60 |
|
294 | > data.generations=60 | |
295 | > 150 |
|
295 | > 150 | |
296 | > EOF |
|
296 | > EOF | |
297 |
|
297 | |||
298 | Single pack - repack does nothing |
|
298 | Single pack - repack does nothing | |
299 | $ hg prefetch -r 0 |
|
299 | $ hg prefetch -r 0 | |
300 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
300 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
301 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
301 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
302 | [1] |
|
302 | [1] | |
303 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
303 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
304 | [1] |
|
304 | [1] | |
305 | $ hg repack --incremental |
|
305 | $ hg repack --incremental | |
306 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
306 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
307 | -r--r--r-- 67 b5a62f3496ccbd2479497cdbc7345f3304735f33.datapack |
|
307 | -r--r--r-- 67 6409c5a1d61b251906689d4d1282ac44df6a7898.datapack | |
308 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
308 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
309 | -r--r--r-- 90 |
|
309 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
310 |
|
310 | |||
311 | 3 gen1 packs, 1 gen0 pack - packs 3 gen1 into 1 |
|
311 | 3 gen1 packs, 1 gen0 pack - packs 3 gen1 into 1 | |
312 | $ hg prefetch -r 1 |
|
312 | $ hg prefetch -r 1 | |
313 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
313 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
314 | $ hg prefetch -r 2 |
|
314 | $ hg prefetch -r 2 | |
315 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
315 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
316 | $ hg prefetch -r 38 |
|
316 | $ hg prefetch -r 38 | |
317 | abort: unknown revision '38'! |
|
317 | abort: unknown revision '38'! | |
318 | [255] |
|
318 | [255] | |
319 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
319 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
320 | -r--r--r-- 67 b5a62f3496ccbd2479497cdbc7345f3304735f33.datapack |
|
320 | -r--r--r-- 67 6409c5a1d61b251906689d4d1282ac44df6a7898.datapack | |
321 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
321 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
322 | -r--r--r-- 90 |
|
322 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
323 |
|
323 | |||
324 | For the data packs, we set repackmaxpacksize to 64 so that a data pack of |
|
324 | For the data packs, we set repackmaxpacksize to 64 so that a data pack of | |
325 | size 65 exceeds the limit. This effectively ensures that |
|
325 | size 65 exceeds the limit. This effectively ensures that | |
326 | no generation has 3 packs and therefore no packs are chosen for |
|
326 | no generation has 3 packs and therefore no packs are chosen for | |
327 | incremental repacking. For the history packs, we set repackmaxpacksize to |
|
327 | incremental repacking. For the history packs, we set repackmaxpacksize to | |
328 | 0, which should always result in no repacking. |
|
328 | 0, which should always result in no repacking. | |
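A sketch of the size filter this paragraph describes; the name and the exact boundary handling are assumptions. Packs over the configured limit are simply left out of the set considered for incremental repacking:

    def repack_candidates(pack_sizes, repackmaxpacksize):
        # pack_sizes maps pack path -> size in bytes
        return [path for path, size in sorted(pack_sizes.items())
                if size <= repackmaxpacksize]

With data.repackmaxpacksize=64 both data packs in the listing are over the limit, and history.repackmaxpacksize=0 rules out every history pack, so the incremental repack below leaves everything in place.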
329 | $ hg repack --incremental --config remotefilelog.data.repackmaxpacksize=64 \ |
|
329 | $ hg repack --incremental --config remotefilelog.data.repackmaxpacksize=64 \ | |
330 | > --config remotefilelog.history.repackmaxpacksize=0 |
|
330 | > --config remotefilelog.history.repackmaxpacksize=0 | |
331 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
331 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
332 | -r--r--r-- 147 935861cae0be6ce41a0d47a529e4d097e9e68a69.datapack |
|
332 | -r--r--r-- 147 1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.datapack | |
333 | -r--r--r-- 67 b5a62f3496ccbd2479497cdbc7345f3304735f33.datapack |
|
333 | -r--r--r-- 67 6409c5a1d61b251906689d4d1282ac44df6a7898.datapack | |
334 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
334 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
335 | -r--r--r-- 254 077e7ce5dfe862dc40cc8f3c9742d96a056865f2.histpack |
|
335 | -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
336 | -r--r--r-- 90 |
|
336 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
337 |
|
337 | |||
338 | Setting the repackmaxpacksize limit to the size of the biggest pack file |
|
338 | Setting the repackmaxpacksize limit to the size of the biggest pack file | |
339 | ensures that it is effectively ignored in the incremental repacking. |
|
339 | ensures that it is effectively ignored in the incremental repacking. | |
340 | $ hg repack --incremental --config remotefilelog.data.repackmaxpacksize=65 \ |
|
340 | $ hg repack --incremental --config remotefilelog.data.repackmaxpacksize=65 \ | |
341 | > --config remotefilelog.history.repackmaxpacksize=336 |
|
341 | > --config remotefilelog.history.repackmaxpacksize=336 | |
342 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
342 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
343 | -r--r--r-- 147 935861cae0be6ce41a0d47a529e4d097e9e68a69.datapack |
|
343 | -r--r--r-- 147 1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.datapack | |
344 | -r--r--r-- 67 b5a62f3496ccbd2479497cdbc7345f3304735f33.datapack |
|
344 | -r--r--r-- 67 6409c5a1d61b251906689d4d1282ac44df6a7898.datapack | |
345 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
345 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
346 | -r--r--r-- 254 077e7ce5dfe862dc40cc8f3c9742d96a056865f2.histpack |
|
346 | -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
347 | -r--r--r-- 90 |
|
347 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
348 |
|
348 | |||
349 | 1 gen3 pack, 1 gen0 pack - does nothing |
|
349 | 1 gen3 pack, 1 gen0 pack - does nothing | |
350 | $ hg repack --incremental |
|
350 | $ hg repack --incremental | |
351 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
351 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
352 | -r--r--r-- 147 935861cae0be6ce41a0d47a529e4d097e9e68a69.datapack |
|
352 | -r--r--r-- 147 1bd27e610ee06450e5f3bb0cd3afb6870e4cf375.datapack | |
353 | -r--r--r-- 67 b5a62f3496ccbd2479497cdbc7345f3304735f33.datapack |
|
353 | -r--r--r-- 67 6409c5a1d61b251906689d4d1282ac44df6a7898.datapack | |
354 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
354 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
355 | -r--r--r-- 254 077e7ce5dfe862dc40cc8f3c9742d96a056865f2.histpack |
|
355 | -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack | |
356 | -r--r--r-- 90 |
|
356 | -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack | |
357 |
|
357 | |||
358 | Pull should run background repack |
|
358 | Pull should run background repack | |
359 | $ cat >> .hg/hgrc <<EOF |
|
359 | $ cat >> .hg/hgrc <<EOF | |
360 | > [remotefilelog] |
|
360 | > [remotefilelog] | |
361 | > backgroundrepack=True |
|
361 | > backgroundrepack=True | |
362 | > EOF |
|
362 | > EOF | |
363 | $ clearcache |
|
363 | $ clearcache | |
364 | $ hg prefetch -r 0 |
|
364 | $ hg prefetch -r 0 | |
365 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
365 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
366 | $ hg prefetch -r 1 |
|
366 | $ hg prefetch -r 1 | |
367 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
367 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
368 | $ hg prefetch -r 2 |
|
368 | $ hg prefetch -r 2 | |
369 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
369 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
370 | $ hg prefetch -r 3 |
|
370 | $ hg prefetch -r 3 | |
371 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
371 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
372 |
|
372 | |||
373 | $ hg pull |
|
373 | $ hg pull | |
374 | pulling from ssh://user@dummy/master |
|
374 | pulling from ssh://user@dummy/master | |
375 | searching for changes |
|
375 | searching for changes | |
376 | no changes found |
|
376 | no changes found | |
377 | (running background incremental repack) |
|
377 | (running background incremental repack) | |
378 | $ sleep 0.5 |
|
378 | $ sleep 0.5 | |
379 | $ hg debugwaitonrepack >/dev/null 2>&1 |
|
379 | $ hg debugwaitonrepack >/dev/null 2>&1 | |
380 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack |
|
380 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack | |
381 | -r--r--r-- 301 09b8bf49256b3fc2175977ba97d6402e91a9a604.datapack |
|
381 | -r--r--r-- 301 671913bebdb7b95aae52a546662753eac7606e40.datapack | |
382 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack |
|
382 | $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack | |
383 | -r--r--r-- 336 094b530486dad4427a0faf6bcbc031571b99ca24.histpack |
|
383 | -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack | |
384 |
|
384 | |||
385 | Test environment variable resolution |
|
385 | Test environment variable resolution | |
386 | $ CACHEPATH=$TESTTMP/envcache hg prefetch --config 'remotefilelog.cachepath=$CACHEPATH' |
|
386 | $ CACHEPATH=$TESTTMP/envcache hg prefetch --config 'remotefilelog.cachepath=$CACHEPATH' | |
387 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
387 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
388 | $ find $TESTTMP/envcache | sort |
|
388 | $ find $TESTTMP/envcache | sort | |
389 | $TESTTMP/envcache |
|
389 | $TESTTMP/envcache | |
390 | $TESTTMP/envcache/master |
|
390 | $TESTTMP/envcache/master | |
391 | $TESTTMP/envcache/master/95 |
|
391 | $TESTTMP/envcache/master/95 | |
392 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a |
|
392 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a | |
393 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/577959738234a1eb241ed3ed4b22a575833f56e0 |
|
393 | $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/577959738234a1eb241ed3ed4b22a575833f56e0 | |
394 | $TESTTMP/envcache/repos |
|
394 | $TESTTMP/envcache/repos | |
395 |
|
395 | |||
396 | Test local remotefilelog blob is correct when based on a pack |
|
396 | Test local remotefilelog blob is correct when based on a pack | |
397 | $ hg prefetch -r . |
|
397 | $ hg prefetch -r . | |
398 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) |
|
398 | 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob) | |
399 | $ echo >> y |
|
399 | $ echo >> y | |
400 | $ hg commit -m y2 |
|
400 | $ hg commit -m y2 | |
401 | $ hg debugremotefilelog .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 |
|
401 | $ hg debugremotefilelog .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 | |
402 | size: 9 bytes |
|
402 | size: 9 bytes | |
403 | path: .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 |
|
403 | path: .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808 | |
404 | key: b70860edba4f |
|
404 | key: b70860edba4f | |
405 |
|
405 | |||
406 | node => p1 p2 linknode copyfrom |
|
406 | node => p1 p2 linknode copyfrom | |
407 | b70860edba4f => 577959738234 000000000000 08d3fbc98c48 |
|
407 | b70860edba4f => 577959738234 000000000000 08d3fbc98c48 | |
408 | 577959738234 => 1bb2e6237e03 000000000000 c7faf2fc439a x |
|
408 | 577959738234 => 1bb2e6237e03 000000000000 c7faf2fc439a x | |
409 | 1bb2e6237e03 => d4a3ed9310e5 000000000000 0b03bbc9e1e7 |
|
409 | 1bb2e6237e03 => d4a3ed9310e5 000000000000 0b03bbc9e1e7 | |
410 | d4a3ed9310e5 => aee31534993a 000000000000 421535db10b6 |
|
410 | d4a3ed9310e5 => aee31534993a 000000000000 421535db10b6 | |
411 | aee31534993a => 1406e7411862 000000000000 a89d614e2364 |
|
411 | aee31534993a => 1406e7411862 000000000000 a89d614e2364 | |
412 | 1406e7411862 => 000000000000 000000000000 b292c1e3311f |
|
412 | 1406e7411862 => 000000000000 000000000000 b292c1e3311f | |
413 |
|
413 | |||
414 | Test limiting the max delta chain length |
|
414 | Test limiting the max delta chain length | |
415 | $ hg repack --config packs.maxchainlen=1 |
|
415 | $ hg repack --config packs.maxchainlen=1 | |
416 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.dataidx |
|
416 | $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.dataidx | |
417 | $TESTTMP/hgcache/master/packs/a8378d888fe4325250720e51311a65d2509be742: |
|
417 | $TESTTMP/hgcache/master/packs/80f7c299aeb22849ebc28fbd11bb76078aa55918: | |
418 | x: |
|
418 | x: | |
419 | Node Delta Base Delta Length Blob Size |
|
419 | Node Delta Base Delta Length Blob Size | |
420 | 1bb2e6237e03 000000000000 8 8 |
|
420 | 1bb2e6237e03 000000000000 8 8 | |
421 | d4a3ed9310e5 1bb2e6237e03 12 6 |
|
421 | d4a3ed9310e5 1bb2e6237e03 12 6 | |
422 | aee31534993a 000000000000 4 4 |
|
422 | aee31534993a 000000000000 4 4 | |
423 | 1406e7411862 aee31534993a 12 2 |
|
423 | 1406e7411862 aee31534993a 12 2 | |
424 |
|
424 | |||
425 | Total: 36 20 (80.0% bigger) |
|
425 | Total: 36 20 (80.0% bigger) | |
426 | y: |
|
426 | y: | |
427 | Node Delta Base Delta Length Blob Size |
|
427 | Node Delta Base Delta Length Blob Size | |
428 | 577959738234 000000000000 70 8 |
|
428 | 577959738234 000000000000 70 8 | |
429 |
|
429 | |||
430 | Total: 70 8 (775.0% bigger) |
|
430 | Total: 70 8 (775.0% bigger) | |
431 |
|
431 | |||
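In the output above, aee31534993a restarts from a full text (delta base 000000000000) because the chain ahead of it already holds the one delta that packs.maxchainlen=1 allows. A sketch of that guard, with assumed names:

    NULLID = b'\x00' * 20

    def choose_delta_base(prev_node, chainlen, maxchainlen):
        # Once a chain holds maxchainlen deltas, store the next revision
        # as a full text and start a new chain.
        if maxchainlen is not None and chainlen >= maxchainlen:
            return NULLID, 0
        return prev_node, chainlen + 1

This reproduces the alternating full-text/delta pattern shown for x.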
432 | Test huge pack cleanup using different values of packs.maxpacksize: |
|
432 | Test huge pack cleanup using different values of packs.maxpacksize: | |
433 | $ hg repack --incremental --debug |
|
433 | $ hg repack --incremental --debug | |
434 | $ hg repack --incremental --debug --config packs.maxpacksize=512 |
|
434 | $ hg repack --incremental --debug --config packs.maxpacksize=512 | |
435 | removing oversize packfile $TESTTMP/hgcache/master/packs/a8378d888fe4325250720e51311a65d2509be742.datapack (426 bytes) |
|
435 | removing oversize packfile $TESTTMP/hgcache/master/packs/80f7c299aeb22849ebc28fbd11bb76078aa55918.datapack (426 bytes) | |
436 | removing oversize packfile $TESTTMP/hgcache/master/packs/a8378d888fe4325250720e51311a65d2509be742.dataidx (1.21 KB) |
|
436 | removing oversize packfile $TESTTMP/hgcache/master/packs/80f7c299aeb22849ebc28fbd11bb76078aa55918.dataidx (1.21 KB) | |
437 |
|
437 | |||
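packs.maxpacksize prunes oversize packs outright instead of repacking them, removing the .datapack together with its .dataidx, as the two "removing oversize packfile" lines show. A sketch of that cleanup under assumed names:

    import os

    def cleanup_oversize(packdir, maxpacksize):
        for name in os.listdir(packdir):
            if not name.endswith('.datapack'):
                continue
            pack = os.path.join(packdir, name)
            if os.path.getsize(pack) <= maxpacksize:
                continue
            idx = pack[:-len('.datapack')] + '.dataidx'
            for victim in (pack, idx):
                print('removing oversize packfile %s (%d bytes)'
                      % (victim, os.path.getsize(victim)))
                os.unlink(victim)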
438 | Do a repack where the new pack reuses a delta from the old pack |
|
438 | Do a repack where the new pack reuses a delta from the old pack | |
439 | $ clearcache |
|
439 | $ clearcache | |
440 | $ hg prefetch -r '2::3' |
|
440 | $ hg prefetch -r '2::3' | |
441 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over * (glob) |
|
441 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over * (glob) | |
442 | $ hg repack |
|
442 | $ hg repack | |
443 | $ hg debugdatapack $CACHEDIR/master/packs/*.datapack |
|
443 | $ hg debugdatapack $CACHEDIR/master/packs/*.datapack | |
444 | $TESTTMP/hgcache/master/packs/ |
|
444 | $TESTTMP/hgcache/master/packs/90cfb1a45e2549500caace30add04b58a6b243af: | |
445 | x: |
|
445 | x: | |
446 | Node Delta Base Delta Length Blob Size |
|
446 | Node Delta Base Delta Length Blob Size | |
447 | 1bb2e6237e03 000000000000 8 8 |
|
447 | 1bb2e6237e03 000000000000 8 8 | |
448 | d4a3ed9310e5 1bb2e6237e03 12 6 |
|
448 | d4a3ed9310e5 1bb2e6237e03 12 6 | |
449 |
|
449 | |||
450 | Total: 20 14 (42.9% bigger) |
|
450 | Total: 20 14 (42.9% bigger) | |
451 | $ hg prefetch -r '0::1' |
|
451 | $ hg prefetch -r '0::1' | |
452 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over * (glob) |
|
452 | 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over * (glob) | |
453 | $ hg repack |
|
453 | $ hg repack | |
454 | $ hg debugdatapack $CACHEDIR/master/packs/*.datapack |
|
454 | $ hg debugdatapack $CACHEDIR/master/packs/*.datapack | |
455 | $TESTTMP/hgcache/master/packs/09b8bf49256b3fc2175977ba97d6402e91a9a604: |
|
455 | $TESTTMP/hgcache/master/packs/671913bebdb7b95aae52a546662753eac7606e40: | |
456 | x: |
|
456 | x: | |
457 | Node Delta Base Delta Length Blob Size |
|
457 | Node Delta Base Delta Length Blob Size | |
458 | 1bb2e6237e03 000000000000 8 8 |
|
458 | 1bb2e6237e03 000000000000 8 8 | |
459 | d4a3ed9310e5 1bb2e6237e03 12 6 |
|
459 | d4a3ed9310e5 1bb2e6237e03 12 6 | |
460 | aee31534993a d4a3ed9310e5 12 4 |
|
460 | aee31534993a d4a3ed9310e5 12 4 | |
461 | 1406e7411862 aee31534993a 12 2 |
|
461 | 1406e7411862 aee31534993a 12 2 | |
462 |
|
462 | |||
463 | Total: 44 20 (120.0% bigger) |
|
463 | Total: 44 20 (120.0% bigger) |