@@ -2138,6 +2138,8 @@ def debugnodemap(ui, repo, **opts):
             docket, data = nm_data
             ui.write((b"uid: %s\n") % docket.uid)
             ui.write((b"tip-rev: %d\n") % docket.tip_rev)
+            ui.write((b"data-length: %d\n") % docket.data_length)
+            ui.write((b"data-unused: %d\n") % docket.data_unused)
 
 
 @command(
@@ -164,11 +164,11 @@ class PersistentNodeMapIndexObject(Index
         """
         if self._nm_root is None:
             return None
-        data = nodemaputil.update_persistent_data(
+        changed, data = nodemaputil.update_persistent_data(
             self, self._nm_root, self._nm_max_idx, self._nm_rev
         )
         self._nm_root = self._nm_max_idx = self._nm_rev = None
-        return data
+        return changed, data
 
     def update_nodemap_data(self, docket, nm_data):
         """provide full block of persisted binary data for a nodemap
@@ -37,10 +37,12 @@ def persisted_data(revlog):
         return None
     offset += S_VERSION.size
     headers = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
-    uid_size, tip_rev = headers
+    uid_size, tip_rev, data_length, data_unused = headers
     offset += S_HEADER.size
     docket = NodeMapDocket(pdata[offset : offset + uid_size])
     docket.tip_rev = tip_rev
+    docket.data_length = data_length
+    docket.data_unused = data_unused
 
     filename = _rawdata_filepath(revlog, docket)
     return docket, revlog.opener.tryread(filename)
@@ -78,12 +80,14 @@ def _persist_nodemap(tr, revlog):
     # first attemp an incremental update of the data
     if can_incremental and ondisk_docket is not None:
         target_docket = revlog._nodemap_docket.copy()
-        data = revlog.index.nodemap_data_incremental()
+        data_changed_count, data = revlog.index.nodemap_data_incremental()
         datafile = _rawdata_filepath(revlog, target_docket)
         # EXP-TODO: if this is a cache, this should use a cache vfs, not a
         # store vfs
         with revlog.opener(datafile, b'a') as fd:
             fd.write(data)
+        target_docket.data_length += len(data)
+        target_docket.data_unused += data_changed_count
     else:
         # otherwise fallback to a full new export
         target_docket = NodeMapDocket()
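
The incremental branch only ever appends: rewritten blocks get a fresh copy at the end of the rawdata file, and the docket's two new counters track how large the file has grown and how much of it is now dead. A minimal model of that bookkeeping (FakeDocket and incremental_persist are illustrative stand-ins, not Mercurial's API):

    import struct

    # one serialized trie block: 16 big-endian 32-bit slots, one per hex digit
    S_BLOCK = struct.Struct(">" + ("l" * 16))


    class FakeDocket(object):
        """Illustrative stand-in for the two counters NodeMapDocket grows here."""

        def __init__(self, data_length=0, data_unused=0):
            self.data_length = data_length  # total bytes in the rawdata file
            self.data_unused = data_unused  # bytes superseded by re-appended blocks


    def incremental_persist(docket, changed_blocks):
        """Account for appending `changed_blocks` re-serialized blocks.

        Each rewritten block is appended at the end of the data file, so the
        file grows by the appended bytes while the blocks' previous on-disk
        copies become dead weight; this patch uses the changed-block byte
        count as its estimate of that newly-unused data.
        """
        appended = changed_blocks * S_BLOCK.size
        docket.data_length += appended  # mirrors: data_length += len(data)
        docket.data_unused += appended  # mirrors: data_unused += data_changed_count


    docket = FakeDocket(data_length=122880)  # full serialization, 5001 revisions
    incremental_persist(docket, 3)           # one commit rewriting 3 blocks
    print(docket.data_length, docket.data_unused)  # 123072 192
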
@@ -96,6 +100,7 @@ def _persist_nodemap(tr, revlog):
         # store vfs
         with revlog.opener(datafile, b'w') as fd:
             fd.write(data)
+        target_docket.data_length = len(data)
     target_docket.tip_rev = revlog.tiprev()
     # EXP-TODO: if this is a cache, this should use a cache vfs, not a
     # store vfs
@@ -143,9 +148,8 @@ def _persist_nodemap(tr, revlog):
 
 # version 0 is experimental, no BC garantee, do no use outside of tests.
 ONDISK_VERSION = 0
-
 S_VERSION = struct.Struct(">B")
-S_HEADER = struct.Struct(">BQ")
+S_HEADER = struct.Struct(">BQQQ")
 
 ID_SIZE = 8
 
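
The widened S_HEADER is where the docket file's size change in the tests below comes from. A quick stdlib-only check of the sizes the two formats imply, assuming the uid is ID_SIZE random bytes rendered as hex (the 16-character globs in the tests suggest exactly that):

    import struct

    S_VERSION = struct.Struct(">B")      # 1-byte format version
    OLD_HEADER = struct.Struct(">BQ")    # uid length + tip-rev
    NEW_HEADER = struct.Struct(">BQQQ")  # ... + data-length + data-unused
    ID_SIZE = 8

    uid_len = ID_SIZE * 2  # 16 hexadecimal characters
    print(S_VERSION.size + OLD_HEADER.size + uid_len)  # 26: the old size=26
    print(S_VERSION.size + NEW_HEADER.size + uid_len)  # 42: the new size=42
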
@@ -168,17 +172,26 @@ class NodeMapDocket(object):
             uid = _make_uid()
         self.uid = uid
         self.tip_rev = None
+        self.data_length = None
+        self.data_unused = 0
 
     def copy(self):
         new = NodeMapDocket(uid=self.uid)
         new.tip_rev = self.tip_rev
+        new.data_length = self.data_length
+        new.data_unused = self.data_unused
         return new
 
     def serialize(self):
         """return serialized bytes for a docket using the passed uid"""
         data = []
         data.append(S_VERSION.pack(ONDISK_VERSION))
-        headers = (len(self.uid), self.tip_rev)
+        headers = (
+            len(self.uid),
+            self.tip_rev,
+            self.data_length,
+            self.data_unused,
+        )
         data.append(S_HEADER.pack(*headers))
         data.append(self.uid)
         return b''.join(data)
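
Putting the docket pieces together, serialization and parsing stay symmetric: serialize() packs version, header, then uid, and persisted_data() walks the same layout back. A self-contained round-trip sketch of the new format (plain functions standing in for the NodeMapDocket methods; the uid value is an arbitrary stand-in for _make_uid()):

    import struct

    ONDISK_VERSION = 0
    S_VERSION = struct.Struct(">B")
    S_HEADER = struct.Struct(">BQQQ")


    def serialize(uid, tip_rev, data_length, data_unused):
        # same field order as NodeMapDocket.serialize()
        return b''.join([
            S_VERSION.pack(ONDISK_VERSION),
            S_HEADER.pack(len(uid), tip_rev, data_length, data_unused),
            uid,
        ])


    def parse(pdata):
        # same walk as persisted_data(): version, header, then the uid
        offset = 0
        (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size])
        assert version == ONDISK_VERSION
        offset += S_VERSION.size
        uid_size, tip_rev, data_length, data_unused = S_HEADER.unpack(
            pdata[offset : offset + S_HEADER.size]
        )
        offset += S_HEADER.size
        uid = pdata[offset : offset + uid_size]
        return uid, tip_rev, data_length, data_unused


    uid = b'0123456789abcdef'  # stand-in for _make_uid()
    blob = serialize(uid, 5000, 122880, 0)
    assert len(blob) == 42     # matches the size=42 the tests expect
    assert parse(blob) == (uid, 5000, 122880, 0)
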
@@ -236,8 +249,11 @@ def persistent_data(index):
 def update_persistent_data(index, root, max_idx, last_rev):
     """return the incremental update for persistent nodemap from a given index
     """
-    trie = _update_trie(index, root, last_rev)
-    return _persist_trie(trie, existing_idx=max_idx)
+    changed_block, trie = _update_trie(index, root, last_rev)
+    return (
+        changed_block * S_BLOCK.size,
+        _persist_trie(trie, existing_idx=max_idx),
+    )
 
 
 S_BLOCK = struct.Struct(">" + ("l" * 16))
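
S_BLOCK, defined just below the hunk, fixes every serialized block at 64 bytes, which is why multiplying the changed-block count by S_BLOCK.size turns it directly into the byte figure the docket stores:

    import struct

    S_BLOCK = struct.Struct(">" + ("l" * 16))  # 16 slots, one per hex digit
    print(S_BLOCK.size)      # 64 bytes per block (16 x 4-byte big-endian ints)
    print(3 * S_BLOCK.size)  # 192: three rewritten blocks, see data-unused below
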
@@ -294,10 +310,11 @@ def _build_trie(index):
 
 def _update_trie(index, root, last_rev):
     """consume"""
+    changed = 0
     for rev in range(last_rev + 1, len(index)):
         hex = nodemod.hex(index[rev][7])
-        _insert_into_block(index, 0, root, rev, hex)
-    return root
+        changed += _insert_into_block(index, 0, root, rev, hex)
+    return changed, root
 
 
 def _insert_into_block(index, level, block, current_rev, current_hex):
@@ -309,6 +326,7 @@ def _insert_into_block(index, level, blo
     current_rev: the revision number we are adding
     current_hex: the hexadecimal representation of the of that revision
     """
+    changed = 1
     if block.ondisk_id is not None:
         block.ondisk_id = None
     hex_digit = _to_int(current_hex[level : level + 1])
@@ -318,7 +336,9 @@ def _insert_into_block(index, level, blo
         block[hex_digit] = current_rev
     elif isinstance(entry, dict):
         # need to recurse to an underlying block
-        _insert_into_block(index, level + 1, entry, current_rev, current_hex)
+        changed += _insert_into_block(
+            index, level + 1, entry, current_rev, current_hex
+        )
     else:
         # collision with a previously unique prefix, inserting new
         # vertices to fit both entry.
@@ -328,6 +348,7 @@ def _insert_into_block(index, level, blo
         block[hex_digit] = new
         _insert_into_block(index, level + 1, new, other_rev, other_hex)
         _insert_into_block(index, level + 1, new, current_rev, current_hex)
+    return changed
 
 
 def _persist_trie(root, existing_idx=None):
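
The `changed` accounting threaded through these three hunks can be modelled with a plain dict-based trie. A simplified sketch (leaves here store (rev, hex) pairs; Mercurial's real Block/ondisk_id machinery is omitted): each insert counts the blocks it rewrites along the already-existing path, while the fresh blocks a collision creates are left uncounted, since they never had an on-disk copy to invalidate:

    def insert(block, rev, hexnode, level=0):
        """Insert rev under hexnode; return the number of rewritten blocks."""
        changed = 1  # this block must be re-serialized whatever happens below
        digit = hexnode[level]
        entry = block.get(digit)
        if entry is None:
            block[digit] = (rev, hexnode)  # free slot: store a leaf
        elif isinstance(entry, dict):
            # recurse into an existing underlying block
            changed += insert(entry, rev, hexnode, level + 1)
        else:
            # collision with a previously unique prefix: move both entries
            # into a fresh block; fresh blocks were never persisted, so
            # their counts are dropped, mirroring the hunks above
            other_rev, other_hex = entry
            new = {}
            block[digit] = new
            insert(new, other_rev, other_hex, level + 1)
            insert(new, rev, hexnode, level + 1)
        return changed


    root = {}
    print(insert(root, 0, "ab12"))   # 1: only the root block is touched
    print(insert(root, 1, "cd34"))   # 1: the root again
    print(insert(root, 2, "abff"))   # 1: root rewritten; fresh blocks uncounted
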
@@ -15,8 +15,10 @@ Test the persistent on-disk nodemap
   $ hg debugnodemap --metadata
   uid: ???????????????? (glob)
   tip-rev: 5000
+  data-length: 122880
+  data-unused: 0
   $ f --size .hg/store/00changelog.n
-  .hg/store/00changelog.n: size=26
+  .hg/store/00changelog.n: size=42
   $ f --sha256 .hg/store/00changelog-*.nd
   .hg/store/00changelog-????????????????.nd: sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7 (glob)
   $ hg debugnodemap --dump-new | f --sha256 --size
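
As a sanity check, the reported data-length is an exact multiple of the 64-byte block size:

    print(divmod(122880, 64))  # (1920, 0): 1920 whole blocks, no remainder
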
@@ -50,11 +52,22 @@ add a new commit
   $ echo foo > foo
   $ hg add foo
   $ hg ci -m 'foo'
+
+#if pure
   $ hg debugnodemap --metadata
   uid: ???????????????? (glob)
   tip-rev: 5001
+  data-length: 123072
+  data-unused: 192
+#else
+  $ hg debugnodemap --metadata
+  uid: ???????????????? (glob)
+  tip-rev: 5001
+  data-length: 122880
+  data-unused: 0
+#endif
   $ f --size .hg/store/00changelog.n
-  .hg/store/00changelog.n: size=26
+  .hg/store/00changelog.n: size=42
 
 (The pure code use the debug code that perform incremental update, the C code reencode from scratch)
 
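
The pure-Python figures above line up with the block arithmetic: the incremental update appends three rewritten 64-byte blocks, while the C path re-encodes everything and starts clean:

    old_length = 122880           # data-length before the new commit
    appended = 3 * 64             # three blocks rewritten for the inserted node
    print(old_length + appended)  # 123072: data-length on the pure path
    print(appended)               # 192: data-unused on the pure path
    # the C path rewrites from scratch: data-length 122880, data-unused 0
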