nodemap: introduce append-only incremental update of the persistent data...
marmoute, r44805:50ad851e (default branch)
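
The change teaches the persistent nodemap machinery to grow its on-disk file in place: when a transaction only adds revisions, the existing trie bytes are left untouched, unchanged blocks are re-referenced by their on-disk index, and only new or modified blocks are serialized and appended. A minimal sketch of that composition using the reference implementation from this change (a hypothetical driver, not part of the commit; it assumes a Mercurial checkout containing this change is importable and uses a stripped-down 8-field index entry whose field 7 is the 20-byte node):

    from mercurial.pure.parsers import PersistentNodeMapIndexObject
    from mercurial.revlogutils import nodemap

    def entry(node):
        # minimal index entry; only field 7 (the node) matters here
        return (0, 0, 0, -1, -1, -1, -1, node)

    index = PersistentNodeMapIndexObject(b'')
    index.append(entry(b'\x11' * 20))
    full = nodemap.persistent_data(index)      # initial full serialization

    root, max_idx = nodemap.parse_data(full)   # rebuild the trie from "disk"
    index.append(entry(b'\x22' * 20))          # one new revision arrives
    tail = nodemap.update_persistent_data(index, root, max_idx, 0)

    on_disk = full + tail                      # append-only: old bytes are kept
    assert len(on_disk) % 64 == 0              # 64-byte blocks (16 x 4 bytes)

The last block of the file is always the root, so a reader that parses the concatenated bytes ends up with the updated trie, while superseded blocks simply become dead weight until the next full rewrite.
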
@@ -1,266 +1,284 @@
 # parsers.py - Python implementation of parsers.c
 #
 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import struct
 import zlib

 from ..node import nullid, nullrev
 from .. import (
     pycompat,
     util,
 )

 from ..revlogutils import nodemap as nodemaputil

 stringio = pycompat.bytesio


 _pack = struct.pack
 _unpack = struct.unpack
 _compress = zlib.compress
 _decompress = zlib.decompress

 # Some code below makes tuples directly because it's more convenient. However,
 # code outside this module should always use dirstatetuple.
 def dirstatetuple(*x):
     # x is a tuple
     return x


 indexformatng = b">Qiiiiii20s12x"
 indexfirst = struct.calcsize(b'Q')
 sizeint = struct.calcsize(b'i')
 indexsize = struct.calcsize(indexformatng)


 def gettype(q):
     return int(q & 0xFFFF)


 def offset_type(offset, type):
     return int(int(offset) << 16 | type)


 class BaseIndexObject(object):
     @property
     def nodemap(self):
         msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
         util.nouideprecwarn(msg, b'5.3', stacklevel=2)
         return self._nodemap

     @util.propertycache
     def _nodemap(self):
         nodemap = nodemaputil.NodeMap({nullid: nullrev})
         for r in range(0, len(self)):
             n = self[r][7]
             nodemap[n] = r
         return nodemap

     def has_node(self, node):
         """return True if the node exists in the index"""
         return node in self._nodemap

     def rev(self, node):
         """return a revision for a node

         If the node is unknown, raise a RevlogError"""
         return self._nodemap[node]

     def get_rev(self, node):
         """return a revision for a node

         If the node is unknown, return None"""
         return self._nodemap.get(node)

     def _stripnodes(self, start):
         if '_nodemap' in vars(self):
             for r in range(start, len(self)):
                 n = self[r][7]
                 del self._nodemap[n]

     def clearcaches(self):
         self.__dict__.pop('_nodemap', None)

     def __len__(self):
         return self._lgt + len(self._extra)

     def append(self, tup):
         if '_nodemap' in vars(self):
             self._nodemap[tup[7]] = len(self)
         self._extra.append(tup)

     def _check_index(self, i):
         if not isinstance(i, int):
             raise TypeError(b"expecting int indexes")
         if i < 0 or i >= len(self):
             raise IndexError

     def __getitem__(self, i):
         if i == -1:
             return (0, 0, 0, -1, -1, -1, -1, nullid)
         self._check_index(i)
         if i >= self._lgt:
             return self._extra[i - self._lgt]
         index = self._calculate_index(i)
         r = struct.unpack(indexformatng, self._data[index : index + indexsize])
         if i == 0:
             e = list(r)
             type = gettype(e[0])
             e[0] = offset_type(0, type)
             return tuple(e)
         return r


 class IndexObject(BaseIndexObject):
     def __init__(self, data):
         assert len(data) % indexsize == 0
         self._data = data
         self._lgt = len(data) // indexsize
         self._extra = []

     def _calculate_index(self, i):
         return i * indexsize

     def __delitem__(self, i):
         if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
             raise ValueError(b"deleting slices only supports a:-1 with step 1")
         i = i.start
         self._check_index(i)
         self._stripnodes(i)
         if i < self._lgt:
             self._data = self._data[: i * indexsize]
             self._lgt = i
             self._extra = []
         else:
             self._extra = self._extra[: i - self._lgt]


 class PersistentNodeMapIndexObject(IndexObject):
     """a debug-oriented class to test persistent nodemap

     We need a simple python object to test API and higher level behavior. See
     the Rust implementation for more serious usage. This should be used only
     through the dedicated `devel.persistent-nodemap` config.
     """

     def nodemap_data_all(self):
         """Return bytes containing a full serialization of a nodemap

         The nodemap should be valid for the full set of revisions in the
         index."""
         return nodemaputil.persistent_data(self)

+    def nodemap_data_incremental(self):
+        """Return bytes containing an incremental update to persistent nodemap
+
+        This contains the data for an append-only update of the data provided
+        in the last call to `update_nodemap_data`.
+        """
+        if self._nm_root is None:
+            return None
+        data = nodemaputil.update_persistent_data(
+            self, self._nm_root, self._nm_max_idx, self._nm_rev
+        )
+        self._nm_root = self._nm_max_idx = self._nm_rev = None
+        return data
+
     def update_nodemap_data(self, nm_data):
         """provide full block of persisted binary data for a nodemap

         The data are expected to come from disk. See `nodemap_data_all` for a
         producer of such data."""
         if nm_data is not None:
-            nodemaputil.parse_data(nm_data)
+            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
+            if self._nm_root:
+                self._nm_rev = len(self) - 1
+            else:
+                self._nm_root = self._nm_max_idx = self._nm_rev = None


 class InlinedIndexObject(BaseIndexObject):
     def __init__(self, data, inline=0):
         self._data = data
         self._lgt = self._inline_scan(None)
         self._inline_scan(self._lgt)
         self._extra = []

     def _inline_scan(self, lgt):
         off = 0
         if lgt is not None:
             self._offsets = [0] * lgt
         count = 0
         while off <= len(self._data) - indexsize:
             (s,) = struct.unpack(
                 b'>i', self._data[off + indexfirst : off + sizeint + indexfirst]
             )
             if lgt is not None:
                 self._offsets[count] = off
             count += 1
             off += indexsize + s
         if off != len(self._data):
             raise ValueError(b"corrupted data")
         return count

     def __delitem__(self, i):
         if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
             raise ValueError(b"deleting slices only supports a:-1 with step 1")
         i = i.start
         self._check_index(i)
         self._stripnodes(i)
         if i < self._lgt:
             self._offsets = self._offsets[:i]
             self._lgt = i
             self._extra = []
         else:
             self._extra = self._extra[: i - self._lgt]

     def _calculate_index(self, i):
         return self._offsets[i]


 def parse_index2(data, inline):
     if not inline:
         return IndexObject(data), None
     return InlinedIndexObject(data, inline), (0, data)


 def parse_index_devel_nodemap(data, inline):
     """like parse_index2, but always return a PersistentNodeMapIndexObject
     """
     return PersistentNodeMapIndexObject(data), None


 def parse_dirstate(dmap, copymap, st):
     parents = [st[:20], st[20:40]]
     # dereference fields so they will be local in loop
     format = b">cllll"
     e_size = struct.calcsize(format)
     pos1 = 40
     l = len(st)

     # the inner loop
     while pos1 < l:
         pos2 = pos1 + e_size
         e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
         pos1 = pos2 + e[4]
         f = st[pos2:pos1]
         if b'\0' in f:
             f, c = f.split(b'\0')
             copymap[f] = c
         dmap[f] = e[:4]
     return parents


 def pack_dirstate(dmap, copymap, pl, now):
     now = int(now)
     cs = stringio()
     write = cs.write
     write(b"".join(pl))
     for f, e in pycompat.iteritems(dmap):
         if e[0] == b'n' and e[3] == now:
             # The file was last modified "simultaneously" with the current
             # write to dirstate (i.e. within the same second for file-
             # systems with a granularity of 1 sec). This commonly happens
             # for at least a couple of files on 'update'.
             # The user could change the file without changing its size
             # within the same second. Invalidate the file's mtime in
             # dirstate, forcing future 'status' calls to compare the
             # contents of the file if the size is the same. This prevents
             # mistakenly treating such files as clean.
             e = dirstatetuple(e[0], e[1], e[2], -1)
             dmap[f] = e

         if f in copymap:
             f = b"%s\0%s" % (f, copymap[f])
         e = _pack(b">cllll", e[0], e[1], e[2], e[3], len(f))
         write(e)
         write(f)
     return cs.getvalue()
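
For illustration, a hedged sketch of how the methods added above are meant to be driven (hypothetical, not part of the commit; same stripped-down index entries and import assumption as the sketch near the top):

    from mercurial.pure.parsers import parse_index_devel_nodemap

    index, _cache = parse_index_devel_nodemap(b'', False)
    index.append((0, 0, 0, -1, -1, -1, -1, b'\x11' * 20))
    full = index.nodemap_data_all()          # full serialization for rev 0

    index.update_nodemap_data(full)          # as if the bytes were read back
    index.append((0, 0, 0, -1, -1, -1, -1, b'\x22' * 20))
    tail = index.nodemap_data_incremental()  # append-only bytes for rev 1
    assert tail is not None
    assert index.nodemap_data_incremental() is None  # state is consumed

Note that `nodemap_data_incremental` is consume-once: it hands back the pending delta and clears the parsed state, so the caller must write (or discard) the returned bytes before asking again.
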
@@ -1,414 +1,456 @@
 # nodemap.py - nodemap related code and utilities
 #
 # Copyright 2019 Pierre-Yves David <pierre-yves.david@octobus.net>
 # Copyright 2019 George Racinet <georges.racinet@octobus.net>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import os
 import re
 import struct

 from .. import (
     error,
     node as nodemod,
     util,
 )


 class NodeMap(dict):
     def __missing__(self, x):
         raise error.RevlogError(b'unknown node: %s' % x)


 def persisted_data(revlog):
     """read the nodemap for a revlog from disk"""
     if revlog.nodemap_file is None:
         return None
     pdata = revlog.opener.tryread(revlog.nodemap_file)
     if not pdata:
         return None
     offset = 0
     (version,) = S_VERSION.unpack(pdata[offset : offset + S_VERSION.size])
     if version != ONDISK_VERSION:
         return None
     offset += S_VERSION.size
     (uid_size,) = S_HEADER.unpack(pdata[offset : offset + S_HEADER.size])
     offset += S_HEADER.size
     docket = NodeMapDocket(pdata[offset : offset + uid_size])

     filename = _rawdata_filepath(revlog, docket)
     return docket, revlog.opener.tryread(filename)


 def setup_persistent_nodemap(tr, revlog):
     """Install whatever is needed transaction side to persist a nodemap on disk

     (only actually persist the nodemap if this is relevant for this revlog)
     """
     if revlog._inline:
         return  # inlined revlogs are too small for this to be relevant
     if revlog.nodemap_file is None:
         return  # we do not use persistent_nodemap on this revlog
     callback_id = b"revlog-persistent-nodemap-%s" % revlog.nodemap_file
     if tr.hasfinalize(callback_id):
         return  # no need to register again
     tr.addfinalize(callback_id, lambda tr: _persist_nodemap(tr, revlog))


 def _persist_nodemap(tr, revlog):
     """Write nodemap data on disk for a given revlog
     """
     if getattr(revlog, 'filteredrevs', ()):
         raise error.ProgrammingError(
             "cannot persist nodemap of a filtered changelog"
         )
     if revlog.nodemap_file is None:
         msg = "calling persist nodemap on a revlog without the feature enabled"
         raise error.ProgrammingError(msg)
-    if util.safehasattr(revlog.index, "nodemap_data_all"):
-        data = revlog.index.nodemap_data_all()
+
+    can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
+    ondisk_docket = revlog._nodemap_docket
+
+    # first attempt an incremental update of the data
+    if can_incremental and ondisk_docket is not None:
+        target_docket = revlog._nodemap_docket.copy()
+        data = revlog.index.nodemap_data_incremental()
+        datafile = _rawdata_filepath(revlog, target_docket)
+        # EXP-TODO: if this is a cache, this should use a cache vfs, not a
+        # store vfs
+        with revlog.opener(datafile, b'a') as fd:
+            fd.write(data)
     else:
-        data = persistent_data(revlog.index)
+        # otherwise fall back to a full new export
         target_docket = NodeMapDocket()
         datafile = _rawdata_filepath(revlog, target_docket)
+        if util.safehasattr(revlog.index, "nodemap_data_all"):
+            data = revlog.index.nodemap_data_all()
+        else:
+            data = persistent_data(revlog.index)
+        # EXP-TODO: if this is a cache, this should use a cache vfs, not a
+        # store vfs
+        with revlog.opener(datafile, b'w') as fd:
+            fd.write(data)
+    # EXP-TODO: if this is a cache, this should use a cache vfs, not a
+    # store vfs
+    with revlog.opener(revlog.nodemap_file, b'w', atomictemp=True) as fp:
+        fp.write(target_docket.serialize())
+    revlog._nodemap_docket = target_docket
+    # EXP-TODO: if the transaction aborts, we should remove the new data and
+    # reinstall the old one.
+
+    # search for old data files in all cases, some older process might have
+    # left one behind.
     olds = _other_rawdata_filepath(revlog, target_docket)
     if olds:
         realvfs = getattr(revlog, '_realopener', revlog.opener)

         def cleanup(tr):
             for oldfile in olds:
                 realvfs.tryunlink(oldfile)

         callback_id = b"revlog-cleanup-nodemap-%s" % revlog.nodemap_file
         tr.addpostclose(callback_id, cleanup)
-    # EXP-TODO: if this is a cache, this should use a cache vfs, not a
-    # store vfs
-    with revlog.opener(datafile, b'w') as fd:
-        fd.write(data)
-    # EXP-TODO: if this is a cache, this should use a cache vfs, not a
-    # store vfs
-    with revlog.opener(revlog.nodemap_file, b'w', atomictemp=True) as fp:
-        fp.write(target_docket.serialize())
-    revlog._nodemap_docket = target_docket
-    # EXP-TODO: if the transaction aborts, we should remove the new data and
-    # reinstall the old one.


 ### Nodemap docket file
 #
 # The nodemap data are stored on disk using 2 files:
 #
 # * a raw data file containing a persistent nodemap
 #   (see `Nodemap Trie` section)
 #
 # * a small "docket" file containing metadata
 #
 # While the nodemap data can be multiple tens of megabytes, the "docket" is
 # small, so it is easy to update it automatically or to duplicate its content
 # during a transaction.
 #
 # Multiple raw data files can exist at the same time (the currently valid one
 # and a new one being used by an in-progress transaction). To accommodate
 # this, the filename hosting the raw data has a variable part. The exact
 # filename is specified inside the "docket" file.
 #
 # The docket file contains information to find, qualify and validate the raw
 # data. Its content is currently very light, but it will expand as the on-disk
 # nodemap gains the necessary features to be used in production.

 # version 0 is experimental, no BC guarantee, do not use outside of tests.
 ONDISK_VERSION = 0

 S_VERSION = struct.Struct(">B")
 S_HEADER = struct.Struct(">B")

 ID_SIZE = 8


 def _make_uid():
     """return a new unique identifier.

     The identifier is random and composed of ascii characters."""
     return nodemod.hex(os.urandom(ID_SIZE))


 class NodeMapDocket(object):
     """metadata associated with persistent nodemap data

     The persistent data may come from disk or be on their way to disk.
     """

     def __init__(self, uid=None):
         if uid is None:
             uid = _make_uid()
         self.uid = uid

     def copy(self):
         return NodeMapDocket(uid=self.uid)

     def serialize(self):
         """return serialized bytes for a docket using the passed uid"""
         data = []
         data.append(S_VERSION.pack(ONDISK_VERSION))
         data.append(S_HEADER.pack(len(self.uid)))
         data.append(self.uid)
         return b''.join(data)


 def _rawdata_filepath(revlog, docket):
     """The (vfs relative) nodemap's rawdata file for a given uid"""
     prefix = revlog.nodemap_file[:-2]
     return b"%s-%s.nd" % (prefix, docket.uid)


 def _other_rawdata_filepath(revlog, docket):
     prefix = revlog.nodemap_file[:-2]
     pattern = re.compile(br"(^|/)%s-[0-9a-f]+\.nd$" % prefix)
     new_file_path = _rawdata_filepath(revlog, docket)
     new_file_name = revlog.opener.basename(new_file_path)
     dirpath = revlog.opener.dirname(new_file_path)
     others = []
     for f in revlog.opener.listdir(dirpath):
         if pattern.match(f) and f != new_file_name:
             others.append(f)
     return others


 ### Nodemap Trie
 #
 # This is a simple reference implementation to compute and persist a nodemap
 # trie. This reference implementation is write only. The python version of
 # this is not expected to be actually used, since it won't provide a
 # performance improvement over the existing non-persistent C implementation.
 #
 # The nodemap is persisted as a Trie using 4bits-address/16-entries blocks.
 # Each revision can be addressed using its node's shortest prefix.
 #
 # The trie is stored as a sequence of blocks. Each block contains 16 entries
 # (signed 32bit integer, big endian). Each entry can be one of the following:
 #
 # * value >= 0 -> index of sub-block
 # * value == -1 -> no value
 # * value < -1 -> a revision value: rev = -(value+2)
 #
 # The implementation focuses on simplicity, not on performance. A Rust
 # implementation should provide an efficient version of the same binary
 # persistence. This reference python implementation is never meant to be
 # extensively used in production.


 def persistent_data(index):
     """return the persistent binary form for a nodemap for a given index
     """
     trie = _build_trie(index)
     return _persist_trie(trie)


+def update_persistent_data(index, root, max_idx, last_rev):
+    """return the incremental update for persistent nodemap from a given index
+    """
+    trie = _update_trie(index, root, last_rev)
+    return _persist_trie(trie, existing_idx=max_idx)
+
+
 S_BLOCK = struct.Struct(">" + ("l" * 16))

 NO_ENTRY = -1
 # rev 0 needs to be -2 because 0 is used by blocks and -1 is a special value.
 REV_OFFSET = 2


 def _transform_rev(rev):
     """Return the number used to represent the rev in the tree.

     (or retrieve a rev number from such representation)

     Note that this is an involution, a function equal to its inverse (i.e.
     which gives the identity when applied to itself).
     """
     return -(rev + REV_OFFSET)


 def _to_int(hex_digit):
     """turn an hexadecimal digit into a proper integer"""
     return int(hex_digit, 16)


 class Block(dict):
     """represent a block of the Trie

     contains up to 16 entries indexed from 0 to 15"""

     def __init__(self):
         super(Block, self).__init__()
         # If this block exists on disk, here is its ID
         self.ondisk_id = None

     def __iter__(self):
         return iter(self.get(i) for i in range(16))


 def _build_trie(index):
     """build a nodemap trie

     The nodemap stores revision numbers for each unique prefix.

     Each block is a dictionary with keys in `[0, 15]`. Values are either
     another block or a revision number.
     """
     root = Block()
     for rev in range(len(index)):
         hex = nodemod.hex(index[rev][7])
         _insert_into_block(index, 0, root, rev, hex)
     return root


+def _update_trie(index, root, last_rev):
+    """update a nodemap trie with the revisions added after `last_rev`"""
+    for rev in range(last_rev + 1, len(index)):
+        hex = nodemod.hex(index[rev][7])
+        _insert_into_block(index, 0, root, rev, hex)
+    return root
+
+
 def _insert_into_block(index, level, block, current_rev, current_hex):
     """insert a new revision in a block

     index: the index we are adding revision for
     level: the depth of the current block in the trie
     block: the block currently being considered
     current_rev: the revision number we are adding
     current_hex: the hexadecimal representation of the node of that revision
     """
+    if block.ondisk_id is not None:
+        block.ondisk_id = None
     hex_digit = _to_int(current_hex[level : level + 1])
     entry = block.get(hex_digit)
     if entry is None:
         # no entry, simply store the revision number
         block[hex_digit] = current_rev
     elif isinstance(entry, dict):
         # need to recurse to an underlying block
         _insert_into_block(index, level + 1, entry, current_rev, current_hex)
     else:
         # collision with a previously unique prefix, inserting new
         # vertices to fit both entries.
         other_hex = nodemod.hex(index[entry][7])
         other_rev = entry
         new = Block()
         block[hex_digit] = new
         _insert_into_block(index, level + 1, new, other_rev, other_hex)
         _insert_into_block(index, level + 1, new, current_rev, current_hex)


-def _persist_trie(root):
+def _persist_trie(root, existing_idx=None):
     """turn a nodemap trie into persistent binary data

     See `_build_trie` for nodemap trie structure"""
     block_map = {}
+    if existing_idx is not None:
+        base_idx = existing_idx + 1
+    else:
+        base_idx = 0
     chunks = []
     for tn in _walk_trie(root):
-        block_map[id(tn)] = len(chunks)
-        chunks.append(_persist_block(tn, block_map))
+        if tn.ondisk_id is not None:
+            block_map[id(tn)] = tn.ondisk_id
+        else:
+            block_map[id(tn)] = len(chunks) + base_idx
+            chunks.append(_persist_block(tn, block_map))
     return b''.join(chunks)


 def _walk_trie(block):
     """yield all the blocks in a trie

     Children blocks are always yielded before their parent block.
     """
     for (_, item) in sorted(block.items()):
         if isinstance(item, dict):
             for sub_block in _walk_trie(item):
                 yield sub_block
     yield block


 def _persist_block(block_node, block_map):
     """produce persistent binary data for a single block

     Children blocks are assumed to be already persisted and present in
     block_map.
     """
     data = tuple(_to_value(v, block_map) for v in block_node)
     return S_BLOCK.pack(*data)


 def _to_value(item, block_map):
     """persist any value as an integer"""
     if item is None:
         return NO_ENTRY
     elif isinstance(item, dict):
         return block_map[id(item)]
     else:
         return _transform_rev(item)


 def parse_data(data):
     """parse nodemap data into a nodemap Trie"""
     if (len(data) % S_BLOCK.size) != 0:
         msg = "nodemap data size is not a multiple of block size (%d): %d"
         raise error.Abort(msg % (S_BLOCK.size, len(data)))
     if not data:
-        return Block()
+        return Block(), None
     block_map = {}
     new_blocks = []
     for i in range(0, len(data), S_BLOCK.size):
         block = Block()
         block.ondisk_id = len(block_map)
         block_map[block.ondisk_id] = block
         block_data = data[i : i + S_BLOCK.size]
         values = S_BLOCK.unpack(block_data)
         new_blocks.append((block, values))
     for b, values in new_blocks:
         for idx, v in enumerate(values):
             if v == NO_ENTRY:
                 continue
             elif v >= 0:
                 b[idx] = block_map[v]
             else:
                 b[idx] = _transform_rev(v)
-    return block
+    return block, i // S_BLOCK.size


 # debug utility


 def check_data(ui, index, data):
     """verify that the provided nodemap data are valid for the given index"""
     ret = 0
     ui.status((b"revision in index: %d\n") % len(index))
-    root = parse_data(data)
+    root, __ = parse_data(data)
     all_revs = set(_all_revisions(root))
     ui.status((b"revision in nodemap: %d\n") % len(all_revs))
     for r in range(len(index)):
         if r not in all_revs:
             msg = b" revision missing from nodemap: %d\n" % r
             ui.write_err(msg)
             ret = 1
         else:
             all_revs.remove(r)
         nm_rev = _find_node(root, nodemod.hex(index[r][7]))
         if nm_rev is None:
             msg = b" revision node does not match any entries: %d\n" % r
             ui.write_err(msg)
             ret = 1
         elif nm_rev != r:
             msg = (
                 b" revision node does not match the expected revision: "
                 b"%d != %d\n" % (r, nm_rev)
             )
             ui.write_err(msg)
             ret = 1

     if all_revs:
         for r in sorted(all_revs):
             msg = b" extra revision in nodemap: %d\n" % r
             ui.write_err(msg)
             ret = 1
     return ret


 def _all_revisions(root):
     """return all revisions stored in a Trie"""
     for block in _walk_trie(root):
         for v in block:
             if v is None or isinstance(v, Block):
                 continue
             yield v


 def _find_node(block, node):
     """find the revision associated with a given node"""
     entry = block.get(_to_int(node[0:1]))
     if isinstance(entry, dict):
         return _find_node(entry, node[1:])
     return entry
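
To make the trie entry encoding described in the `Nodemap Trie` comment concrete, here is a small worked example (illustrative only; the `ff ff fa c2` bytes happen to match a pattern visible in the hexdump of the test below, assuming it starts a 4-byte entry):

    import struct

    REV_OFFSET = 2

    def _transform_rev(rev):
        # involution: the same function encodes a revision and decodes a value
        return -(rev + REV_OFFSET)

    assert _transform_rev(0) == -2                       # rev 0 is stored as -2
    assert _transform_rev(_transform_rev(1340)) == 1340  # self-inverse
    assert struct.pack(">l", _transform_rev(1340)) == b"\xff\xff\xfa\xc2"
    assert struct.pack(">l", -1) == b"\xff\xff\xff\xff"  # NO_ENTRY on disk
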
@@ -1,56 +1,67 @@
 ===================================
 Test the persistent on-disk nodemap
 ===================================


   $ hg init test-repo
   $ cd test-repo
   $ cat << EOF >> .hg/hgrc
   > [experimental]
   > exp-persistent-nodemap=yes
   > [devel]
   > persistent-nodemap=yes
   > EOF
   $ hg debugbuilddag .+5000
   $ f --size .hg/store/00changelog.n
   .hg/store/00changelog.n: size=18
   $ f --sha256 .hg/store/00changelog-*.nd
   .hg/store/00changelog-????????????????.nd: sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7 (glob)
   $ hg debugnodemap --dump-new | f --sha256 --size
   size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
   $ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
   size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
   0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   0010: ff ff ff ff ff ff ff ff ff ff fa c2 ff ff ff ff |................|
   0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   0030: ff ff ff ff ff ff ed b3 ff ff ff ff ff ff ff ff |................|
   0040: ff ff ff ff ff ff ee 34 00 00 00 00 ff ff ff ff |.......4........|
   0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   0080: ff ff ff ff ff ff f8 50 ff ff ff ff ff ff ff ff |.......P........|
   0090: ff ff ff ff ff ff ff ff ff ff ec c7 ff ff ff ff |................|
   00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   00b0: ff ff ff ff ff ff fa be ff ff f2 fc ff ff ff ff |................|
   00c0: ff ff ff ff ff ff ef ea ff ff ff ff ff ff f9 17 |................|
   00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
   $ hg debugnodemap --check
   revision in index: 5001
   revision in nodemap: 5001

 add a new commit

   $ hg up
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ echo foo > foo
   $ hg add foo
   $ hg ci -m 'foo'
   $ f --size .hg/store/00changelog.n
   .hg/store/00changelog.n: size=18
+
+(The pure code uses the debug code that performs an incremental update, the C code re-encodes from scratch)
+
+#if pure
+  $ f --sha256 .hg/store/00changelog-*.nd --size
+  .hg/store/00changelog-????????????????.nd: size=123072, sha256=136472751566c8198ff09e306a7d2f9bd18bd32298d614752b73da4d6df23340 (glob)
+
+#else
   $ f --sha256 .hg/store/00changelog-*.nd --size
   .hg/store/00changelog-????????????????.nd: size=122880, sha256=bfafebd751c4f6d116a76a37a1dee2a251747affe7efbcc4f4842ccc746d4db9 (glob)
+
+#endif
+
   $ hg debugnodemap --check
   revision in index: 5002
   revision in nodemap: 5002