##// END OF EJS Templates
revlog: prepare pure parser for being overloaded...
Raphaël Gomès -
r47136:095fa99a default
parent child Browse files
Show More
@@ -1,286 +1,287 b''
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import nullid, nullrev
14 14 from .. import (
15 15 pycompat,
16 16 util,
17 17 )
18 18
19 19 from ..revlogutils import nodemap as nodemaputil
20 20
21 21 stringio = pycompat.bytesio
22 22
23 23
24 24 _pack = struct.pack
25 25 _unpack = struct.unpack
26 26 _compress = zlib.compress
27 27 _decompress = zlib.decompress
28 28
29 29 # Some code below makes tuples directly because it's more convenient. However,
30 30 # code outside this module should always use dirstatetuple.
31 31 def dirstatetuple(*x):
32 32 # x is a tuple
33 33 return x
34 34
35 35
36 indexformatng = b">Qiiiiii20s12x"
37 indexfirst = struct.calcsize(b'Q')
38 sizeint = struct.calcsize(b'i')
39 indexsize = struct.calcsize(indexformatng)
40 nullitem = (0, 0, 0, -1, -1, -1, -1, nullid)
41
42
43 36 def gettype(q):
44 37 return int(q & 0xFFFF)
45 38
46 39
47 40 def offset_type(offset, type):
48 41 return int(int(offset) << 16 | type)
49 42
50 43
51 44 class BaseIndexObject(object):
45 index_format = b">Qiiiiii20s12x"
46 big_int_size = struct.calcsize(b'Q')
47 int_size = struct.calcsize(b'i')
48 index_size = struct.calcsize(index_format)
49 null_item = (0, 0, 0, -1, -1, -1, -1, nullid)
50
52 51 @property
53 52 def nodemap(self):
54 53 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
55 54 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
56 55 return self._nodemap
57 56
58 57 @util.propertycache
59 58 def _nodemap(self):
60 59 nodemap = nodemaputil.NodeMap({nullid: nullrev})
61 60 for r in range(0, len(self)):
62 61 n = self[r][7]
63 62 nodemap[n] = r
64 63 return nodemap
65 64
66 65 def has_node(self, node):
 67 66 """return True if the node exists in the index"""
68 67 return node in self._nodemap
69 68
70 69 def rev(self, node):
71 70 """return a revision for a node
72 71
73 72 If the node is unknown, raise a RevlogError"""
74 73 return self._nodemap[node]
75 74
76 75 def get_rev(self, node):
77 76 """return a revision for a node
78 77
79 78 If the node is unknown, return None"""
80 79 return self._nodemap.get(node)
81 80
82 81 def _stripnodes(self, start):
83 82 if '_nodemap' in vars(self):
84 83 for r in range(start, len(self)):
85 84 n = self[r][7]
86 85 del self._nodemap[n]
87 86
88 87 def clearcaches(self):
89 88 self.__dict__.pop('_nodemap', None)
90 89
91 90 def __len__(self):
92 91 return self._lgt + len(self._extra)
93 92
94 93 def append(self, tup):
95 94 if '_nodemap' in vars(self):
96 95 self._nodemap[tup[7]] = len(self)
97 data = _pack(indexformatng, *tup)
96 data = _pack(self.index_format, *tup)
98 97 self._extra.append(data)
99 98
100 99 def _check_index(self, i):
101 100 if not isinstance(i, int):
102 101 raise TypeError(b"expecting int indexes")
103 102 if i < 0 or i >= len(self):
104 103 raise IndexError
105 104
106 105 def __getitem__(self, i):
107 106 if i == -1:
108 return nullitem
107 return self.null_item
109 108 self._check_index(i)
110 109 if i >= self._lgt:
111 110 data = self._extra[i - self._lgt]
112 111 else:
113 112 index = self._calculate_index(i)
114 data = self._data[index : index + indexsize]
115 r = _unpack(indexformatng, data)
113 data = self._data[index : index + self.index_size]
114 r = _unpack(self.index_format, data)
116 115 if self._lgt and i == 0:
117 116 r = (offset_type(0, gettype(r[0])),) + r[1:]
118 117 return r
119 118
120 119
121 120 class IndexObject(BaseIndexObject):
122 121 def __init__(self, data):
123 assert len(data) % indexsize == 0
122 assert len(data) % self.index_size == 0
124 123 self._data = data
125 self._lgt = len(data) // indexsize
124 self._lgt = len(data) // self.index_size
126 125 self._extra = []
127 126
128 127 def _calculate_index(self, i):
129 return i * indexsize
128 return i * self.index_size
130 129
131 130 def __delitem__(self, i):
132 131 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
133 132 raise ValueError(b"deleting slices only supports a:-1 with step 1")
134 133 i = i.start
135 134 self._check_index(i)
136 135 self._stripnodes(i)
137 136 if i < self._lgt:
138 self._data = self._data[: i * indexsize]
137 self._data = self._data[: i * self.index_size]
139 138 self._lgt = i
140 139 self._extra = []
141 140 else:
142 141 self._extra = self._extra[: i - self._lgt]
143 142
144 143
145 144 class PersistentNodeMapIndexObject(IndexObject):
146 145 """a Debug oriented class to test persistent nodemap
147 146
148 147 We need a simple python object to test API and higher level behavior. See
149 148 the Rust implementation for more serious usage. This should be used only
150 149 through the dedicated `devel.persistent-nodemap` config.
151 150 """
152 151
153 152 def nodemap_data_all(self):
154 153 """Return bytes containing a full serialization of a nodemap
155 154
156 155 The nodemap should be valid for the full set of revisions in the
157 156 index."""
158 157 return nodemaputil.persistent_data(self)
159 158
160 159 def nodemap_data_incremental(self):
 161 160 """Return bytes containing an incremental update to persistent nodemap
162 161
 163 162 This contains the data for an append-only update of the data provided
164 163 in the last call to `update_nodemap_data`.
165 164 """
166 165 if self._nm_root is None:
167 166 return None
168 167 docket = self._nm_docket
169 168 changed, data = nodemaputil.update_persistent_data(
170 169 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
171 170 )
172 171
173 172 self._nm_root = self._nm_max_idx = self._nm_docket = None
174 173 return docket, changed, data
175 174
176 175 def update_nodemap_data(self, docket, nm_data):
177 176 """provide full block of persisted binary data for a nodemap
178 177
179 178 The data are expected to come from disk. See `nodemap_data_all` for a
 180 179 producer of such data."""
181 180 if nm_data is not None:
182 181 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
183 182 if self._nm_root:
184 183 self._nm_docket = docket
185 184 else:
186 185 self._nm_root = self._nm_max_idx = self._nm_docket = None
187 186
188 187
189 188 class InlinedIndexObject(BaseIndexObject):
190 189 def __init__(self, data, inline=0):
191 190 self._data = data
192 191 self._lgt = self._inline_scan(None)
193 192 self._inline_scan(self._lgt)
194 193 self._extra = []
195 194
196 195 def _inline_scan(self, lgt):
197 196 off = 0
198 197 if lgt is not None:
199 198 self._offsets = [0] * lgt
200 199 count = 0
201 while off <= len(self._data) - indexsize:
200 while off <= len(self._data) - self.index_size:
201 start = off + self.big_int_size
202 202 (s,) = struct.unpack(
203 b'>i', self._data[off + indexfirst : off + sizeint + indexfirst]
203 b'>i',
204 self._data[start : start + self.int_size],
204 205 )
205 206 if lgt is not None:
206 207 self._offsets[count] = off
207 208 count += 1
208 off += indexsize + s
209 off += self.index_size + s
209 210 if off != len(self._data):
210 211 raise ValueError(b"corrupted data")
211 212 return count
212 213
213 214 def __delitem__(self, i):
214 215 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
215 216 raise ValueError(b"deleting slices only supports a:-1 with step 1")
216 217 i = i.start
217 218 self._check_index(i)
218 219 self._stripnodes(i)
219 220 if i < self._lgt:
220 221 self._offsets = self._offsets[:i]
221 222 self._lgt = i
222 223 self._extra = []
223 224 else:
224 225 self._extra = self._extra[: i - self._lgt]
225 226
226 227 def _calculate_index(self, i):
227 228 return self._offsets[i]
228 229
229 230
230 231 def parse_index2(data, inline):
231 232 if not inline:
232 233 return IndexObject(data), None
233 234 return InlinedIndexObject(data, inline), (0, data)
234 235
235 236
236 237 def parse_index_devel_nodemap(data, inline):
 237 238 """like parse_index2, but always returns a PersistentNodeMapIndexObject"""
238 239 return PersistentNodeMapIndexObject(data), None
239 240
240 241
241 242 def parse_dirstate(dmap, copymap, st):
242 243 parents = [st[:20], st[20:40]]
243 244 # dereference fields so they will be local in loop
244 245 format = b">cllll"
245 246 e_size = struct.calcsize(format)
246 247 pos1 = 40
247 248 l = len(st)
248 249
249 250 # the inner loop
250 251 while pos1 < l:
251 252 pos2 = pos1 + e_size
252 253 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
253 254 pos1 = pos2 + e[4]
254 255 f = st[pos2:pos1]
255 256 if b'\0' in f:
256 257 f, c = f.split(b'\0')
257 258 copymap[f] = c
258 259 dmap[f] = e[:4]
259 260 return parents
260 261
261 262
262 263 def pack_dirstate(dmap, copymap, pl, now):
263 264 now = int(now)
264 265 cs = stringio()
265 266 write = cs.write
266 267 write(b"".join(pl))
267 268 for f, e in pycompat.iteritems(dmap):
268 269 if e[0] == b'n' and e[3] == now:
269 270 # The file was last modified "simultaneously" with the current
270 271 # write to dirstate (i.e. within the same second for file-
271 272 # systems with a granularity of 1 sec). This commonly happens
272 273 # for at least a couple of files on 'update'.
273 274 # The user could change the file without changing its size
274 275 # within the same second. Invalidate the file's mtime in
275 276 # dirstate, forcing future 'status' calls to compare the
276 277 # contents of the file if the size is the same. This prevents
277 278 # mistakenly treating such files as clean.
278 279 e = dirstatetuple(e[0], e[1], e[2], -1)
279 280 dmap[f] = e
280 281
281 282 if f in copymap:
282 283 f = b"%s\0%s" % (f, copymap[f])
283 284 e = _pack(b">cllll", e[0], e[1], e[2], e[3], len(f))
284 285 write(e)
285 286 write(f)
286 287 return cs.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now