##// END OF EJS Templates
dirstate-item: deprecate tuple access on the class...
marmoute -
r48369:7a4ba68f default
parent child Browse files
Show More
@@ -1,570 +1,578
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from ..thirdparty import attr
17 from ..thirdparty import attr
18 from .. import (
18 from .. import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 revlogutils,
21 revlogutils,
22 util,
22 util,
23 )
23 )
24
24
25 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import constants as revlog_constants
26 from ..revlogutils import constants as revlog_constants
27
27
stringio = pycompat.bytesio


# Local aliases so hot loops below get fast LOAD_GLOBAL-free lookups.
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


# a special value used internally for `size` if the file come from the other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = -1

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = -1
45
45
46
46
@attr.s(slots=True)
class DirstateItem(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    # dirstate-v1 fields; access them through the properties below
    _state = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __getitem__(self, idx):
        # Tuple-style access is deprecated (scheduled for removal in 6.0);
        # callers should migrate to the named properties.
        if idx == 0 or idx == -4:
            msg = b"do not use item[x], use item.state"
            util.nouideprecwarn(msg, b'6.0', stacklevel=2)
            return self._state
        elif idx == 1 or idx == -3:
            msg = b"do not use item[x], use item.mode"
            util.nouideprecwarn(msg, b'6.0', stacklevel=2)
            return self._mode
        elif idx == 2 or idx == -2:
            msg = b"do not use item[x], use item.size"
            util.nouideprecwarn(msg, b'6.0', stacklevel=2)
            return self._size
        elif idx == 3 or idx == -1:
            msg = b"do not use item[x], use item.mtime"
            util.nouideprecwarn(msg, b'6.0', stacklevel=2)
            return self._mtime
        else:
            raise IndexError(idx)

    @property
    def mode(self):
        return self._mode

    @property
    def size(self):
        return self._size

    @property
    def mtime(self):
        return self._mtime

    @property
    def state(self):
        """
        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        return self._state

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._state in b"nma"

    @property
    def added(self):
        """True if the file has been added"""
        return self._state == b'a'

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self._state == b'm'

    @property
    def from_p2(self):
        """True if the file have been fetched from p2 during the current merge

        This is only True if the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        return self._state == b'n' and self._size == FROM_P2

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self._state == b'r' and self._size == FROM_P2

    @property
    def removed(self):
        """True if the file has been removed"""
        return self._state == b'r'

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self._state == b'r' and self._size == NONNORMAL

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        return self._state

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        return self._size

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        return self._mtime

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self._state == b'n' and self._mtime == now
173
181
174
182
def gettype(q):
    """Return the revlog entry type encoded in a packed offset+flags value.

    The low 16 bits of the first field of an index entry hold the flags/type;
    the remaining high bits hold the data offset.
    """
    return int(q & 0xFFFF)
177
185
178
186
class BaseIndexObject(object):
    """Pure-Python implementation of a revlog index.

    Entries are exposed as tuples matching the C implementation's layout;
    appended entries are kept in ``self._extra`` on top of the immutable
    on-disk portion of length ``self._lgt``.
    """

    # Can I be passed to an algorithm implemented in Rust ?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        # byte size of one packed index entry
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily built node -> rev mapping; invalidated by clearcaches()
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exists in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # keep the cached nodemap (if materialized) in sync with a strip
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        # v1 format has no sidedata, so those fields must be zero
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # the first entry's offset field doubles as the version header on
            # disk; rebuild a clean offset+type value for rev 0
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        # pad with the sidedata/compression fields v1 does not store
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # rev 0 shares bytes with the index header on disk
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
305
313
class IndexObject(BaseIndexObject):
    """Index backed by a non-inline revlog: fixed-size entries, no data."""

    def __init__(self, data):
        # the buffer must hold a whole number of entries
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        # fixed-size entries: direct offset computation
        return i * self.entry_size

    def __delitem__(self, i):
        # only `del index[i:]`-style truncation (strip) is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]
332
340
333
341
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing a incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the incremental state is single-use; reset it after serialization
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
376
384
377
385
class InlinedIndexObject(BaseIndexObject):
    """Index for an inline revlog, where entry data is interleaved with the
    index entries; offsets must therefore be discovered by scanning."""

    def __init__(self, data, inline=0):
        self._data = data
        # first scan counts the entries, second records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        """Walk the interleaved data once.

        With ``lgt=None`` just count entries; with a length, also fill
        ``self._offsets``. Returns the number of entries found. Raises
        ValueError if the buffer does not end exactly on an entry boundary.
        """
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            # compressed data length immediately follows the 64-bit offset
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only `del index[i:]`-style truncation (strip) is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        return self._offsets[i]
419
427
420
428
def parse_index2(data, inline, revlogv2=False):
    """Parse raw index bytes into an index object.

    Returns a ``(index, cache)`` pair; ``cache`` is ``None`` for non-inline
    revlogs and ``(0, data)`` for inline ones.
    """
    if not inline:
        cls = IndexObject2 if revlogv2 else IndexObject
        return cls(data), None
    cls = InlinedIndexObject
    return cls(data, inline), (0, data)
427
435
428
436
def parse_index_cl_v2(data):
    """Parse raw bytes of a changelog-v2 index; returns ``(index, None)``."""
    return IndexChangelogV2(data), None
431
439
432
440
class IndexObject2(IndexObject):
    """Index for revlog format v2, which stores sidedata information."""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # only entries appended in this transaction live in _extra
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        # the last packed field holds both compression modes, 2 bits each
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        # fold both 2-bit compression modes into a single byte field
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
491
499
492
500
class IndexChangelogV2(IndexObject2):
    """Changelog-v2 index: like v2 but linkrev/p1/p2 are implicit (a
    changelog entry always links to itself), so they are not stored."""

    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        # reinsert the implicit (rev, rev) linkrev/parent fields
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # the implicit fields must match the revision being packed
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
511
519
512
520
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None
516
524
517
525
def parse_dirstate(dmap, copymap, st):
    """Parse a dirstate-v1 blob ``st`` into ``dmap`` and ``copymap``.

    ``dmap`` maps filename -> DirstateItem, ``copymap`` maps filename ->
    copy source. Returns the two 20-byte parent nodes.
    """
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            # "filename\0copysource" encodes a copy record
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = DirstateItem(*e[:4])
    return parents
537
545
538
546
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize ``dmap``/``copymap`` to the dirstate-v1 binary format.

    ``pl`` is the pair of parent nodes, ``now`` the current wall-clock time
    used to invalidate ambiguous mtimes. Mutates ``dmap`` in place when an
    entry's mtime is ambiguous. Returns the packed bytes.
    """
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e = DirstateItem(e.state, e.mode, e.size, AMBIGUOUS_TIME)
            dmap[f] = e

        if f in copymap:
            # copy record: filename and source share one length-prefixed blob
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now