##// END OF EJS Templates
dirstate-item: use an explicit __init__ function instead of the attrs one...
marmoute -
r48464:119b9c8d default
parent child Browse files
Show More
@@ -1,578 +1,584
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from ..thirdparty import attr
17 from ..thirdparty import attr
18 from .. import (
18 from .. import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 revlogutils,
21 revlogutils,
22 util,
22 util,
23 )
23 )
24
24
25 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import constants as revlog_constants
26 from ..revlogutils import constants as revlog_constants
27
27
28 stringio = pycompat.bytesio
28 stringio = pycompat.bytesio
29
29
30
30
31 _pack = struct.pack
31 _pack = struct.pack
32 _unpack = struct.unpack
32 _unpack = struct.unpack
33 _compress = zlib.compress
33 _compress = zlib.compress
34 _decompress = zlib.decompress
34 _decompress = zlib.decompress
35
35
36
36
37 # a special value used internally for `size` if the file come from the other parent
37 # a special value used internally for `size` if the file come from the other parent
38 FROM_P2 = -2
38 FROM_P2 = -2
39
39
40 # a special value used internally for `size` if the file is modified/merged/added
40 # a special value used internally for `size` if the file is modified/merged/added
41 NONNORMAL = -1
41 NONNORMAL = -1
42
42
43 # a special value used internally for `time` if the time is ambigeous
43 # a special value used internally for `time` if the time is ambigeous
44 AMBIGUOUS_TIME = -1
44 AMBIGUOUS_TIME = -1
45
45
46
46
@attr.s(slots=True, init=False)
class DirstateItem(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    _state = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(self, state, mode, size, mtime):
        # explicit __init__ (init=False above disables the attrs-generated
        # one) so construction logic can evolve independently of the fields
        self._state = state
        self._mode = mode
        self._size = size
        self._mtime = mtime

    def __getitem__(self, idx):
        # legacy tuple-style access; each index emits a deprecation warning
        # pointing callers at the named attribute to use instead
        if idx == 0 or idx == -4:
            msg = b"do not use item[x], use item.state"
            util.nouideprecwarn(msg, b'6.0', stacklevel=2)
            return self._state
        elif idx == 1 or idx == -3:
            msg = b"do not use item[x], use item.mode"
            util.nouideprecwarn(msg, b'6.0', stacklevel=2)
            return self._mode
        elif idx == 2 or idx == -2:
            msg = b"do not use item[x], use item.size"
            util.nouideprecwarn(msg, b'6.0', stacklevel=2)
            return self._size
        elif idx == 3 or idx == -1:
            msg = b"do not use item[x], use item.mtime"
            util.nouideprecwarn(msg, b'6.0', stacklevel=2)
            return self._mtime
        else:
            raise IndexError(idx)

    @property
    def mode(self):
        return self._mode

    @property
    def size(self):
        return self._size

    @property
    def mtime(self):
        return self._mtime

    @property
    def state(self):
        """
        States are:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        return self._state

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._state in b"nma"

    @property
    def added(self):
        """True if the file has been added"""
        return self._state == b'a'

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self._state == b'm'

    @property
    def from_p2(self):
        """True if the file have been fetched from p2 during the current merge

        This is only True if the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        return self._state == b'n' and self._size == FROM_P2

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self._state == b'r' and self._size == FROM_P2

    @property
    def removed(self):
        """True if the file has been removed"""
        return self._state == b'r'

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self._state == b'r' and self._size == NONNORMAL

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        return self._state

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        return self._size

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        return self._mtime

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self._state == b'n' and self._mtime == now
181
187
182
188
def gettype(q):
    """extract the revlog entry type from a packed offset+flags value

    The low 16 bits of the first index field carry the flags/type; the rest
    is the offset.  `int()` guards against the value being a long on py2.
    """
    return int(q & 0xFFFF)
185
191
186
192
class BaseIndexObject(object):
    """shared behavior for the pure-Python revlog index implementations

    Subclasses provide `_data`, `_lgt`, `_extra` and `_calculate_index`.
    """

    # Can I be passed to an algorithm implemented in Rust ?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily built node -> rev mapping; entry index 7 is the node
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # keep the cached nodemap in sync when revisions are stripped
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        # v1 entries carry no sidedata; the extended fields must be zero
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # the first entry stores the version/type in its offset field
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # rev 0 overlays the header, so drop the header-sized prefix
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
312
318
313
319
class IndexObject(BaseIndexObject):
    """a revlog index backed by a non-inline data buffer

    Entries are fixed size, so random access is a simple multiplication.
    """

    def __init__(self, data):
        # the buffer must hold a whole number of fixed-size entries
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        return i * self.entry_size

    def __delitem__(self, i):
        # only trailing slices of the form `del index[a:-1]` are supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]
340
346
341
347
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the cached state is consumed by this call; drop it
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
384
390
385
391
class InlinedIndexObject(BaseIndexObject):
    """a revlog index whose data interleaves entries with revision chunks

    Entry positions vary, so a scan pass records each entry's offset.
    """

    def __init__(self, data, inline=0):
        self._data = data
        # first pass counts entries, second pass records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        """walk the inline data; fill `self._offsets` when `lgt` is known

        Returns the number of entries found.  Raises ValueError if the data
        does not end exactly on an entry boundary.
        """
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            # compressed-length field tells us how far to skip to the next entry
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only trailing slices of the form `del index[a:-1]` are supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        return self._offsets[i]
427
433
428
434
def parse_index2(data, inline, revlogv2=False):
    """parse a revlog index, returning (index, cache-tuple-or-None)

    Non-inline indexes return no cache; inline ones return (0, data) so the
    caller can reuse the already-read buffer.
    """
    if not inline:
        cls = IndexObject2 if revlogv2 else IndexObject
        return cls(data), None
    cls = InlinedIndexObject
    return cls(data, inline), (0, data)
435
441
436
442
def parse_index_cl_v2(data):
    """parse a changelog-v2 index; returns (index, None) like parse_index2"""
    return IndexChangelogV2(data), None
439
445
440
446
class IndexObject2(IndexObject):
    """revlog-v2 index: v1 layout extended with sidedata and compression info"""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # entries already written to disk are immutable here
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        # last packed field holds both compression modes, 2 bits each
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
499
505
500
506
class IndexChangelogV2(IndexObject2):
    """changelog-v2 index: like v2 but parent revs equal the entry's own rev"""

    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        # the on-disk format omits linkrev/p1 slots; both are `rev` itself
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # sanity: the implicit fields must really equal the entry's rev
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
519
525
520
526
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None
524
530
525
531
def parse_dirstate(dmap, copymap, st):
    """parse a v1 dirstate blob `st` into `dmap` and `copymap`

    Fills `dmap` with filename -> DirstateItem and `copymap` with
    filename -> copy-source.  Returns the two parent nodes.
    """
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            # a NUL separates the filename from its copy source
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = DirstateItem(*e[:4])
    return parents
545
551
546
552
def pack_dirstate(dmap, copymap, pl, now):
    """serialize `dmap`/`copymap` into a v1 dirstate blob

    `pl` is the pair of parent nodes, `now` the current wall-clock time used
    to invalidate mtimes that would be ambiguous.  Mutates `dmap` entries
    whose mtime equals `now`.  Returns the packed bytes.
    """
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e = DirstateItem(e.state, e.mode, e.size, AMBIGUOUS_TIME)
            dmap[f] = e

        if f in copymap:
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now