##// END OF EJS Templates
dirstate-item: use `set_possibly_dirty` in `pure.pack_dirstate`...
marmoute -
r48467:d06ced90 default
parent child Browse files
Show More
@@ -1,606 +1,605
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from ..thirdparty import attr
17 from ..thirdparty import attr
18 from .. import (
18 from .. import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 revlogutils,
21 revlogutils,
22 util,
22 util,
23 )
23 )
24
24
25 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import constants as revlog_constants
26 from ..revlogutils import constants as revlog_constants
27
27
stringio = pycompat.bytesio


# local aliases for the struct/zlib entry points used in the hot loops below
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = -1

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = -1
45
45
46
46
@attr.s(slots=True, init=False)
class DirstateItem(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    _state = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(self, state, mode, size, mtime):
        self._state = state
        self._mode = mode
        self._size = size
        self._mtime = mtime

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        Since the dirstate-v1 format is frozen, the signature of this function
        is not expected to change, unlike the __init__ one.
        """
        return cls(
            state=state,
            mode=mode,
            size=size,
            mtime=mtime,
        )

    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        This means the next status call will have to actually check its content
        to make sure it is correct.
        """
        # an ambiguous mtime forces a content comparison on the next status
        self._mtime = AMBIGUOUS_TIME

    def __getitem__(self, idx):
        # tuple-style access is deprecated; each index maps to one attribute
        if idx == 0 or idx == -4:
            msg = b"do not use item[x], use item.state"
            util.nouideprecwarn(msg, b'6.0', stacklevel=2)
            return self._state
        elif idx == 1 or idx == -3:
            msg = b"do not use item[x], use item.mode"
            util.nouideprecwarn(msg, b'6.0', stacklevel=2)
            return self._mode
        elif idx == 2 or idx == -2:
            msg = b"do not use item[x], use item.size"
            util.nouideprecwarn(msg, b'6.0', stacklevel=2)
            return self._size
        elif idx == 3 or idx == -1:
            msg = b"do not use item[x], use item.mtime"
            util.nouideprecwarn(msg, b'6.0', stacklevel=2)
            return self._mtime
        else:
            raise IndexError(idx)

    @property
    def mode(self):
        return self._mode

    @property
    def size(self):
        return self._size

    @property
    def mtime(self):
        return self._mtime

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        return self._state

    @property
    def tracked(self):
        """True is the file is tracked in the working copy"""
        return self._state in b"nma"

    @property
    def added(self):
        """True if the file has been added"""
        return self._state == b'a'

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self._state == b'm'

    @property
    def from_p2(self):
        """True if the file have been fetched from p2 during the current merge

        This is only True is the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        return self._state == b'n' and self._size == FROM_P2

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self._state == b'r' and self._size == FROM_P2

    @property
    def removed(self):
        """True if the file has been removed"""
        return self._state == b'r'

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self._state == b'r' and self._size == NONNORMAL

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        return self._state

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        return self._size

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        return self._mtime

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self._state == b'n' and self._mtime == now
210
210
def gettype(q):
    """Return the revision type encoded in the low 16 bits of an offset word."""
    return int(q & 0xFFFF)
213
213
214
214
class BaseIndexObject(object):
    # Can I be passed to an algorithm implemented in Rust ?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily build the node -> rev mapping from the index entries
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # keep the cached nodemap in sync when revisions are stripped
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        # v1 entries cannot carry sidedata information
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # the first entry stores the version in the offset field; mask it
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # the header bytes replace the start of the first entry on disk
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
340
340
341
341
class IndexObject(BaseIndexObject):
    def __init__(self, data):
        # the on-disk data must be an exact multiple of the entry size
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        # fixed-size entries: byte offset is a simple multiplication
        return i * self.entry_size

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            # strip reaches into the on-disk portion: truncate it
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            # strip only affects in-memory appended entries
            self._extra = self._extra[: i - self._lgt]
368
368
369
369
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the cached state is consumed by this call; reset it
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
412
412
413
413
class InlinedIndexObject(BaseIndexObject):
    def __init__(self, data, inline=0):
        self._data = data
        # first pass counts the entries, second pass records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            # the compressed-length field tells us how far the inlined
            # revision data extends past this entry
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        # entries are variable-sized (revision data is inlined), so offsets
        # come from the precomputed table
        return self._offsets[i]
455
455
456
456
def parse_index2(data, inline, revlogv2=False):
    """Build an index object of the appropriate class for the given data.

    Returns a ``(index, cache)`` pair; ``cache`` is only populated for
    inlined indexes, where the revision data lives in the same buffer.
    """
    if not inline:
        cls = IndexObject2 if revlogv2 else IndexObject
        return cls(data), None
    cls = InlinedIndexObject
    return cls(data, inline), (0, data)
463
463
464
464
def parse_index_cl_v2(data):
    """Build a changelog-v2 index object; the cache slot is always None."""
    return IndexChangelogV2(data), None
467
467
468
468
class IndexObject2(IndexObject):
    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # on-disk entries belong to earlier transactions and are immutable
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        # the last field packs both compression modes, two bits each
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
527
527
528
528
class IndexChangelogV2(IndexObject2):
    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        # changelog-v2 entries do not store parent revs; for the changelog
        # linkrev/p1/p2 slots are derived from the revision number itself
        entry = items[:3] + (rev, rev) + items[3:8]
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        # drop the derived fields before packing (see _unpack_entry)
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
547
547
548
548
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None
552
552
553
553
def parse_dirstate(dmap, copymap, st):
    """Parse a v1 dirstate blob ``st`` into ``dmap`` and ``copymap``.

    Returns the two parent nodes stored in the first 40 bytes.
    """
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            # a NUL byte separates the filename from its copy source
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = DirstateItem.from_v1_data(*e[:4])
    return parents
573
573
574
574
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize ``dmap``/``copymap`` into a v1 dirstate blob.

    ``pl`` is the pair of parent nodes written first; ``now`` is the
    current time, used to detect mtimes that would be ambiguous.

    NOTE: entries whose mtime matches ``now`` are marked possibly dirty
    in place (via ``DirstateItem.set_possibly_dirty``), so the items in
    ``dmap`` may be mutated by this call.
    """
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e.set_possibly_dirty()

        if f in copymap:
            # copy source is appended after a NUL separator
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now