##// END OF EJS Templates
dirstate-item: use need_delay when packing dirstate...
marmoute -
r48329:1a630242 default
parent child Browse files
Show More
@@ -1,568 +1,571 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from .. import (
17 from .. import (
18 error,
18 error,
19 pycompat,
19 pycompat,
20 revlogutils,
20 revlogutils,
21 util,
21 util,
22 )
22 )
23
23
24 from ..revlogutils import nodemap as nodemaputil
24 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import constants as revlog_constants
25 from ..revlogutils import constants as revlog_constants
26
26
# Factory for in-memory byte buffers; used below by pack_dirstate as an
# output stream (write()/getvalue()).
stringio = pycompat.bytesio


# Module-local aliases: binding these once makes the name lookups cheaper
# inside the hot parse/pack loops below.
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress


# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = -1

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = -1
44
42
45
class DirstateItem(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    # NOTE: this was historically spelled ``__slot__`` (singular), which is a
    # silent no-op in Python, leaving every instance with a full ``__dict__``.
    # The correct ``__slots__`` keeps these (very numerous) objects compact.
    __slots__ = ('_state', '_mode', '_size', '_mtime')

    def __init__(self, state, mode, size, mtime):
        self._state = state
        self._mode = mode
        self._size = size
        self._mtime = mtime

    def __getitem__(self, idx):
        # Tuple-like access retained for backward compatibility with the
        # old ``(state, mode, size, mtime)`` representation; both positive
        # and negative indexes are accepted.
        if idx == 0 or idx == -4:
            return self._state
        elif idx == 1 or idx == -3:
            return self._mode
        elif idx == 2 or idx == -2:
            return self._size
        elif idx == 3 or idx == -1:
            return self._mtime
        else:
            raise IndexError(idx)

    @property
    def mode(self):
        return self._mode

    @property
    def size(self):
        return self._size

    @property
    def mtime(self):
        return self._mtime

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        return self._state

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._state in b"nma"

    @property
    def added(self):
        """True if the file has been added"""
        return self._state == b'a'

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self._state == b'm'

    @property
    def from_p2(self):
        """True if the file have been fetched from p2 during the current merge

        This is only True if the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        return self._state == b'n' and self._size == FROM_P2

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self._state == b'r' and self._size == FROM_P2

    @property
    def removed(self):
        """True if the file has been removed"""
        return self._state == b'r'

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self._state == b'r' and self._size == NONNORMAL

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        return self._state

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        return self._size

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        return self._mtime

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self._state == b'n' and self._mtime == now
171
174
172
175
def gettype(q):
    """Return the type bits (the low 16 bits) of an offset_type value."""
    low_bits = 0xFFFF
    return int(q & low_bits)
175
178
176
179
class BaseIndexObject(object):
    """Shared behavior for the pure-Python revlog index objects.

    Concrete subclasses are expected to provide ``_data`` (the raw index
    bytes), ``_lgt`` (number of entries stored in ``_data``), ``_extra``
    (packed entries appended in memory) and ``_calculate_index(i)`` (byte
    offset of entry ``i`` inside ``_data``).
    """

    # Can I be passed to an algorithm implemented in Rust ?
    rust_ext_compat = 0
    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        # size in bytes of one packed index entry (cached after first access)
        return self.index_format.size

    @property
    def nodemap(self):
        # deprecated accessor kept for compatibility; warns and forwards to
        # the private cached nodemap
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily build the node -> rev mapping by scanning every entry;
        # field 7 of an entry tuple is the node id (see ``null_item``)
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # keep the cached nodemap (if it was built) in sync when revisions
        # from ``start`` onward are being deleted
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        # drop the lazily-built nodemap; it will be rebuilt on next access
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        # entries in the loaded buffer plus entries appended in memory
        return self._lgt + len(self._extra)

    def append(self, tup):
        # register the new node in the nodemap only if it is already built
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        # v1 entries cannot carry sidedata: fields 8 (offset) and 9 (length)
        # must be unset (cf. IndexObject2.replace_sidedata_info)
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        # -1 is the conventional null revision
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            # entry appended in memory after the buffer was loaded
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # the first on-disk entry shares bytes with the version header
            # (see ``entry_binary``), so normalize its offset to 0 while
            # preserving the type bits
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        # pad the v1 tuple up to the v2 entry shape: no sidedata
        # (offset/length 0) and inline compression modes
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # rev 0's leading bytes hold the index version header, so they
            # are not part of the entry itself
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
302
305
303
306
class IndexObject(BaseIndexObject):
    """Revlog index backed by one contiguous buffer of fixed-size entries."""

    def __init__(self, data):
        # the buffer must hold a whole number of fixed-size records
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        # number of entries stored in the immutable buffer
        self._lgt = len(data) // self.entry_size
        # packed entries appended in memory after load
        self._extra = []

    def _calculate_index(self, i):
        # fixed-size records allow direct offset computation
        return i * self.entry_size

    def __delitem__(self, i):
        # only deletion of a trailing slice ``del index[start:-1]`` is
        # supported (used when stripping revisions)
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        # keep the cached nodemap consistent with the removal
        self._stripnodes(i)
        if i < self._lgt:
            # truncation reaches into the loaded buffer: cut it and drop
            # every in-memory entry as well
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            # only in-memory entries are affected
            self._extra = self._extra[: i - self._lgt]
330
333
331
334
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            # no previously-loaded nodemap data to build an increment from
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the increment can only be produced once per load: clear the state
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                # empty/unusable data: reset all persistent-nodemap state
                self._nm_root = self._nm_max_idx = self._nm_docket = None
374
377
375
378
class InlinedIndexObject(BaseIndexObject):
    """Revlog index whose entries are interleaved with revision data.

    Because each fixed-size entry is followed by its variable-length
    revision chunk, entry offsets must be discovered by scanning.
    """

    def __init__(self, data, inline=0):
        self._data = data
        # first pass only counts entries; second pass records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        # Walk the buffer entry by entry. When ``lgt`` is None just count
        # entries; otherwise fill ``self._offsets`` (pre-sized to ``lgt``)
        # with the byte offset of each entry.
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            # read the compressed-length field that follows the 8-byte
            # offset/flags word; it tells how much revision data to skip
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            # the scan must land exactly on the end of the buffer
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only deletion of a trailing slice ``del index[start:-1]`` is
        # supported (used when stripping revisions)
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        # keep the cached nodemap consistent with the removal
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        # offsets were recorded by ``_inline_scan``
        return self._offsets[i]
417
420
418
421
def parse_index2(data, inline, revlogv2=False):
    """Build an index object from raw index bytes.

    Returns an ``(index, cache)`` pair. The cache tuple is only provided
    for inlined indexes, where the raw data must be kept around.
    """
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    if revlogv2:
        return IndexObject2(data), None
    return IndexObject(data), None
425
428
426
429
def parse_index_cl_v2(data):
    """Build a changelog-v2 index object (no cache tuple is produced)."""
    index = IndexChangelogV2(data)
    return index, None
429
432
430
433
class IndexObject2(IndexObject):
    """Index for revlog format v2 entries (adds sidedata information)."""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # entries already written out in ``_data`` are immutable; only
            # the in-memory ``_extra`` entries of this transaction may change
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        # the last packed field multiplexes two 2-bit compression modes:
        # bits 0-1 for revision data, bits 2-3 for sidedata
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        # fold the two 2-bit compression modes back into one packed field
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        # v2 stores the version in a separate docket file, never in the index
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
489
492
490
493
class IndexChangelogV2(IndexObject2):
    """Changelog-specific v2 index.

    The on-disk format omits the linkrev and p1-as-rev fields (for the
    changelog, linkrev == rev), so they are synthesized on unpack and
    asserted/dropped on pack.
    """

    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        items = self.index_format.unpack(data)
        # re-insert (rev, rev) for the fields the changelog format elides
        entry = items[:3] + (rev, rev) + items[3:8]
        # same 2+2 bit compression-mode multiplexing as IndexObject2
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # fields 3 and 4 must equal the revision number before being dropped
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
509
512
510
513
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject

    The ``inline`` argument is accepted for interface compatibility only.
    """
    index = PersistentNodeMapIndexObject(data)
    return index, None
514
517
515
518
def parse_dirstate(dmap, copymap, st):
    """Parse raw dirstate-v1 bytes ``st`` into ``dmap`` and ``copymap``.

    ``dmap`` is filled with filename -> DirstateItem, ``copymap`` with
    filename -> copy source. Returns the two 20-byte parent node ids.
    """
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    format = b">cllll"
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(b">cllll", st[pos1:pos2])  # a literal here is faster
        # e is (state, mode, size, mtime, filename-length)
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            # a NUL separates the filename from its copy source
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = DirstateItem(*e[:4])
    return parents
535
538
536
539
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize ``dmap``/``copymap`` into dirstate-v1 bytes.

    ``pl`` is the pair of parent node ids written first; ``now`` is the
    current wall-clock time, used to invalidate mtimes that would be
    ambiguous on read-back.
    """
    # truncate to whole seconds to match the stored mtime granularity
    now = int(now)
    cs = stringio()
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e = DirstateItem(e.state, e.mode, e.size, AMBIGUOUS_TIME)
            dmap[f] = e

        if f in copymap:
            # filename and copy source share one record, NUL-separated
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now