##// END OF EJS Templates
dirstate-item: have all the logic go through the v1_ accessors...
marmoute -
r48738:05f2be3a default
parent child Browse files
Show More
@@ -1,735 +1,735 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from ..thirdparty import attr
17 from ..thirdparty import attr
18 from .. import (
18 from .. import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 revlogutils,
21 revlogutils,
22 util,
22 util,
23 )
23 )
24
24
25 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import constants as revlog_constants
26 from ..revlogutils import constants as revlog_constants
27
27
28 stringio = pycompat.bytesio
28 stringio = pycompat.bytesio
29
29
30
30
31 _pack = struct.pack
31 _pack = struct.pack
32 _unpack = struct.unpack
32 _unpack = struct.unpack
33 _compress = zlib.compress
33 _compress = zlib.compress
34 _decompress = zlib.decompress
34 _decompress = zlib.decompress
35
35
36
36
37 # a special value used internally for `size` if the file come from the other parent
37 # a special value used internally for `size` if the file come from the other parent
38 FROM_P2 = -2
38 FROM_P2 = -2
39
39
40 # a special value used internally for `size` if the file is modified/merged/added
40 # a special value used internally for `size` if the file is modified/merged/added
41 NONNORMAL = -1
41 NONNORMAL = -1
42
42
43 # a special value used internally for `time` if the time is ambigeous
43 # a special value used internally for `time` if the time is ambigeous
44 AMBIGUOUS_TIME = -1
44 AMBIGUOUS_TIME = -1
45
45
46
46
47 @attr.s(slots=True, init=False)
47 @attr.s(slots=True, init=False)
48 class DirstateItem(object):
48 class DirstateItem(object):
49 """represent a dirstate entry
49 """represent a dirstate entry
50
50
51 It contains:
51 It contains:
52
52
53 - state (one of 'n', 'a', 'r', 'm')
53 - state (one of 'n', 'a', 'r', 'm')
54 - mode,
54 - mode,
55 - size,
55 - size,
56 - mtime,
56 - mtime,
57 """
57 """
58
58
59 _state = attr.ib()
59 _state = attr.ib()
60 _mode = attr.ib()
60 _mode = attr.ib()
61 _size = attr.ib()
61 _size = attr.ib()
62 _mtime = attr.ib()
62 _mtime = attr.ib()
63
63
64 def __init__(
64 def __init__(
65 self,
65 self,
66 wc_tracked=False,
66 wc_tracked=False,
67 p1_tracked=False,
67 p1_tracked=False,
68 p2_tracked=False,
68 p2_tracked=False,
69 merged=False,
69 merged=False,
70 clean_p1=False,
70 clean_p1=False,
71 clean_p2=False,
71 clean_p2=False,
72 possibly_dirty=False,
72 possibly_dirty=False,
73 parentfiledata=None,
73 parentfiledata=None,
74 ):
74 ):
75 if merged and (clean_p1 or clean_p2):
75 if merged and (clean_p1 or clean_p2):
76 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
76 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
77 raise error.ProgrammingError(msg)
77 raise error.ProgrammingError(msg)
78
78
79 self._state = None
79 self._state = None
80 self._mode = 0
80 self._mode = 0
81 self._size = NONNORMAL
81 self._size = NONNORMAL
82 self._mtime = AMBIGUOUS_TIME
82 self._mtime = AMBIGUOUS_TIME
83 if not (p1_tracked or p2_tracked or wc_tracked):
83 if not (p1_tracked or p2_tracked or wc_tracked):
84 pass # the object has no state to record
84 pass # the object has no state to record
85 elif merged:
85 elif merged:
86 self._state = b'm'
86 self._state = b'm'
87 self._size = FROM_P2
87 self._size = FROM_P2
88 self._mtime = AMBIGUOUS_TIME
88 self._mtime = AMBIGUOUS_TIME
89 elif not (p1_tracked or p2_tracked) and wc_tracked:
89 elif not (p1_tracked or p2_tracked) and wc_tracked:
90 self._state = b'a'
90 self._state = b'a'
91 self._size = NONNORMAL
91 self._size = NONNORMAL
92 self._mtime = AMBIGUOUS_TIME
92 self._mtime = AMBIGUOUS_TIME
93 elif (p1_tracked or p2_tracked) and not wc_tracked:
93 elif (p1_tracked or p2_tracked) and not wc_tracked:
94 self._state = b'r'
94 self._state = b'r'
95 self._size = 0
95 self._size = 0
96 self._mtime = 0
96 self._mtime = 0
97 elif clean_p2 and wc_tracked:
97 elif clean_p2 and wc_tracked:
98 self._state = b'n'
98 self._state = b'n'
99 self._size = FROM_P2
99 self._size = FROM_P2
100 self._mtime = AMBIGUOUS_TIME
100 self._mtime = AMBIGUOUS_TIME
101 elif not p1_tracked and p2_tracked and wc_tracked:
101 elif not p1_tracked and p2_tracked and wc_tracked:
102 self._state = b'n'
102 self._state = b'n'
103 self._size = FROM_P2
103 self._size = FROM_P2
104 self._mtime = AMBIGUOUS_TIME
104 self._mtime = AMBIGUOUS_TIME
105 elif possibly_dirty:
105 elif possibly_dirty:
106 self._state = b'n'
106 self._state = b'n'
107 self._size = NONNORMAL
107 self._size = NONNORMAL
108 self._mtime = AMBIGUOUS_TIME
108 self._mtime = AMBIGUOUS_TIME
109 elif wc_tracked:
109 elif wc_tracked:
110 # this is a "normal" file
110 # this is a "normal" file
111 if parentfiledata is None:
111 if parentfiledata is None:
112 msg = b'failed to pass parentfiledata for a normal file'
112 msg = b'failed to pass parentfiledata for a normal file'
113 raise error.ProgrammingError(msg)
113 raise error.ProgrammingError(msg)
114 self._state = b'n'
114 self._state = b'n'
115 self._mode = parentfiledata[0]
115 self._mode = parentfiledata[0]
116 self._size = parentfiledata[1]
116 self._size = parentfiledata[1]
117 self._mtime = parentfiledata[2]
117 self._mtime = parentfiledata[2]
118 else:
118 else:
119 assert False, 'unreachable'
119 assert False, 'unreachable'
120
120
121 @classmethod
121 @classmethod
122 def new_added(cls):
122 def new_added(cls):
123 """constructor to help legacy API to build a new "added" item
123 """constructor to help legacy API to build a new "added" item
124
124
125 Should eventually be removed
125 Should eventually be removed
126 """
126 """
127 instance = cls()
127 instance = cls()
128 instance._state = b'a'
128 instance._state = b'a'
129 instance._mode = 0
129 instance._mode = 0
130 instance._size = NONNORMAL
130 instance._size = NONNORMAL
131 instance._mtime = AMBIGUOUS_TIME
131 instance._mtime = AMBIGUOUS_TIME
132 return instance
132 return instance
133
133
134 @classmethod
134 @classmethod
135 def new_merged(cls):
135 def new_merged(cls):
136 """constructor to help legacy API to build a new "merged" item
136 """constructor to help legacy API to build a new "merged" item
137
137
138 Should eventually be removed
138 Should eventually be removed
139 """
139 """
140 instance = cls()
140 instance = cls()
141 instance._state = b'm'
141 instance._state = b'm'
142 instance._mode = 0
142 instance._mode = 0
143 instance._size = FROM_P2
143 instance._size = FROM_P2
144 instance._mtime = AMBIGUOUS_TIME
144 instance._mtime = AMBIGUOUS_TIME
145 return instance
145 return instance
146
146
147 @classmethod
147 @classmethod
148 def new_from_p2(cls):
148 def new_from_p2(cls):
149 """constructor to help legacy API to build a new "from_p2" item
149 """constructor to help legacy API to build a new "from_p2" item
150
150
151 Should eventually be removed
151 Should eventually be removed
152 """
152 """
153 instance = cls()
153 instance = cls()
154 instance._state = b'n'
154 instance._state = b'n'
155 instance._mode = 0
155 instance._mode = 0
156 instance._size = FROM_P2
156 instance._size = FROM_P2
157 instance._mtime = AMBIGUOUS_TIME
157 instance._mtime = AMBIGUOUS_TIME
158 return instance
158 return instance
159
159
160 @classmethod
160 @classmethod
161 def new_possibly_dirty(cls):
161 def new_possibly_dirty(cls):
162 """constructor to help legacy API to build a new "possibly_dirty" item
162 """constructor to help legacy API to build a new "possibly_dirty" item
163
163
164 Should eventually be removed
164 Should eventually be removed
165 """
165 """
166 instance = cls()
166 instance = cls()
167 instance._state = b'n'
167 instance._state = b'n'
168 instance._mode = 0
168 instance._mode = 0
169 instance._size = NONNORMAL
169 instance._size = NONNORMAL
170 instance._mtime = AMBIGUOUS_TIME
170 instance._mtime = AMBIGUOUS_TIME
171 return instance
171 return instance
172
172
173 @classmethod
173 @classmethod
174 def new_normal(cls, mode, size, mtime):
174 def new_normal(cls, mode, size, mtime):
175 """constructor to help legacy API to build a new "normal" item
175 """constructor to help legacy API to build a new "normal" item
176
176
177 Should eventually be removed
177 Should eventually be removed
178 """
178 """
179 assert size != FROM_P2
179 assert size != FROM_P2
180 assert size != NONNORMAL
180 assert size != NONNORMAL
181 instance = cls()
181 instance = cls()
182 instance._state = b'n'
182 instance._state = b'n'
183 instance._mode = mode
183 instance._mode = mode
184 instance._size = size
184 instance._size = size
185 instance._mtime = mtime
185 instance._mtime = mtime
186 return instance
186 return instance
187
187
188 @classmethod
188 @classmethod
189 def from_v1_data(cls, state, mode, size, mtime):
189 def from_v1_data(cls, state, mode, size, mtime):
190 """Build a new DirstateItem object from V1 data
190 """Build a new DirstateItem object from V1 data
191
191
192 Since the dirstate-v1 format is frozen, the signature of this function
192 Since the dirstate-v1 format is frozen, the signature of this function
193 is not expected to change, unlike the __init__ one.
193 is not expected to change, unlike the __init__ one.
194 """
194 """
195 instance = cls()
195 instance = cls()
196 instance._state = state
196 instance._state = state
197 instance._mode = mode
197 instance._mode = mode
198 instance._size = size
198 instance._size = size
199 instance._mtime = mtime
199 instance._mtime = mtime
200 return instance
200 return instance
201
201
202 def set_possibly_dirty(self):
202 def set_possibly_dirty(self):
203 """Mark a file as "possibly dirty"
203 """Mark a file as "possibly dirty"
204
204
205 This means the next status call will have to actually check its content
205 This means the next status call will have to actually check its content
206 to make sure it is correct.
206 to make sure it is correct.
207 """
207 """
208 self._mtime = AMBIGUOUS_TIME
208 self._mtime = AMBIGUOUS_TIME
209
209
210 def set_untracked(self):
210 def set_untracked(self):
211 """mark a file as untracked in the working copy
211 """mark a file as untracked in the working copy
212
212
213 This will ultimately be called by command like `hg remove`.
213 This will ultimately be called by command like `hg remove`.
214 """
214 """
215 # backup the previous state (useful for merge)
215 # backup the previous state (useful for merge)
216 size = 0
216 size = 0
217 if self.merged: # merge
217 if self.merged: # merge
218 size = NONNORMAL
218 size = NONNORMAL
219 elif self.from_p2:
219 elif self.from_p2:
220 size = FROM_P2
220 size = FROM_P2
221 self._state = b'r'
221 self._state = b'r'
222 self._mode = 0
222 self._mode = 0
223 self._size = size
223 self._size = size
224 self._mtime = 0
224 self._mtime = 0
225
225
226 @property
226 @property
227 def mode(self):
227 def mode(self):
228 return self._mode
228 return self.v1_mode()
229
229
230 @property
230 @property
231 def size(self):
231 def size(self):
232 return self._size
232 return self.v1_size()
233
233
234 @property
234 @property
235 def mtime(self):
235 def mtime(self):
236 return self._mtime
236 return self.v1_mtime()
237
237
238 @property
238 @property
239 def state(self):
239 def state(self):
240 """
240 """
241 States are:
241 States are:
242 n normal
242 n normal
243 m needs merging
243 m needs merging
244 r marked for removal
244 r marked for removal
245 a marked for addition
245 a marked for addition
246
246
247 XXX This "state" is a bit obscure and mostly a direct expression of the
247 XXX This "state" is a bit obscure and mostly a direct expression of the
248 dirstatev1 format. It would make sense to ultimately deprecate it in
248 dirstatev1 format. It would make sense to ultimately deprecate it in
249 favor of the more "semantic" attributes.
249 favor of the more "semantic" attributes.
250 """
250 """
251 return self._state
251 return self.v1_state()
252
252
253 @property
253 @property
254 def tracked(self):
254 def tracked(self):
255 """True is the file is tracked in the working copy"""
255 """True is the file is tracked in the working copy"""
256 return self._state in b"nma"
256 return self.v1_state() in b"nma"
257
257
258 @property
258 @property
259 def added(self):
259 def added(self):
260 """True if the file has been added"""
260 """True if the file has been added"""
261 return self._state == b'a'
261 return self.v1_state() == b'a'
262
262
263 @property
263 @property
264 def merged(self):
264 def merged(self):
265 """True if the file has been merged
265 """True if the file has been merged
266
266
267 Should only be set if a merge is in progress in the dirstate
267 Should only be set if a merge is in progress in the dirstate
268 """
268 """
269 return self._state == b'm'
269 return self.v1_state() == b'm'
270
270
271 @property
271 @property
272 def from_p2(self):
272 def from_p2(self):
273 """True if the file have been fetched from p2 during the current merge
273 """True if the file have been fetched from p2 during the current merge
274
274
275 This is only True is the file is currently tracked.
275 This is only True is the file is currently tracked.
276
276
277 Should only be set if a merge is in progress in the dirstate
277 Should only be set if a merge is in progress in the dirstate
278 """
278 """
279 return self._state == b'n' and self._size == FROM_P2
279 return self.v1_state() == b'n' and self.v1_size() == FROM_P2
280
280
281 @property
281 @property
282 def from_p2_removed(self):
282 def from_p2_removed(self):
283 """True if the file has been removed, but was "from_p2" initially
283 """True if the file has been removed, but was "from_p2" initially
284
284
285 This property seems like an abstraction leakage and should probably be
285 This property seems like an abstraction leakage and should probably be
286 dealt in this class (or maybe the dirstatemap) directly.
286 dealt in this class (or maybe the dirstatemap) directly.
287 """
287 """
288 return self._state == b'r' and self._size == FROM_P2
288 return self.v1_state() == b'r' and self.v1_size() == FROM_P2
289
289
290 @property
290 @property
291 def removed(self):
291 def removed(self):
292 """True if the file has been removed"""
292 """True if the file has been removed"""
293 return self._state == b'r'
293 return self.v1_state() == b'r'
294
294
295 @property
295 @property
296 def merged_removed(self):
296 def merged_removed(self):
297 """True if the file has been removed, but was "merged" initially
297 """True if the file has been removed, but was "merged" initially
298
298
299 This property seems like an abstraction leakage and should probably be
299 This property seems like an abstraction leakage and should probably be
300 dealt in this class (or maybe the dirstatemap) directly.
300 dealt in this class (or maybe the dirstatemap) directly.
301 """
301 """
302 return self._state == b'r' and self._size == NONNORMAL
302 return self.v1_state() == b'r' and self.v1_size() == NONNORMAL
303
303
304 @property
304 @property
305 def dm_nonnormal(self):
305 def dm_nonnormal(self):
306 """True is the entry is non-normal in the dirstatemap sense
306 """True is the entry is non-normal in the dirstatemap sense
307
307
308 There is no reason for any code, but the dirstatemap one to use this.
308 There is no reason for any code, but the dirstatemap one to use this.
309 """
309 """
310 return self.state != b'n' or self.mtime == AMBIGUOUS_TIME
310 return self.v1_state() != b'n' or self.v1_mtime() == AMBIGUOUS_TIME
311
311
312 @property
312 @property
313 def dm_otherparent(self):
313 def dm_otherparent(self):
314 """True is the entry is `otherparent` in the dirstatemap sense
314 """True is the entry is `otherparent` in the dirstatemap sense
315
315
316 There is no reason for any code, but the dirstatemap one to use this.
316 There is no reason for any code, but the dirstatemap one to use this.
317 """
317 """
318 return self._size == FROM_P2
318 return self.v1_size() == FROM_P2
319
319
320 def v1_state(self):
320 def v1_state(self):
321 """return a "state" suitable for v1 serialization"""
321 """return a "state" suitable for v1 serialization"""
322 return self._state
322 return self._state
323
323
324 def v1_mode(self):
324 def v1_mode(self):
325 """return a "mode" suitable for v1 serialization"""
325 """return a "mode" suitable for v1 serialization"""
326 return self._mode
326 return self._mode
327
327
328 def v1_size(self):
328 def v1_size(self):
329 """return a "size" suitable for v1 serialization"""
329 """return a "size" suitable for v1 serialization"""
330 return self._size
330 return self._size
331
331
332 def v1_mtime(self):
332 def v1_mtime(self):
333 """return a "mtime" suitable for v1 serialization"""
333 """return a "mtime" suitable for v1 serialization"""
334 return self._mtime
334 return self._mtime
335
335
336 def need_delay(self, now):
336 def need_delay(self, now):
337 """True if the stored mtime would be ambiguous with the current time"""
337 """True if the stored mtime would be ambiguous with the current time"""
338 return self._state == b'n' and self._mtime == now
338 return self.v1_state() == b'n' and self.v1_mtime() == now
339
339
340
340
341 def gettype(q):
341 def gettype(q):
342 return int(q & 0xFFFF)
342 return int(q & 0xFFFF)
343
343
344
344
345 class BaseIndexObject(object):
345 class BaseIndexObject(object):
346 # Can I be passed to an algorithme implemented in Rust ?
346 # Can I be passed to an algorithme implemented in Rust ?
347 rust_ext_compat = 0
347 rust_ext_compat = 0
348 # Format of an index entry according to Python's `struct` language
348 # Format of an index entry according to Python's `struct` language
349 index_format = revlog_constants.INDEX_ENTRY_V1
349 index_format = revlog_constants.INDEX_ENTRY_V1
350 # Size of a C unsigned long long int, platform independent
350 # Size of a C unsigned long long int, platform independent
351 big_int_size = struct.calcsize(b'>Q')
351 big_int_size = struct.calcsize(b'>Q')
352 # Size of a C long int, platform independent
352 # Size of a C long int, platform independent
353 int_size = struct.calcsize(b'>i')
353 int_size = struct.calcsize(b'>i')
354 # An empty index entry, used as a default value to be overridden, or nullrev
354 # An empty index entry, used as a default value to be overridden, or nullrev
355 null_item = (
355 null_item = (
356 0,
356 0,
357 0,
357 0,
358 0,
358 0,
359 -1,
359 -1,
360 -1,
360 -1,
361 -1,
361 -1,
362 -1,
362 -1,
363 sha1nodeconstants.nullid,
363 sha1nodeconstants.nullid,
364 0,
364 0,
365 0,
365 0,
366 revlog_constants.COMP_MODE_INLINE,
366 revlog_constants.COMP_MODE_INLINE,
367 revlog_constants.COMP_MODE_INLINE,
367 revlog_constants.COMP_MODE_INLINE,
368 )
368 )
369
369
370 @util.propertycache
370 @util.propertycache
371 def entry_size(self):
371 def entry_size(self):
372 return self.index_format.size
372 return self.index_format.size
373
373
374 @property
374 @property
375 def nodemap(self):
375 def nodemap(self):
376 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
376 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
377 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
377 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
378 return self._nodemap
378 return self._nodemap
379
379
380 @util.propertycache
380 @util.propertycache
381 def _nodemap(self):
381 def _nodemap(self):
382 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
382 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
383 for r in range(0, len(self)):
383 for r in range(0, len(self)):
384 n = self[r][7]
384 n = self[r][7]
385 nodemap[n] = r
385 nodemap[n] = r
386 return nodemap
386 return nodemap
387
387
388 def has_node(self, node):
388 def has_node(self, node):
389 """return True if the node exist in the index"""
389 """return True if the node exist in the index"""
390 return node in self._nodemap
390 return node in self._nodemap
391
391
392 def rev(self, node):
392 def rev(self, node):
393 """return a revision for a node
393 """return a revision for a node
394
394
395 If the node is unknown, raise a RevlogError"""
395 If the node is unknown, raise a RevlogError"""
396 return self._nodemap[node]
396 return self._nodemap[node]
397
397
398 def get_rev(self, node):
398 def get_rev(self, node):
399 """return a revision for a node
399 """return a revision for a node
400
400
401 If the node is unknown, return None"""
401 If the node is unknown, return None"""
402 return self._nodemap.get(node)
402 return self._nodemap.get(node)
403
403
404 def _stripnodes(self, start):
404 def _stripnodes(self, start):
405 if '_nodemap' in vars(self):
405 if '_nodemap' in vars(self):
406 for r in range(start, len(self)):
406 for r in range(start, len(self)):
407 n = self[r][7]
407 n = self[r][7]
408 del self._nodemap[n]
408 del self._nodemap[n]
409
409
410 def clearcaches(self):
410 def clearcaches(self):
411 self.__dict__.pop('_nodemap', None)
411 self.__dict__.pop('_nodemap', None)
412
412
413 def __len__(self):
413 def __len__(self):
414 return self._lgt + len(self._extra)
414 return self._lgt + len(self._extra)
415
415
416 def append(self, tup):
416 def append(self, tup):
417 if '_nodemap' in vars(self):
417 if '_nodemap' in vars(self):
418 self._nodemap[tup[7]] = len(self)
418 self._nodemap[tup[7]] = len(self)
419 data = self._pack_entry(len(self), tup)
419 data = self._pack_entry(len(self), tup)
420 self._extra.append(data)
420 self._extra.append(data)
421
421
422 def _pack_entry(self, rev, entry):
422 def _pack_entry(self, rev, entry):
423 assert entry[8] == 0
423 assert entry[8] == 0
424 assert entry[9] == 0
424 assert entry[9] == 0
425 return self.index_format.pack(*entry[:8])
425 return self.index_format.pack(*entry[:8])
426
426
427 def _check_index(self, i):
427 def _check_index(self, i):
428 if not isinstance(i, int):
428 if not isinstance(i, int):
429 raise TypeError(b"expecting int indexes")
429 raise TypeError(b"expecting int indexes")
430 if i < 0 or i >= len(self):
430 if i < 0 or i >= len(self):
431 raise IndexError
431 raise IndexError
432
432
433 def __getitem__(self, i):
433 def __getitem__(self, i):
434 if i == -1:
434 if i == -1:
435 return self.null_item
435 return self.null_item
436 self._check_index(i)
436 self._check_index(i)
437 if i >= self._lgt:
437 if i >= self._lgt:
438 data = self._extra[i - self._lgt]
438 data = self._extra[i - self._lgt]
439 else:
439 else:
440 index = self._calculate_index(i)
440 index = self._calculate_index(i)
441 data = self._data[index : index + self.entry_size]
441 data = self._data[index : index + self.entry_size]
442 r = self._unpack_entry(i, data)
442 r = self._unpack_entry(i, data)
443 if self._lgt and i == 0:
443 if self._lgt and i == 0:
444 offset = revlogutils.offset_type(0, gettype(r[0]))
444 offset = revlogutils.offset_type(0, gettype(r[0]))
445 r = (offset,) + r[1:]
445 r = (offset,) + r[1:]
446 return r
446 return r
447
447
448 def _unpack_entry(self, rev, data):
448 def _unpack_entry(self, rev, data):
449 r = self.index_format.unpack(data)
449 r = self.index_format.unpack(data)
450 r = r + (
450 r = r + (
451 0,
451 0,
452 0,
452 0,
453 revlog_constants.COMP_MODE_INLINE,
453 revlog_constants.COMP_MODE_INLINE,
454 revlog_constants.COMP_MODE_INLINE,
454 revlog_constants.COMP_MODE_INLINE,
455 )
455 )
456 return r
456 return r
457
457
458 def pack_header(self, header):
458 def pack_header(self, header):
459 """pack header information as binary"""
459 """pack header information as binary"""
460 v_fmt = revlog_constants.INDEX_HEADER
460 v_fmt = revlog_constants.INDEX_HEADER
461 return v_fmt.pack(header)
461 return v_fmt.pack(header)
462
462
463 def entry_binary(self, rev):
463 def entry_binary(self, rev):
464 """return the raw binary string representing a revision"""
464 """return the raw binary string representing a revision"""
465 entry = self[rev]
465 entry = self[rev]
466 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
466 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
467 if rev == 0:
467 if rev == 0:
468 p = p[revlog_constants.INDEX_HEADER.size :]
468 p = p[revlog_constants.INDEX_HEADER.size :]
469 return p
469 return p
470
470
471
471
472 class IndexObject(BaseIndexObject):
472 class IndexObject(BaseIndexObject):
473 def __init__(self, data):
473 def __init__(self, data):
474 assert len(data) % self.entry_size == 0, (
474 assert len(data) % self.entry_size == 0, (
475 len(data),
475 len(data),
476 self.entry_size,
476 self.entry_size,
477 len(data) % self.entry_size,
477 len(data) % self.entry_size,
478 )
478 )
479 self._data = data
479 self._data = data
480 self._lgt = len(data) // self.entry_size
480 self._lgt = len(data) // self.entry_size
481 self._extra = []
481 self._extra = []
482
482
483 def _calculate_index(self, i):
483 def _calculate_index(self, i):
484 return i * self.entry_size
484 return i * self.entry_size
485
485
486 def __delitem__(self, i):
486 def __delitem__(self, i):
487 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
487 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
488 raise ValueError(b"deleting slices only supports a:-1 with step 1")
488 raise ValueError(b"deleting slices only supports a:-1 with step 1")
489 i = i.start
489 i = i.start
490 self._check_index(i)
490 self._check_index(i)
491 self._stripnodes(i)
491 self._stripnodes(i)
492 if i < self._lgt:
492 if i < self._lgt:
493 self._data = self._data[: i * self.entry_size]
493 self._data = self._data[: i * self.entry_size]
494 self._lgt = i
494 self._lgt = i
495 self._extra = []
495 self._extra = []
496 else:
496 else:
497 self._extra = self._extra[: i - self._lgt]
497 self._extra = self._extra[: i - self._lgt]
498
498
499
499
500 class PersistentNodeMapIndexObject(IndexObject):
500 class PersistentNodeMapIndexObject(IndexObject):
501 """a Debug oriented class to test persistent nodemap
501 """a Debug oriented class to test persistent nodemap
502
502
503 We need a simple python object to test API and higher level behavior. See
503 We need a simple python object to test API and higher level behavior. See
504 the Rust implementation for more serious usage. This should be used only
504 the Rust implementation for more serious usage. This should be used only
505 through the dedicated `devel.persistent-nodemap` config.
505 through the dedicated `devel.persistent-nodemap` config.
506 """
506 """
507
507
508 def nodemap_data_all(self):
508 def nodemap_data_all(self):
509 """Return bytes containing a full serialization of a nodemap
509 """Return bytes containing a full serialization of a nodemap
510
510
511 The nodemap should be valid for the full set of revisions in the
511 The nodemap should be valid for the full set of revisions in the
512 index."""
512 index."""
513 return nodemaputil.persistent_data(self)
513 return nodemaputil.persistent_data(self)
514
514
515 def nodemap_data_incremental(self):
515 def nodemap_data_incremental(self):
516 """Return bytes containing a incremental update to persistent nodemap
516 """Return bytes containing a incremental update to persistent nodemap
517
517
518 This containst the data for an append-only update of the data provided
518 This containst the data for an append-only update of the data provided
519 in the last call to `update_nodemap_data`.
519 in the last call to `update_nodemap_data`.
520 """
520 """
521 if self._nm_root is None:
521 if self._nm_root is None:
522 return None
522 return None
523 docket = self._nm_docket
523 docket = self._nm_docket
524 changed, data = nodemaputil.update_persistent_data(
524 changed, data = nodemaputil.update_persistent_data(
525 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
525 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
526 )
526 )
527
527
528 self._nm_root = self._nm_max_idx = self._nm_docket = None
528 self._nm_root = self._nm_max_idx = self._nm_docket = None
529 return docket, changed, data
529 return docket, changed, data
530
530
531 def update_nodemap_data(self, docket, nm_data):
531 def update_nodemap_data(self, docket, nm_data):
532 """provide full block of persisted binary data for a nodemap
532 """provide full block of persisted binary data for a nodemap
533
533
534 The data are expected to come from disk. See `nodemap_data_all` for a
534 The data are expected to come from disk. See `nodemap_data_all` for a
535 produceur of such data."""
535 produceur of such data."""
536 if nm_data is not None:
536 if nm_data is not None:
537 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
537 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
538 if self._nm_root:
538 if self._nm_root:
539 self._nm_docket = docket
539 self._nm_docket = docket
540 else:
540 else:
541 self._nm_root = self._nm_max_idx = self._nm_docket = None
541 self._nm_root = self._nm_max_idx = self._nm_docket = None
542
542
543
543
544 class InlinedIndexObject(BaseIndexObject):
544 class InlinedIndexObject(BaseIndexObject):
545 def __init__(self, data, inline=0):
545 def __init__(self, data, inline=0):
546 self._data = data
546 self._data = data
547 self._lgt = self._inline_scan(None)
547 self._lgt = self._inline_scan(None)
548 self._inline_scan(self._lgt)
548 self._inline_scan(self._lgt)
549 self._extra = []
549 self._extra = []
550
550
551 def _inline_scan(self, lgt):
551 def _inline_scan(self, lgt):
552 off = 0
552 off = 0
553 if lgt is not None:
553 if lgt is not None:
554 self._offsets = [0] * lgt
554 self._offsets = [0] * lgt
555 count = 0
555 count = 0
556 while off <= len(self._data) - self.entry_size:
556 while off <= len(self._data) - self.entry_size:
557 start = off + self.big_int_size
557 start = off + self.big_int_size
558 (s,) = struct.unpack(
558 (s,) = struct.unpack(
559 b'>i',
559 b'>i',
560 self._data[start : start + self.int_size],
560 self._data[start : start + self.int_size],
561 )
561 )
562 if lgt is not None:
562 if lgt is not None:
563 self._offsets[count] = off
563 self._offsets[count] = off
564 count += 1
564 count += 1
565 off += self.entry_size + s
565 off += self.entry_size + s
566 if off != len(self._data):
566 if off != len(self._data):
567 raise ValueError(b"corrupted data")
567 raise ValueError(b"corrupted data")
568 return count
568 return count
569
569
570 def __delitem__(self, i):
570 def __delitem__(self, i):
571 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
571 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
572 raise ValueError(b"deleting slices only supports a:-1 with step 1")
572 raise ValueError(b"deleting slices only supports a:-1 with step 1")
573 i = i.start
573 i = i.start
574 self._check_index(i)
574 self._check_index(i)
575 self._stripnodes(i)
575 self._stripnodes(i)
576 if i < self._lgt:
576 if i < self._lgt:
577 self._offsets = self._offsets[:i]
577 self._offsets = self._offsets[:i]
578 self._lgt = i
578 self._lgt = i
579 self._extra = []
579 self._extra = []
580 else:
580 else:
581 self._extra = self._extra[: i - self._lgt]
581 self._extra = self._extra[: i - self._lgt]
582
582
583 def _calculate_index(self, i):
583 def _calculate_index(self, i):
584 return self._offsets[i]
584 return self._offsets[i]
585
585
586
586
def parse_index2(data, inline, revlogv2=False):
    """Build an index object from raw index ``data``.

    Returns a ``(index, cache)`` pair; ``cache`` is only populated for
    inline revlogs, where the revision data is interleaved with the index.
    """
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    if revlogv2:
        return IndexObject2(data), None
    return IndexObject(data), None
593
593
594
594
def parse_index_cl_v2(data):
    """Build a changelog-v2 index object from raw index ``data``."""
    index = IndexChangelogV2(data)
    return index, None
597
597
598
598
class IndexObject2(IndexObject):
    """Revlog-v2 flavor of :class:`IndexObject` (wider index entries)."""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # entries already flushed to disk are immutable here
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        fields = list(self[rev])
        fields[0] = offset_flags
        fields[8] = sidedata_offset
        fields[9] = sidedata_length
        fields[11] = compression_mode
        self._extra[rev - self._lgt] = self._pack_entry(rev, tuple(fields))

    def _unpack_entry(self, rev, data):
        """Decode one raw v2 entry into the 12-item tuple used internally."""
        fields = self.index_format.unpack(data)
        # the data and sidedata compression modes share one packed byte,
        # two bits each
        comp = fields[10]
        return fields[:10] + (comp & 3, (comp >> 2) & 3)

    def _pack_entry(self, rev, entry):
        """Encode the internal 12-item tuple back into raw v2 bytes."""
        comp_field = (entry[10] & 3) | ((entry[11] & 3) << 2)
        return self.index_format.pack(*(entry[:10] + (comp_field,)))

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        return self._pack_entry(rev, self[rev])

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
657
657
658
658
class IndexChangelogV2(IndexObject2):
    """Changelog flavor of the v2 index.

    The changelog stores neither a link revision nor explicit parents in
    the positions revlogs use for them: the link revision is the revision
    number itself, so it is reconstructed on unpack and dropped on pack.
    """

    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        fields = self.index_format.unpack(data)
        # re-insert the implicit (rev, rev) link/parent slots
        entry = fields[:3] + (rev, rev) + fields[3:8]
        comp = fields[8]
        return entry + (comp & 3, (comp >> 2) & 3)

    def _pack_entry(self, rev, entry):
        # the implicit slots must still match the revision being written
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        comp_field = (entry[10] & 3) | ((entry[11] & 3) << 2)
        packed = entry[:3] + entry[5:10] + (comp_field,)
        return self.index_format.pack(*packed)
677
677
678
678
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    index = PersistentNodeMapIndexObject(data)
    return index, None
682
682
683
683
def parse_dirstate(dmap, copymap, st):
    """Fill ``dmap`` and ``copymap`` from raw v1 dirstate bytes ``st``.

    Returns the pair of parent nodes stored in the 40-byte header.
    """
    parents = [st[:20], st[20:40]]
    header_fmt = b">cllll"
    header_size = struct.calcsize(header_fmt)
    total = len(st)
    unpack = struct.unpack  # local alias: this loop is hot
    pos = 40
    while pos < total:
        fields = unpack(header_fmt, st[pos : pos + header_size])
        pos += header_size
        end = pos + fields[4]  # fields[4] is the filename length
        fname = st[pos:end]
        pos = end
        if b'\0' in fname:
            # "name\0source" encodes a copy record
            fname, source = fname.split(b'\0')
            copymap[fname] = source
        dmap[fname] = DirstateItem.from_v1_data(*fields[:4])
    return parents
703
703
704
704
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize ``dmap``/``copymap`` into raw v1 dirstate bytes.

    ``pl`` is the pair of parent nodes and ``now`` the current wall-clock
    time, used to invalidate mtimes recorded within the current second.
    """
    now = int(now)
    buf = stringio()
    write = buf.write  # bind once: the loop below is hot
    write(b"".join(pl))
    for fname, item in pycompat.iteritems(dmap):
        if item.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            item.set_possibly_dirty()

        if fname in copymap:
            # copies are stored as "name\0source" in the filename field
            fname = b"%s\0%s" % (fname, copymap[fname])
        header = struct.pack(
            b">cllll",
            item.v1_state(),
            item.v1_mode(),
            item.v1_size(),
            item.v1_mtime(),
            len(fname),
        )
        write(header)
        write(fname)
    return buf.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now