##// END OF EJS Templates
dirstate-item: replace a call to new_normal...
marmoute -
r48974:79ebbe19 default
parent child Browse files
Show More
@@ -1,743 +1,746 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from ..thirdparty import attr
17 from ..thirdparty import attr
18 from .. import (
18 from .. import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 revlogutils,
21 revlogutils,
22 util,
22 util,
23 )
23 )
24
24
25 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import constants as revlog_constants
26 from ..revlogutils import constants as revlog_constants
27
27
28 stringio = pycompat.bytesio
28 stringio = pycompat.bytesio
29
29
30
30
31 _pack = struct.pack
31 _pack = struct.pack
32 _unpack = struct.unpack
32 _unpack = struct.unpack
33 _compress = zlib.compress
33 _compress = zlib.compress
34 _decompress = zlib.decompress
34 _decompress = zlib.decompress
35
35
36
36
37 # a special value used internally for `size` if the file come from the other parent
37 # a special value used internally for `size` if the file come from the other parent
38 FROM_P2 = -2
38 FROM_P2 = -2
39
39
40 # a special value used internally for `size` if the file is modified/merged/added
40 # a special value used internally for `size` if the file is modified/merged/added
41 NONNORMAL = -1
41 NONNORMAL = -1
42
42
43 # a special value used internally for `time` if the time is ambigeous
43 # a special value used internally for `time` if the time is ambigeous
44 AMBIGUOUS_TIME = -1
44 AMBIGUOUS_TIME = -1
45
45
46
46
@attr.s(slots=True, init=False)
class DirstateItem(object):
    """A single dirstate entry.

    It carries several attributes:

    # file tracking
    - wc_tracked: the file is tracked by the working copy
    - p1_tracked: the file is tracked in the working copy's first parent
    - p2_info: the file was involved in some merge operation, either
      because it was actually merged, or because the p2 version was
      ahead, or because a rename moved it there.  In every case
      `hg status` will want to display it as modified.

    # file state expected from the p1 manifest
    - mode: the file mode in p1
    - size: the file size in p1

    Both may be None, meaning no useful value to compare against exists:
    either the `status` outcome is already known without looking at the
    disk, or the values are unknown and a full content comparison will be
    needed to decide whether the file is clean.

    # file state on disk, last time it was observed
    - mtime: the last known clean mtime for the file.

    May be None when no cachable state exists, either because it is not
    needed (see above) or because nothing could be cached yet.
    """

    _wc_tracked = attr.ib()
    _p1_tracked = attr.ib()
    _p2_info = attr.ib()
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_info=False,
        has_meaningful_data=True,
        has_meaningful_mtime=True,
        parentfiledata=None,
    ):
        self._wc_tracked = wc_tracked
        self._p1_tracked = p1_tracked
        self._p2_info = p2_info

        # mode/size/mtime stay None unless usable data was supplied
        self._mode = None
        self._size = None
        self._mtime = None
        if parentfiledata is None:
            has_meaningful_mtime = False
            has_meaningful_data = False
        if has_meaningful_data:
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
        if has_meaningful_mtime:
            self._mtime = parentfiledata[2]

    @classmethod
    def new_normal(cls, mode, size, mtime):
        """constructor helping legacy API build a new "normal" item

        Should eventually be removed
        """
        assert size != FROM_P2
        assert size != NONNORMAL
        return cls(
            wc_tracked=True,
            p1_tracked=True,
            parentfiledata=(mode, size, mtime),
        )

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        The dirstate-v1 format is frozen, so unlike __init__ this
        signature is not expected to change.
        """
        if state == b'm':
            return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
        if state == b'a':
            return cls(wc_tracked=True)
        if state == b'r':
            if size == NONNORMAL:
                p1_tracked, p2_info = True, True
            elif size == FROM_P2:
                p1_tracked, p2_info = False, True
            else:
                p1_tracked, p2_info = True, False
            return cls(p1_tracked=p1_tracked, p2_info=p2_info)
        if state == b'n':
            if size == FROM_P2:
                return cls(wc_tracked=True, p2_info=True)
            if size == NONNORMAL:
                return cls(wc_tracked=True, p1_tracked=True)
            if mtime == AMBIGUOUS_TIME:
                # record mode/size but deliberately drop the mtime
                return cls(
                    wc_tracked=True,
                    p1_tracked=True,
                    has_meaningful_mtime=False,
                    parentfiledata=(mode, size, 42),
                )
            return cls.new_normal(mode, size, mtime)
        raise RuntimeError(b'unknown state: %s' % state)

    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        The next status call will then have to actually check the file
        content to make sure it is correct.
        """
        self._mtime = None

    def set_clean(self, mode, size, mtime):
        """mark a file as "clean", cancelling potential "possibly dirty" calls

        Note: this function is a descendant of `dirstate.normal` and is
        currently expected to be called on "normal" entries only.  There is
        no reason for this not to change in the future as long as the code
        is updated to preserve the proper state of non-normal files.
        """
        self._wc_tracked = True
        self._p1_tracked = True
        self._mode = mode
        self._size = size
        self._mtime = mtime

    def set_tracked(self):
        """mark a file as tracked in the working copy

        This will ultimately be called by commands like `hg add`.
        """
        self._wc_tracked = True
        # `set_tracked` replaces various `normallookup` calls, so mark the
        # file as needing lookup.
        #
        # Consider dropping this in the future in favor of something less
        # broad.
        self._mtime = None

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by commands like `hg remove`.
        """
        self._wc_tracked = False
        self._mode = None
        self._size = None
        self._mtime = None

    def drop_merge_data(self):
        """remove all "merge-only" information from a DirstateItem

        To be called by the dirstatemap code when the second parent is
        dropped.
        """
        if self._p2_info:
            self._p2_info = False
            self._mode = None
            self._size = None
            self._mtime = None

    @property
    def mode(self):
        return self.v1_mode()

    @property
    def size(self):
        return self.v1_size()

    @property
    def mtime(self):
        return self.v1_mtime()

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of
        the dirstate-v1 format.  It would make sense to ultimately deprecate
        it in favor of the more "semantic" attributes.
        """
        if not self.any_tracked:
            return b'?'
        return self.v1_state()

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._wc_tracked

    @property
    def any_tracked(self):
        """True if the file is tracked anywhere (wc or parents)"""
        return self._wc_tracked or self._p1_tracked or self._p2_info

    @property
    def added(self):
        """True if the file has been added"""
        return self._wc_tracked and not (self._p1_tracked or self._p2_info)

    @property
    def maybe_clean(self):
        """True if the file has a chance to be in the "clean" state"""
        if self._wc_tracked and self._p1_tracked and not self._p2_info:
            return True
        return False

    @property
    def p1_tracked(self):
        """True if the file is tracked in the first parent manifest"""
        return self._p1_tracked

    @property
    def p2_info(self):
        """True if the file needed to merge or apply any input from p2

        See the class documentation for details.
        """
        return self._wc_tracked and self._p2_info

    @property
    def removed(self):
        """True if the file has been removed"""
        return not self._wc_tracked and (self._p1_tracked or self._p2_info)

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        if not self.any_tracked:
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        if self.removed:
            return b'r'
        if self._p1_tracked and self._p2_info:
            return b'm'
        if self.added:
            return b'a'
        return b'n'

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode if self._mode is not None else 0

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        if not self.any_tracked:
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        if self.removed and self._p1_tracked and self._p2_info:
            return NONNORMAL
        if self._p2_info:
            return FROM_P2
        if self.removed:
            return 0
        if self.added:
            return NONNORMAL
        if self._size is None:
            return NONNORMAL
        return self._size

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        if not self.any_tracked:
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        if self.removed:
            return 0
        if self._mtime is None:
            return AMBIGUOUS_TIME
        if self._p2_info:
            return AMBIGUOUS_TIME
        if not self._p1_tracked:
            return AMBIGUOUS_TIME
        return self._mtime

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self.v1_state() == b'n' and self.v1_mtime() == now
347
350
348
351
def gettype(q):
    """Return the low 16 bits of *q* as an int (the "type" part of an
    offset/type packed value)."""
    low_mask = 0xFFFF
    return int(q & low_mask)
351
354
352
355
class BaseIndexObject(object):
    # Can this object be passed to an algorithm implemented in Rust?
    rust_ext_compat = 0
    # Format of an index entry, in Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or
    # for nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        # size in bytes of one packed index entry
        return self.index_format.size

    @property
    def nodemap(self):
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily built node -> rev mapping, seeded with the null node
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for rev in range(len(self)):
            nodemap[self[rev][7]] = rev
        return nodemap

    def has_node(self, node):
        """return True if the node exists in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # drop stripped revisions from the cached nodemap, if it was built
        if '_nodemap' in vars(self):
            for rev in range(start, len(self)):
                del self._nodemap[self[rev][7]]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        packed = self._pack_entry(len(self), tup)
        self._extra.append(packed)

    def _pack_entry(self, rev, entry):
        # v1 entries carry no sidedata; the extra fields must be zero
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i < self._lgt:
            start = self._calculate_index(i)
            data = self._data[start : start + self.entry_size]
        else:
            data = self._extra[i - self._lgt]
        entry = self._unpack_entry(i, data)
        if i == 0 and self._lgt:
            # the first on-disk entry stores the version header in its
            # offset field; rebuild a proper offset/type value
            offset = revlogutils.offset_type(0, gettype(entry[0]))
            entry = (offset,) + entry[1:]
        return entry

    def _unpack_entry(self, rev, data):
        unpacked = self.index_format.unpack(data)
        # pad with the fields absent from the v1 on-disk format
        return unpacked + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        packed = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # revision 0 shares its leading bytes with the index header
            packed = packed[revlog_constants.INDEX_HEADER.size :]
        return packed
478
481
479
482
class IndexObject(BaseIndexObject):
    def __init__(self, data):
        # data must hold a whole number of fixed-size entries
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        # fixed-size entries: byte offset is a simple multiplication
        return i * self.entry_size

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            # truncation reaches into the on-disk portion
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            # only in-memory entries need trimming
            self._extra = self._extra[: i - self._lgt]
506
509
507
510
class PersistentNodeMapIndexObject(IndexObject):
    """a debug-oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior.
    See the Rust implementation for more serious usage.  This should be
    used only through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to the persistent
        nodemap

        This contains the data for an append-only update of the data
        provided in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the pending state has been consumed; reset it
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide a full block of persisted binary data for a nodemap

        The data are expected to come from disk.  See `nodemap_data_all`
        for a producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
550
553
551
554
class InlinedIndexObject(BaseIndexObject):
    def __init__(self, data, inline=0):
        self._data = data
        # first pass counts the entries, second pass records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            # the compressed-chunk length follows the 8-byte offset field
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            # skip over the entry and the inlined revision data
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        # entries are variable-size; use the offsets recorded during scan
        return self._offsets[i]
593
596
594
597
def parse_index2(data, inline, revlogv2=False):
    """Build an index object from raw index data.

    Returns an ``(index, cache)`` pair. The cache is only populated for
    inline indexes, where it carries the raw data alongside the parsed
    object.
    """
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    if revlogv2:
        return IndexObject2(data), None
    return IndexObject(data), None
601
604
602
605
def parse_index_cl_v2(data):
    """Build a changelog-v2 index object; no cache tuple is returned."""
    index = IndexChangelogV2(data)
    return index, None
605
608
606
609
class IndexObject2(IndexObject):
    """Index object using the revlog v2 on-disk entry format."""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # entries below _lgt are already persisted on disk and thus
            # immutable from here
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            # overwrite the in-memory (not yet persisted) packed entry
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        """Decode one on-disk entry into the in-memory tuple form.

        The two compression modes share a single byte on disk: data
        compression in bits 0-1, sidedata compression in bits 2-3.
        """
        data = self.index_format.unpack(data)
        entry = data[:10]
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        """Encode an in-memory entry tuple back to the on-disk format."""
        data = entry[:10]
        # fold the two 2-bit compression modes into one byte (see
        # _unpack_entry for the layout)
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)

        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        # revlog v2 keeps version information in the docket file, so any
        # attempt to emit an in-index header is a caller bug
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
665
668
666
669
class IndexChangelogV2(IndexObject2):
    """Index object for the changelog-specific v2 entry format."""

    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        """Decode one on-disk changelog entry into the in-memory tuple.

        The changelog stores no base-rev or link-rev on disk: a changelog
        revision is always its own delta base and linked to itself, so
        both fields are synthesized as `rev` here.
        """
        items = self.index_format.unpack(data)
        entry = items[:3] + (rev, rev) + items[3:8]
        # compression modes share one byte: data in bits 0-1, sidedata in
        # bits 2-3 (mirrors IndexObject2._unpack_entry)
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        """Encode an in-memory entry back to the on-disk changelog format."""
        # base-rev and link-rev must equal `rev` (see _unpack_entry) and
        # are dropped from the on-disk representation
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
685
688
686
689
def parse_index_devel_nodemap(data, inline):
    """Like parse_index2, but always return a PersistentNodeMapIndexObject.

    The `inline` argument is accepted for signature compatibility and
    ignored; no cache tuple is returned.
    """
    index = PersistentNodeMapIndexObject(data)
    return index, None
690
693
691
694
def parse_dirstate(dmap, copymap, st):
    """Parse a v1 dirstate blob, filling `dmap` and `copymap` in place.

    Returns the two 20-byte parent nodes stored at the head of the data.
    Each entry is a fixed-size header (state, mode, size, mtime, filename
    length) followed by the filename, optionally ``\\0``-joined with its
    copy source.
    """
    parents = [st[:20], st[20:40]]
    entry_fmt = b">cllll"
    entry_size = struct.calcsize(entry_fmt)
    total = len(st)
    offset = 40

    while offset < total:
        header_end = offset + entry_size
        # a literal format string here is faster than a variable lookup
        entry = _unpack(b">cllll", st[offset:header_end])
        offset = header_end + entry[4]
        fname = st[header_end:offset]
        if b'\0' in fname:
            fname, copy_source = fname.split(b'\0')
            copymap[fname] = copy_source
        dmap[fname] = DirstateItem.from_v1_data(*entry[:4])
    return parents
711
714
712
715
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize a dirstate map into the v1 on-disk format.

    `pl` is the pair of parent nodes, written first; `now` is the current
    wall-clock time, used to invalidate mtimes too recent to be trusted.
    Returns the packed bytes.
    """
    now = int(now)
    buf = stringio()
    buf.write(b"".join(pl))
    for fname, item in pycompat.iteritems(dmap):
        if item.need_delay(now):
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            item.set_possibly_dirty()

        if fname in copymap:
            # copies are stored as "dest\0source" in the filename field
            fname = b"%s\0%s" % (fname, copymap[fname])
        header = _pack(
            b">cllll",
            item.v1_state(),
            item.v1_mode(),
            item.v1_size(),
            item.v1_mtime(),
            len(fname),
        )
        buf.write(header)
        buf.write(fname)
    return buf.getvalue()
General Comments 0
You need to be logged in to leave comments. Login now