dirstate-item: replace call to new_added...
marmoute
r48968:d342815f default
@@ -1,767 +1,767 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from ..thirdparty import attr
17 from ..thirdparty import attr
18 from .. import (
18 from .. import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 revlogutils,
21 revlogutils,
22 util,
22 util,
23 )
23 )
24
24
25 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import constants as revlog_constants
26 from ..revlogutils import constants as revlog_constants
27
27
28 stringio = pycompat.bytesio
28 stringio = pycompat.bytesio
29
29
30
30
31 _pack = struct.pack
31 _pack = struct.pack
32 _unpack = struct.unpack
32 _unpack = struct.unpack
33 _compress = zlib.compress
33 _compress = zlib.compress
34 _decompress = zlib.decompress
34 _decompress = zlib.decompress
35
35
36
36
37 # a special value used internally for `size` if the file comes from the other parent
37 # a special value used internally for `size` if the file comes from the other parent
38 FROM_P2 = -2
38 FROM_P2 = -2
39
39
40 # a special value used internally for `size` if the file is modified/merged/added
40 # a special value used internally for `size` if the file is modified/merged/added
41 NONNORMAL = -1
41 NONNORMAL = -1
42
42
43 # a special value used internally for `time` if the time is ambiguous
43 # a special value used internally for `time` if the time is ambiguous
44 AMBIGUOUS_TIME = -1
44 AMBIGUOUS_TIME = -1
45
45
46
46
47 @attr.s(slots=True, init=False)
47 @attr.s(slots=True, init=False)
48 class DirstateItem(object):
48 class DirstateItem(object):
49 """represent a dirstate entry
49 """represent a dirstate entry
50
50
51 It holds multiple attributes
51 It holds multiple attributes
52
52
53 # about file tracking
53 # about file tracking
54 - wc_tracked: is the file tracked by the working copy
54 - wc_tracked: is the file tracked by the working copy
55 - p1_tracked: is the file tracked in working copy first parent
55 - p1_tracked: is the file tracked in working copy first parent
56 - p2_info: the file has been involved in some merge operation. Either
56 - p2_info: the file has been involved in some merge operation. Either
57 because it was actually merged, or because the p2 version was
57 because it was actually merged, or because the p2 version was
58 ahead, or because some rename moved it there. In either case
58 ahead, or because some rename moved it there. In either case
59 `hg status` will want it displayed as modified.
59 `hg status` will want it displayed as modified.
60
60
61 # about the file state expected from p1 manifest:
61 # about the file state expected from p1 manifest:
62 - mode: the file mode in p1
62 - mode: the file mode in p1
63 - size: the file size in p1
63 - size: the file size in p1
64
64
65 These values can be set to None, which means we don't have a meaningful value
65 These values can be set to None, which means we don't have a meaningful value
66 to compare with. Either because we don't really care about them as their
66 to compare with. Either because we don't really care about them as their
67 `status` is known without having to look at the disk, or because we don't
67 `status` is known without having to look at the disk, or because we don't
68 know them right now and a full comparison will be needed to find out if
68 know them right now and a full comparison will be needed to find out if
69 the file is clean.
69 the file is clean.
70
70
71 # about the file state on disk last time we saw it:
71 # about the file state on disk last time we saw it:
72 - mtime: the last known clean mtime for the file.
72 - mtime: the last known clean mtime for the file.
73
73
74 This value can be set to None if no cacheable state exists. Either because we
74 This value can be set to None if no cacheable state exists. Either because we
75 do not care (see previous section) or because we could not cache something
75 do not care (see previous section) or because we could not cache something
76 yet.
76 yet.
77 """
77 """
78
78
79 _wc_tracked = attr.ib()
79 _wc_tracked = attr.ib()
80 _p1_tracked = attr.ib()
80 _p1_tracked = attr.ib()
81 _p2_info = attr.ib()
81 _p2_info = attr.ib()
82 _mode = attr.ib()
82 _mode = attr.ib()
83 _size = attr.ib()
83 _size = attr.ib()
84 _mtime = attr.ib()
84 _mtime = attr.ib()
85
85
86 def __init__(
86 def __init__(
87 self,
87 self,
88 wc_tracked=False,
88 wc_tracked=False,
89 p1_tracked=False,
89 p1_tracked=False,
90 p2_info=False,
90 p2_info=False,
91 has_meaningful_data=True,
91 has_meaningful_data=True,
92 has_meaningful_mtime=True,
92 has_meaningful_mtime=True,
93 parentfiledata=None,
93 parentfiledata=None,
94 ):
94 ):
95 self._wc_tracked = wc_tracked
95 self._wc_tracked = wc_tracked
96 self._p1_tracked = p1_tracked
96 self._p1_tracked = p1_tracked
97 self._p2_info = p2_info
97 self._p2_info = p2_info
98
98
99 self._mode = None
99 self._mode = None
100 self._size = None
100 self._size = None
101 self._mtime = None
101 self._mtime = None
102 if parentfiledata is None:
102 if parentfiledata is None:
103 has_meaningful_mtime = False
103 has_meaningful_mtime = False
104 has_meaningful_data = False
104 has_meaningful_data = False
105 if has_meaningful_data:
105 if has_meaningful_data:
106 self._mode = parentfiledata[0]
106 self._mode = parentfiledata[0]
107 self._size = parentfiledata[1]
107 self._size = parentfiledata[1]
108 if has_meaningful_mtime:
108 if has_meaningful_mtime:
109 self._mtime = parentfiledata[2]
109 self._mtime = parentfiledata[2]
110
110
111 @classmethod
111 @classmethod
112 def new_added(cls):
112 def new_added(cls):
113 """constructor to help legacy API to build a new "added" item
113 """constructor to help legacy API to build a new "added" item
114
114
115 Should eventually be removed
115 Should eventually be removed
116 """
116 """
117 return cls(wc_tracked=True)
117 return cls(wc_tracked=True)
118
118
119 @classmethod
119 @classmethod
120 def new_from_p2(cls):
120 def new_from_p2(cls):
121 """constructor to help legacy API to build a new "from_p2" item
121 """constructor to help legacy API to build a new "from_p2" item
122
122
123 Should eventually be removed
123 Should eventually be removed
124 """
124 """
125 return cls(wc_tracked=True, p2_info=True)
125 return cls(wc_tracked=True, p2_info=True)
126
126
127 @classmethod
127 @classmethod
128 def new_possibly_dirty(cls):
128 def new_possibly_dirty(cls):
129 """constructor to help legacy API to build a new "possibly_dirty" item
129 """constructor to help legacy API to build a new "possibly_dirty" item
130
130
131 Should eventually be removed
131 Should eventually be removed
132 """
132 """
133 return cls(wc_tracked=True, p1_tracked=True)
133 return cls(wc_tracked=True, p1_tracked=True)
134
134
135 @classmethod
135 @classmethod
136 def new_normal(cls, mode, size, mtime):
136 def new_normal(cls, mode, size, mtime):
137 """constructor to help legacy API to build a new "normal" item
137 """constructor to help legacy API to build a new "normal" item
138
138
139 Should eventually be removed
139 Should eventually be removed
140 """
140 """
141 assert size != FROM_P2
141 assert size != FROM_P2
142 assert size != NONNORMAL
142 assert size != NONNORMAL
143 return cls(
143 return cls(
144 wc_tracked=True,
144 wc_tracked=True,
145 p1_tracked=True,
145 p1_tracked=True,
146 parentfiledata=(mode, size, mtime),
146 parentfiledata=(mode, size, mtime),
147 )
147 )
148
148
149 @classmethod
149 @classmethod
150 def from_v1_data(cls, state, mode, size, mtime):
150 def from_v1_data(cls, state, mode, size, mtime):
151 """Build a new DirstateItem object from V1 data
151 """Build a new DirstateItem object from V1 data
152
152
153 Since the dirstate-v1 format is frozen, the signature of this function
153 Since the dirstate-v1 format is frozen, the signature of this function
154 is not expected to change, unlike the __init__ one.
154 is not expected to change, unlike the __init__ one.
155 """
155 """
156 if state == b'm':
156 if state == b'm':
157 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
157 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
158 elif state == b'a':
158 elif state == b'a':
159 return cls.new_added()
159 return cls(wc_tracked=True)
160 elif state == b'r':
160 elif state == b'r':
161 if size == NONNORMAL:
161 if size == NONNORMAL:
162 p1_tracked = True
162 p1_tracked = True
163 p2_info = True
163 p2_info = True
164 elif size == FROM_P2:
164 elif size == FROM_P2:
165 p1_tracked = False
165 p1_tracked = False
166 p2_info = True
166 p2_info = True
167 else:
167 else:
168 p1_tracked = True
168 p1_tracked = True
169 p2_info = False
169 p2_info = False
170 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
170 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
171 elif state == b'n':
171 elif state == b'n':
172 if size == FROM_P2:
172 if size == FROM_P2:
173 return cls.new_from_p2()
173 return cls.new_from_p2()
174 elif size == NONNORMAL:
174 elif size == NONNORMAL:
175 return cls.new_possibly_dirty()
175 return cls.new_possibly_dirty()
176 elif mtime == AMBIGUOUS_TIME:
176 elif mtime == AMBIGUOUS_TIME:
177 instance = cls.new_normal(mode, size, 42)
177 instance = cls.new_normal(mode, size, 42)
178 instance._mtime = None
178 instance._mtime = None
179 return instance
179 return instance
180 else:
180 else:
181 return cls.new_normal(mode, size, mtime)
181 return cls.new_normal(mode, size, mtime)
182 else:
182 else:
183 raise RuntimeError(b'unknown state: %s' % state)
183 raise RuntimeError(b'unknown state: %s' % state)
184
184
185 def set_possibly_dirty(self):
185 def set_possibly_dirty(self):
186 """Mark a file as "possibly dirty"
186 """Mark a file as "possibly dirty"
187
187
188 This means the next status call will have to actually check its content
188 This means the next status call will have to actually check its content
189 to make sure it is correct.
189 to make sure it is correct.
190 """
190 """
191 self._mtime = None
191 self._mtime = None
192
192
193 def set_clean(self, mode, size, mtime):
193 def set_clean(self, mode, size, mtime):
194 """mark a file as "clean" cancelling potential "possibly dirty call"
194 """mark a file as "clean" cancelling potential "possibly dirty call"
195
195
196 Note: this function is a descendant of `dirstate.normal` and is
196 Note: this function is a descendant of `dirstate.normal` and is
197 currently expected to be called on "normal" entries only. There is no
197 currently expected to be called on "normal" entries only. There is no
198 reason for this not to change in the future as long as the code is
198 reason for this not to change in the future as long as the code is
199 updated to preserve the proper state of the non-normal files.
199 updated to preserve the proper state of the non-normal files.
200 """
200 """
201 self._wc_tracked = True
201 self._wc_tracked = True
202 self._p1_tracked = True
202 self._p1_tracked = True
203 self._mode = mode
203 self._mode = mode
204 self._size = size
204 self._size = size
205 self._mtime = mtime
205 self._mtime = mtime
206
206
207 def set_tracked(self):
207 def set_tracked(self):
208 """mark a file as tracked in the working copy
208 """mark a file as tracked in the working copy
209
209
210 This will ultimately be called by commands like `hg add`.
210 This will ultimately be called by commands like `hg add`.
211 """
211 """
212 self._wc_tracked = True
212 self._wc_tracked = True
213 # `set_tracked` is replacing various `normallookup` calls. So we mark
213 # `set_tracked` is replacing various `normallookup` calls. So we mark
214 # the files as needing lookup
214 # the files as needing lookup
215 #
215 #
216 # Consider dropping this in the future in favor of something less broad.
216 # Consider dropping this in the future in favor of something less broad.
217 self._mtime = None
217 self._mtime = None
218
218
219 def set_untracked(self):
219 def set_untracked(self):
220 """mark a file as untracked in the working copy
220 """mark a file as untracked in the working copy
221
221
222 This will ultimately be called by commands like `hg remove`.
222 This will ultimately be called by commands like `hg remove`.
223 """
223 """
224 self._wc_tracked = False
224 self._wc_tracked = False
225 self._mode = None
225 self._mode = None
226 self._size = None
226 self._size = None
227 self._mtime = None
227 self._mtime = None
228
228
229 def drop_merge_data(self):
229 def drop_merge_data(self):
230 """remove all "merge-only" from a DirstateItem
230 """remove all "merge-only" from a DirstateItem
231
231
232 This is to be called by the dirstatemap code when the second parent is dropped
232 This is to be called by the dirstatemap code when the second parent is dropped
233 """
233 """
234 if self._p2_info:
234 if self._p2_info:
235 self._p2_info = False
235 self._p2_info = False
236 self._mode = None
236 self._mode = None
237 self._size = None
237 self._size = None
238 self._mtime = None
238 self._mtime = None
239
239
240 @property
240 @property
241 def mode(self):
241 def mode(self):
242 return self.v1_mode()
242 return self.v1_mode()
243
243
244 @property
244 @property
245 def size(self):
245 def size(self):
246 return self.v1_size()
246 return self.v1_size()
247
247
248 @property
248 @property
249 def mtime(self):
249 def mtime(self):
250 return self.v1_mtime()
250 return self.v1_mtime()
251
251
252 @property
252 @property
253 def state(self):
253 def state(self):
254 """
254 """
255 States are:
255 States are:
256 n normal
256 n normal
257 m needs merging
257 m needs merging
258 r marked for removal
258 r marked for removal
259 a marked for addition
259 a marked for addition
260
260
261 XXX This "state" is a bit obscure and mostly a direct expression of the
261 XXX This "state" is a bit obscure and mostly a direct expression of the
262 dirstatev1 format. It would make sense to ultimately deprecate it in
262 dirstatev1 format. It would make sense to ultimately deprecate it in
263 favor of the more "semantic" attributes.
263 favor of the more "semantic" attributes.
264 """
264 """
265 if not self.any_tracked:
265 if not self.any_tracked:
266 return b'?'
266 return b'?'
267 return self.v1_state()
267 return self.v1_state()
268
268
269 @property
269 @property
270 def tracked(self):
270 def tracked(self):
271 """True is the file is tracked in the working copy"""
271 """True is the file is tracked in the working copy"""
272 return self._wc_tracked
272 return self._wc_tracked
273
273
274 @property
274 @property
275 def any_tracked(self):
275 def any_tracked(self):
276 """True is the file is tracked anywhere (wc or parents)"""
276 """True is the file is tracked anywhere (wc or parents)"""
277 return self._wc_tracked or self._p1_tracked or self._p2_info
277 return self._wc_tracked or self._p1_tracked or self._p2_info
278
278
279 @property
279 @property
280 def added(self):
280 def added(self):
281 """True if the file has been added"""
281 """True if the file has been added"""
282 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
282 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
283
283
284 @property
284 @property
285 def maybe_clean(self):
285 def maybe_clean(self):
286 """True if the file has a chance to be in the "clean" state"""
286 """True if the file has a chance to be in the "clean" state"""
287 if not self._wc_tracked:
287 if not self._wc_tracked:
288 return False
288 return False
289 elif not self._p1_tracked:
289 elif not self._p1_tracked:
290 return False
290 return False
291 elif self._p2_info:
291 elif self._p2_info:
292 return False
292 return False
293 return True
293 return True
294
294
295 @property
295 @property
296 def p1_tracked(self):
296 def p1_tracked(self):
297 """True if the file is tracked in the first parent manifest"""
297 """True if the file is tracked in the first parent manifest"""
298 return self._p1_tracked
298 return self._p1_tracked
299
299
300 @property
300 @property
301 def p2_info(self):
301 def p2_info(self):
302 """True if the file needed to merge or apply any input from p2
302 """True if the file needed to merge or apply any input from p2
303
303
304 See the class documentation for details.
304 See the class documentation for details.
305 """
305 """
306 return self._wc_tracked and self._p2_info
306 return self._wc_tracked and self._p2_info
307
307
308 @property
308 @property
309 def removed(self):
309 def removed(self):
310 """True if the file has been removed"""
310 """True if the file has been removed"""
311 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
311 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
312
312
313 def v1_state(self):
313 def v1_state(self):
314 """return a "state" suitable for v1 serialization"""
314 """return a "state" suitable for v1 serialization"""
315 if not self.any_tracked:
315 if not self.any_tracked:
316 # the object has no state to record, this is -currently-
316 # the object has no state to record, this is -currently-
317 # unsupported
317 # unsupported
318 raise RuntimeError('untracked item')
318 raise RuntimeError('untracked item')
319 elif self.removed:
319 elif self.removed:
320 return b'r'
320 return b'r'
321 elif self._p1_tracked and self._p2_info:
321 elif self._p1_tracked and self._p2_info:
322 return b'm'
322 return b'm'
323 elif self.added:
323 elif self.added:
324 return b'a'
324 return b'a'
325 else:
325 else:
326 return b'n'
326 return b'n'
327
327
328 def v1_mode(self):
328 def v1_mode(self):
329 """return a "mode" suitable for v1 serialization"""
329 """return a "mode" suitable for v1 serialization"""
330 return self._mode if self._mode is not None else 0
330 return self._mode if self._mode is not None else 0
331
331
332 def v1_size(self):
332 def v1_size(self):
333 """return a "size" suitable for v1 serialization"""
333 """return a "size" suitable for v1 serialization"""
334 if not self.any_tracked:
334 if not self.any_tracked:
335 # the object has no state to record, this is -currently-
335 # the object has no state to record, this is -currently-
336 # unsupported
336 # unsupported
337 raise RuntimeError('untracked item')
337 raise RuntimeError('untracked item')
338 elif self.removed and self._p1_tracked and self._p2_info:
338 elif self.removed and self._p1_tracked and self._p2_info:
339 return NONNORMAL
339 return NONNORMAL
340 elif self._p2_info:
340 elif self._p2_info:
341 return FROM_P2
341 return FROM_P2
342 elif self.removed:
342 elif self.removed:
343 return 0
343 return 0
344 elif self.added:
344 elif self.added:
345 return NONNORMAL
345 return NONNORMAL
346 elif self._size is None:
346 elif self._size is None:
347 return NONNORMAL
347 return NONNORMAL
348 else:
348 else:
349 return self._size
349 return self._size
350
350
351 def v1_mtime(self):
351 def v1_mtime(self):
352 """return a "mtime" suitable for v1 serialization"""
352 """return a "mtime" suitable for v1 serialization"""
353 if not self.any_tracked:
353 if not self.any_tracked:
354 # the object has no state to record, this is -currently-
354 # the object has no state to record, this is -currently-
355 # unsupported
355 # unsupported
356 raise RuntimeError('untracked item')
356 raise RuntimeError('untracked item')
357 elif self.removed:
357 elif self.removed:
358 return 0
358 return 0
359 elif self._mtime is None:
359 elif self._mtime is None:
360 return AMBIGUOUS_TIME
360 return AMBIGUOUS_TIME
361 elif self._p2_info:
361 elif self._p2_info:
362 return AMBIGUOUS_TIME
362 return AMBIGUOUS_TIME
363 elif not self._p1_tracked:
363 elif not self._p1_tracked:
364 return AMBIGUOUS_TIME
364 return AMBIGUOUS_TIME
365 else:
365 else:
366 return self._mtime
366 return self._mtime
367
367
368 def need_delay(self, now):
368 def need_delay(self, now):
369 """True if the stored mtime would be ambiguous with the current time"""
369 """True if the stored mtime would be ambiguous with the current time"""
370 return self.v1_state() == b'n' and self.v1_mtime() == now
370 return self.v1_state() == b'n' and self.v1_mtime() == now
371
371
372
372
373 def gettype(q):
373 def gettype(q):
374 return int(q & 0xFFFF)
374 return int(q & 0xFFFF)
375
375
376
376
377 class BaseIndexObject(object):
377 class BaseIndexObject(object):
378 # Can I be passed to an algorithm implemented in Rust?
378 # Can I be passed to an algorithm implemented in Rust?
379 rust_ext_compat = 0
379 rust_ext_compat = 0
380 # Format of an index entry according to Python's `struct` language
380 # Format of an index entry according to Python's `struct` language
381 index_format = revlog_constants.INDEX_ENTRY_V1
381 index_format = revlog_constants.INDEX_ENTRY_V1
382 # Size of a C unsigned long long int, platform independent
382 # Size of a C unsigned long long int, platform independent
383 big_int_size = struct.calcsize(b'>Q')
383 big_int_size = struct.calcsize(b'>Q')
384 # Size of a C long int, platform independent
384 # Size of a C long int, platform independent
385 int_size = struct.calcsize(b'>i')
385 int_size = struct.calcsize(b'>i')
386 # An empty index entry, used as a default value to be overridden, or nullrev
386 # An empty index entry, used as a default value to be overridden, or nullrev
387 null_item = (
387 null_item = (
388 0,
388 0,
389 0,
389 0,
390 0,
390 0,
391 -1,
391 -1,
392 -1,
392 -1,
393 -1,
393 -1,
394 -1,
394 -1,
395 sha1nodeconstants.nullid,
395 sha1nodeconstants.nullid,
396 0,
396 0,
397 0,
397 0,
398 revlog_constants.COMP_MODE_INLINE,
398 revlog_constants.COMP_MODE_INLINE,
399 revlog_constants.COMP_MODE_INLINE,
399 revlog_constants.COMP_MODE_INLINE,
400 )
400 )
401
401
402 @util.propertycache
402 @util.propertycache
403 def entry_size(self):
403 def entry_size(self):
404 return self.index_format.size
404 return self.index_format.size
405
405
406 @property
406 @property
407 def nodemap(self):
407 def nodemap(self):
408 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
408 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
409 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
409 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
410 return self._nodemap
410 return self._nodemap
411
411
412 @util.propertycache
412 @util.propertycache
413 def _nodemap(self):
413 def _nodemap(self):
414 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
414 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
415 for r in range(0, len(self)):
415 for r in range(0, len(self)):
416 n = self[r][7]
416 n = self[r][7]
417 nodemap[n] = r
417 nodemap[n] = r
418 return nodemap
418 return nodemap
419
419
420 def has_node(self, node):
420 def has_node(self, node):
421 """return True if the node exist in the index"""
421 """return True if the node exist in the index"""
422 return node in self._nodemap
422 return node in self._nodemap
423
423
424 def rev(self, node):
424 def rev(self, node):
425 """return a revision for a node
425 """return a revision for a node
426
426
427 If the node is unknown, raise a RevlogError"""
427 If the node is unknown, raise a RevlogError"""
428 return self._nodemap[node]
428 return self._nodemap[node]
429
429
430 def get_rev(self, node):
430 def get_rev(self, node):
431 """return a revision for a node
431 """return a revision for a node
432
432
433 If the node is unknown, return None"""
433 If the node is unknown, return None"""
434 return self._nodemap.get(node)
434 return self._nodemap.get(node)
435
435
436 def _stripnodes(self, start):
436 def _stripnodes(self, start):
437 if '_nodemap' in vars(self):
437 if '_nodemap' in vars(self):
438 for r in range(start, len(self)):
438 for r in range(start, len(self)):
439 n = self[r][7]
439 n = self[r][7]
440 del self._nodemap[n]
440 del self._nodemap[n]
441
441
442 def clearcaches(self):
442 def clearcaches(self):
443 self.__dict__.pop('_nodemap', None)
443 self.__dict__.pop('_nodemap', None)
444
444
445 def __len__(self):
445 def __len__(self):
446 return self._lgt + len(self._extra)
446 return self._lgt + len(self._extra)
447
447
448 def append(self, tup):
448 def append(self, tup):
449 if '_nodemap' in vars(self):
449 if '_nodemap' in vars(self):
450 self._nodemap[tup[7]] = len(self)
450 self._nodemap[tup[7]] = len(self)
451 data = self._pack_entry(len(self), tup)
451 data = self._pack_entry(len(self), tup)
452 self._extra.append(data)
452 self._extra.append(data)
453
453
454 def _pack_entry(self, rev, entry):
454 def _pack_entry(self, rev, entry):
455 assert entry[8] == 0
455 assert entry[8] == 0
456 assert entry[9] == 0
456 assert entry[9] == 0
457 return self.index_format.pack(*entry[:8])
457 return self.index_format.pack(*entry[:8])
458
458
459 def _check_index(self, i):
459 def _check_index(self, i):
460 if not isinstance(i, int):
460 if not isinstance(i, int):
461 raise TypeError(b"expecting int indexes")
461 raise TypeError(b"expecting int indexes")
462 if i < 0 or i >= len(self):
462 if i < 0 or i >= len(self):
463 raise IndexError
463 raise IndexError
464
464
465 def __getitem__(self, i):
465 def __getitem__(self, i):
466 if i == -1:
466 if i == -1:
467 return self.null_item
467 return self.null_item
468 self._check_index(i)
468 self._check_index(i)
469 if i >= self._lgt:
469 if i >= self._lgt:
470 data = self._extra[i - self._lgt]
470 data = self._extra[i - self._lgt]
471 else:
471 else:
472 index = self._calculate_index(i)
472 index = self._calculate_index(i)
473 data = self._data[index : index + self.entry_size]
473 data = self._data[index : index + self.entry_size]
474 r = self._unpack_entry(i, data)
474 r = self._unpack_entry(i, data)
475 if self._lgt and i == 0:
475 if self._lgt and i == 0:
476 offset = revlogutils.offset_type(0, gettype(r[0]))
476 offset = revlogutils.offset_type(0, gettype(r[0]))
477 r = (offset,) + r[1:]
477 r = (offset,) + r[1:]
478 return r
478 return r
479
479
480 def _unpack_entry(self, rev, data):
480 def _unpack_entry(self, rev, data):
481 r = self.index_format.unpack(data)
481 r = self.index_format.unpack(data)
482 r = r + (
482 r = r + (
483 0,
483 0,
484 0,
484 0,
485 revlog_constants.COMP_MODE_INLINE,
485 revlog_constants.COMP_MODE_INLINE,
486 revlog_constants.COMP_MODE_INLINE,
486 revlog_constants.COMP_MODE_INLINE,
487 )
487 )
488 return r
488 return r
489
489
490 def pack_header(self, header):
490 def pack_header(self, header):
491 """pack header information as binary"""
491 """pack header information as binary"""
492 v_fmt = revlog_constants.INDEX_HEADER
492 v_fmt = revlog_constants.INDEX_HEADER
493 return v_fmt.pack(header)
493 return v_fmt.pack(header)
494
494
495 def entry_binary(self, rev):
495 def entry_binary(self, rev):
496 """return the raw binary string representing a revision"""
496 """return the raw binary string representing a revision"""
497 entry = self[rev]
497 entry = self[rev]
498 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
498 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
499 if rev == 0:
499 if rev == 0:
500 p = p[revlog_constants.INDEX_HEADER.size :]
500 p = p[revlog_constants.INDEX_HEADER.size :]
501 return p
501 return p
502
502
503
503
504 class IndexObject(BaseIndexObject):
504 class IndexObject(BaseIndexObject):
505 def __init__(self, data):
505 def __init__(self, data):
506 assert len(data) % self.entry_size == 0, (
506 assert len(data) % self.entry_size == 0, (
507 len(data),
507 len(data),
508 self.entry_size,
508 self.entry_size,
509 len(data) % self.entry_size,
509 len(data) % self.entry_size,
510 )
510 )
511 self._data = data
511 self._data = data
512 self._lgt = len(data) // self.entry_size
512 self._lgt = len(data) // self.entry_size
513 self._extra = []
513 self._extra = []
514
514
515 def _calculate_index(self, i):
515 def _calculate_index(self, i):
516 return i * self.entry_size
516 return i * self.entry_size
517
517
518 def __delitem__(self, i):
518 def __delitem__(self, i):
519 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
519 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
520 raise ValueError(b"deleting slices only supports a:-1 with step 1")
520 raise ValueError(b"deleting slices only supports a:-1 with step 1")
521 i = i.start
521 i = i.start
522 self._check_index(i)
522 self._check_index(i)
523 self._stripnodes(i)
523 self._stripnodes(i)
524 if i < self._lgt:
524 if i < self._lgt:
525 self._data = self._data[: i * self.entry_size]
525 self._data = self._data[: i * self.entry_size]
526 self._lgt = i
526 self._lgt = i
527 self._extra = []
527 self._extra = []
528 else:
528 else:
529 self._extra = self._extra[: i - self._lgt]
529 self._extra = self._extra[: i - self._lgt]
530
530
531
531
532 class PersistentNodeMapIndexObject(IndexObject):
532 class PersistentNodeMapIndexObject(IndexObject):
533 """a Debug oriented class to test persistent nodemap
533 """a Debug oriented class to test persistent nodemap
534
534
535 We need a simple python object to test API and higher level behavior. See
535 We need a simple python object to test API and higher level behavior. See
536 the Rust implementation for more serious usage. This should be used only
536 the Rust implementation for more serious usage. This should be used only
537 through the dedicated `devel.persistent-nodemap` config.
537 through the dedicated `devel.persistent-nodemap` config.
538 """
538 """
539
539
540 def nodemap_data_all(self):
540 def nodemap_data_all(self):
541 """Return bytes containing a full serialization of a nodemap
541 """Return bytes containing a full serialization of a nodemap
542
542
543 The nodemap should be valid for the full set of revisions in the
543 The nodemap should be valid for the full set of revisions in the
544 index."""
544 index."""
545 return nodemaputil.persistent_data(self)
545 return nodemaputil.persistent_data(self)
546
546
547 def nodemap_data_incremental(self):
547 def nodemap_data_incremental(self):
548 """Return bytes containing a incremental update to persistent nodemap
548 """Return bytes containing a incremental update to persistent nodemap
549
549
550 This contains the data for an append-only update of the data provided
550 This contains the data for an append-only update of the data provided
551 in the last call to `update_nodemap_data`.
551 in the last call to `update_nodemap_data`.
552 """
552 """
553 if self._nm_root is None:
553 if self._nm_root is None:
554 return None
554 return None
555 docket = self._nm_docket
555 docket = self._nm_docket
556 changed, data = nodemaputil.update_persistent_data(
556 changed, data = nodemaputil.update_persistent_data(
557 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
557 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
558 )
558 )
559
559
560 self._nm_root = self._nm_max_idx = self._nm_docket = None
560 self._nm_root = self._nm_max_idx = self._nm_docket = None
561 return docket, changed, data
561 return docket, changed, data
562
562
563 def update_nodemap_data(self, docket, nm_data):
563 def update_nodemap_data(self, docket, nm_data):
564 """provide full block of persisted binary data for a nodemap
564 """provide full block of persisted binary data for a nodemap
565
565
566 The data are expected to come from disk. See `nodemap_data_all` for a
566 The data are expected to come from disk. See `nodemap_data_all` for a
567 producer of such data."""
567 producer of such data."""
568 if nm_data is not None:
568 if nm_data is not None:
569 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
569 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
570 if self._nm_root:
570 if self._nm_root:
571 self._nm_docket = docket
571 self._nm_docket = docket
572 else:
572 else:
573 self._nm_root = self._nm_max_idx = self._nm_docket = None
573 self._nm_root = self._nm_max_idx = self._nm_docket = None
574
574
575
575
576 class InlinedIndexObject(BaseIndexObject):
576 class InlinedIndexObject(BaseIndexObject):
577 def __init__(self, data, inline=0):
577 def __init__(self, data, inline=0):
578 self._data = data
578 self._data = data
579 self._lgt = self._inline_scan(None)
579 self._lgt = self._inline_scan(None)
580 self._inline_scan(self._lgt)
580 self._inline_scan(self._lgt)
581 self._extra = []
581 self._extra = []
582
582
583 def _inline_scan(self, lgt):
583 def _inline_scan(self, lgt):
584 off = 0
584 off = 0
585 if lgt is not None:
585 if lgt is not None:
586 self._offsets = [0] * lgt
586 self._offsets = [0] * lgt
587 count = 0
587 count = 0
588 while off <= len(self._data) - self.entry_size:
588 while off <= len(self._data) - self.entry_size:
589 start = off + self.big_int_size
589 start = off + self.big_int_size
590 (s,) = struct.unpack(
590 (s,) = struct.unpack(
591 b'>i',
591 b'>i',
592 self._data[start : start + self.int_size],
592 self._data[start : start + self.int_size],
593 )
593 )
594 if lgt is not None:
594 if lgt is not None:
595 self._offsets[count] = off
595 self._offsets[count] = off
596 count += 1
596 count += 1
597 off += self.entry_size + s
597 off += self.entry_size + s
598 if off != len(self._data):
598 if off != len(self._data):
599 raise ValueError(b"corrupted data")
599 raise ValueError(b"corrupted data")
600 return count
600 return count
601
601
602 def __delitem__(self, i):
602 def __delitem__(self, i):
603 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
603 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
604 raise ValueError(b"deleting slices only supports a:-1 with step 1")
604 raise ValueError(b"deleting slices only supports a:-1 with step 1")
605 i = i.start
605 i = i.start
606 self._check_index(i)
606 self._check_index(i)
607 self._stripnodes(i)
607 self._stripnodes(i)
608 if i < self._lgt:
608 if i < self._lgt:
609 self._offsets = self._offsets[:i]
609 self._offsets = self._offsets[:i]
610 self._lgt = i
610 self._lgt = i
611 self._extra = []
611 self._extra = []
612 else:
612 else:
613 self._extra = self._extra[: i - self._lgt]
613 self._extra = self._extra[: i - self._lgt]
614
614
615 def _calculate_index(self, i):
615 def _calculate_index(self, i):
616 return self._offsets[i]
616 return self._offsets[i]
617
617
618
618
619 def parse_index2(data, inline, revlogv2=False):
619 def parse_index2(data, inline, revlogv2=False):
620 if not inline:
620 if not inline:
621 cls = IndexObject2 if revlogv2 else IndexObject
621 cls = IndexObject2 if revlogv2 else IndexObject
622 return cls(data), None
622 return cls(data), None
623 cls = InlinedIndexObject
623 cls = InlinedIndexObject
624 return cls(data, inline), (0, data)
624 return cls(data, inline), (0, data)
625
625
626
626
627 def parse_index_cl_v2(data):
627 def parse_index_cl_v2(data):
628 return IndexChangelogV2(data), None
628 return IndexChangelogV2(data), None
629
629
630
630
631 class IndexObject2(IndexObject):
631 class IndexObject2(IndexObject):
632 index_format = revlog_constants.INDEX_ENTRY_V2
632 index_format = revlog_constants.INDEX_ENTRY_V2
633
633
634 def replace_sidedata_info(
634 def replace_sidedata_info(
635 self,
635 self,
636 rev,
636 rev,
637 sidedata_offset,
637 sidedata_offset,
638 sidedata_length,
638 sidedata_length,
639 offset_flags,
639 offset_flags,
640 compression_mode,
640 compression_mode,
641 ):
641 ):
642 """
642 """
643 Replace an existing index entry's sidedata offset and length with new
643 Replace an existing index entry's sidedata offset and length with new
644 ones.
644 ones.
645 This cannot be used outside of the context of sidedata rewriting,
645 This cannot be used outside of the context of sidedata rewriting,
646 inside the transaction that creates the revision `rev`.
646 inside the transaction that creates the revision `rev`.
647 """
647 """
648 if rev < 0:
648 if rev < 0:
649 raise KeyError
649 raise KeyError
650 self._check_index(rev)
650 self._check_index(rev)
651 if rev < self._lgt:
651 if rev < self._lgt:
652 msg = b"cannot rewrite entries outside of this transaction"
652 msg = b"cannot rewrite entries outside of this transaction"
653 raise KeyError(msg)
653 raise KeyError(msg)
654 else:
654 else:
655 entry = list(self[rev])
655 entry = list(self[rev])
656 entry[0] = offset_flags
656 entry[0] = offset_flags
657 entry[8] = sidedata_offset
657 entry[8] = sidedata_offset
658 entry[9] = sidedata_length
658 entry[9] = sidedata_length
659 entry[11] = compression_mode
659 entry[11] = compression_mode
660 entry = tuple(entry)
660 entry = tuple(entry)
661 new = self._pack_entry(rev, entry)
661 new = self._pack_entry(rev, entry)
662 self._extra[rev - self._lgt] = new
662 self._extra[rev - self._lgt] = new
663
663
664 def _unpack_entry(self, rev, data):
664 def _unpack_entry(self, rev, data):
665 data = self.index_format.unpack(data)
665 data = self.index_format.unpack(data)
666 entry = data[:10]
666 entry = data[:10]
667 data_comp = data[10] & 3
667 data_comp = data[10] & 3
668 sidedata_comp = (data[10] & (3 << 2)) >> 2
668 sidedata_comp = (data[10] & (3 << 2)) >> 2
669 return entry + (data_comp, sidedata_comp)
669 return entry + (data_comp, sidedata_comp)
670
670
671 def _pack_entry(self, rev, entry):
671 def _pack_entry(self, rev, entry):
672 data = entry[:10]
672 data = entry[:10]
673 data_comp = entry[10] & 3
673 data_comp = entry[10] & 3
674 sidedata_comp = (entry[11] & 3) << 2
674 sidedata_comp = (entry[11] & 3) << 2
675 data += (data_comp | sidedata_comp,)
675 data += (data_comp | sidedata_comp,)
676
676
677 return self.index_format.pack(*data)
677 return self.index_format.pack(*data)
678
678
679 def entry_binary(self, rev):
679 def entry_binary(self, rev):
680 """return the raw binary string representing a revision"""
680 """return the raw binary string representing a revision"""
681 entry = self[rev]
681 entry = self[rev]
682 return self._pack_entry(rev, entry)
682 return self._pack_entry(rev, entry)
683
683
684 def pack_header(self, header):
684 def pack_header(self, header):
685 """pack header information as binary"""
685 """pack header information as binary"""
686 msg = 'version header should go in the docket, not the index: %d'
686 msg = 'version header should go in the docket, not the index: %d'
687 msg %= header
687 msg %= header
688 raise error.ProgrammingError(msg)
688 raise error.ProgrammingError(msg)
689
689
690
690
691 class IndexChangelogV2(IndexObject2):
691 class IndexChangelogV2(IndexObject2):
692 index_format = revlog_constants.INDEX_ENTRY_CL_V2
692 index_format = revlog_constants.INDEX_ENTRY_CL_V2
693
693
694 def _unpack_entry(self, rev, data, r=True):
694 def _unpack_entry(self, rev, data, r=True):
695 items = self.index_format.unpack(data)
695 items = self.index_format.unpack(data)
696 entry = items[:3] + (rev, rev) + items[3:8]
696 entry = items[:3] + (rev, rev) + items[3:8]
697 data_comp = items[8] & 3
697 data_comp = items[8] & 3
698 sidedata_comp = (items[8] >> 2) & 3
698 sidedata_comp = (items[8] >> 2) & 3
699 return entry + (data_comp, sidedata_comp)
699 return entry + (data_comp, sidedata_comp)
700
700
701 def _pack_entry(self, rev, entry):
701 def _pack_entry(self, rev, entry):
702 assert entry[3] == rev, entry[3]
702 assert entry[3] == rev, entry[3]
703 assert entry[4] == rev, entry[4]
703 assert entry[4] == rev, entry[4]
704 data = entry[:3] + entry[5:10]
704 data = entry[:3] + entry[5:10]
705 data_comp = entry[10] & 3
705 data_comp = entry[10] & 3
706 sidedata_comp = (entry[11] & 3) << 2
706 sidedata_comp = (entry[11] & 3) << 2
707 data += (data_comp | sidedata_comp,)
707 data += (data_comp | sidedata_comp,)
708 return self.index_format.pack(*data)
708 return self.index_format.pack(*data)
709
709
710
710
711 def parse_index_devel_nodemap(data, inline):
711 def parse_index_devel_nodemap(data, inline):
712 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
712 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
713 return PersistentNodeMapIndexObject(data), None
713 return PersistentNodeMapIndexObject(data), None
714
714
715
715
716 def parse_dirstate(dmap, copymap, st):
716 def parse_dirstate(dmap, copymap, st):
717 parents = [st[:20], st[20:40]]
717 parents = [st[:20], st[20:40]]
718 # dereference fields so they will be local in loop
718 # dereference fields so they will be local in loop
719 format = b">cllll"
719 format = b">cllll"
720 e_size = struct.calcsize(format)
720 e_size = struct.calcsize(format)
721 pos1 = 40
721 pos1 = 40
722 l = len(st)
722 l = len(st)
723
723
724 # the inner loop
724 # the inner loop
725 while pos1 < l:
725 while pos1 < l:
726 pos2 = pos1 + e_size
726 pos2 = pos1 + e_size
727 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
727 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
728 pos1 = pos2 + e[4]
728 pos1 = pos2 + e[4]
729 f = st[pos2:pos1]
729 f = st[pos2:pos1]
730 if b'\0' in f:
730 if b'\0' in f:
731 f, c = f.split(b'\0')
731 f, c = f.split(b'\0')
732 copymap[f] = c
732 copymap[f] = c
733 dmap[f] = DirstateItem.from_v1_data(*e[:4])
733 dmap[f] = DirstateItem.from_v1_data(*e[:4])
734 return parents
734 return parents
735
735
736
736
737 def pack_dirstate(dmap, copymap, pl, now):
737 def pack_dirstate(dmap, copymap, pl, now):
738 now = int(now)
738 now = int(now)
739 cs = stringio()
739 cs = stringio()
740 write = cs.write
740 write = cs.write
741 write(b"".join(pl))
741 write(b"".join(pl))
742 for f, e in pycompat.iteritems(dmap):
742 for f, e in pycompat.iteritems(dmap):
743 if e.need_delay(now):
743 if e.need_delay(now):
744 # The file was last modified "simultaneously" with the current
744 # The file was last modified "simultaneously" with the current
745 # write to dirstate (i.e. within the same second for file-
745 # write to dirstate (i.e. within the same second for file-
746 # systems with a granularity of 1 sec). This commonly happens
746 # systems with a granularity of 1 sec). This commonly happens
747 # for at least a couple of files on 'update'.
747 # for at least a couple of files on 'update'.
748 # The user could change the file without changing its size
748 # The user could change the file without changing its size
749 # within the same second. Invalidate the file's mtime in
749 # within the same second. Invalidate the file's mtime in
750 # dirstate, forcing future 'status' calls to compare the
750 # dirstate, forcing future 'status' calls to compare the
751 # contents of the file if the size is the same. This prevents
751 # contents of the file if the size is the same. This prevents
752 # mistakenly treating such files as clean.
752 # mistakenly treating such files as clean.
753 e.set_possibly_dirty()
753 e.set_possibly_dirty()
754
754
755 if f in copymap:
755 if f in copymap:
756 f = b"%s\0%s" % (f, copymap[f])
756 f = b"%s\0%s" % (f, copymap[f])
757 e = _pack(
757 e = _pack(
758 b">cllll",
758 b">cllll",
759 e.v1_state(),
759 e.v1_state(),
760 e.v1_mode(),
760 e.v1_mode(),
761 e.v1_size(),
761 e.v1_size(),
762 e.v1_mtime(),
762 e.v1_mtime(),
763 len(f),
763 len(f),
764 )
764 )
765 write(e)
765 write(e)
766 write(f)
766 write(f)
767 return cs.getvalue()
767 return cs.getvalue()
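
The only functional change in the Python hunk above is in `DirstateItem.from_v1_data`: the `b'a'` (added) branch now constructs the item directly with `cls(wc_tracked=True)` instead of going through the legacy `new_added()` classmethod, whose docstring says it should eventually be removed. A minimal sketch of why the two spellings are interchangeable — assuming the pure-Python module shown above is importable as `mercurial.pure.parsers` — could look like this:

    from mercurial.pure.parsers import AMBIGUOUS_TIME, NONNORMAL, DirstateItem

    # Legacy helper, kept for remaining callers but slated for removal.
    legacy = DirstateItem.new_added()
    # What DirstateItem.from_v1_data(b'a', mode, size, mtime) now does directly.
    direct = DirstateItem(wc_tracked=True)

    for item in (legacy, direct):
        assert item.added                         # tracked in wc, absent from both parents
        assert item.v1_state() == b'a'            # still serializes as "added" in dirstate-v1
        assert item.v1_size() == NONNORMAL        # added entries carry no meaningful size
        assert item.v1_mtime() == AMBIGUOUS_TIME  # and no cached mtime either

Both constructions leave mode, size and mtime unset, so the bytes produced by `pack_dirstate` are identical; only the redundant indirection through `new_added()` goes away.
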
@@ -1,422 +1,426 b''
1 use crate::errors::HgError;
1 use crate::errors::HgError;
2 use bitflags::bitflags;
2 use bitflags::bitflags;
3 use std::convert::TryFrom;
3 use std::convert::TryFrom;
4
4
5 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
5 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
6 pub enum EntryState {
6 pub enum EntryState {
7 Normal,
7 Normal,
8 Added,
8 Added,
9 Removed,
9 Removed,
10 Merged,
10 Merged,
11 }
11 }
12
12
13 /// The C implementation uses all signed types. This will be an issue
13 /// The C implementation uses all signed types. This will be an issue
14 /// either when 4GB+ source files are commonplace or in 2038, whichever
14 /// either when 4GB+ source files are commonplace or in 2038, whichever
15 /// comes first.
15 /// comes first.
16 #[derive(Debug, PartialEq, Copy, Clone)]
16 #[derive(Debug, PartialEq, Copy, Clone)]
17 pub struct DirstateEntry {
17 pub struct DirstateEntry {
18 pub(crate) flags: Flags,
18 pub(crate) flags: Flags,
19 mode_size: Option<(i32, i32)>,
19 mode_size: Option<(i32, i32)>,
20 mtime: Option<i32>,
20 mtime: Option<i32>,
21 }
21 }
22
22
23 bitflags! {
23 bitflags! {
24 pub(crate) struct Flags: u8 {
24 pub(crate) struct Flags: u8 {
25 const WDIR_TRACKED = 1 << 0;
25 const WDIR_TRACKED = 1 << 0;
26 const P1_TRACKED = 1 << 1;
26 const P1_TRACKED = 1 << 1;
27 const P2_INFO = 1 << 2;
27 const P2_INFO = 1 << 2;
28 }
28 }
29 }
29 }
30
30
31 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
31 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
32
32
33 pub const MTIME_UNSET: i32 = -1;
33 pub const MTIME_UNSET: i32 = -1;
34
34
35 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
35 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
36 /// other parent. This allows revert to pick the right status back during a
36 /// other parent. This allows revert to pick the right status back during a
37 /// merge.
37 /// merge.
38 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
38 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
39 /// A special value used for internal representation of special case in
39 /// A special value used for internal representation of special case in
40 /// dirstate v1 format.
40 /// dirstate v1 format.
41 pub const SIZE_NON_NORMAL: i32 = -1;
41 pub const SIZE_NON_NORMAL: i32 = -1;
42
42
43 impl DirstateEntry {
43 impl DirstateEntry {
44 pub fn from_v2_data(
44 pub fn from_v2_data(
45 wdir_tracked: bool,
45 wdir_tracked: bool,
46 p1_tracked: bool,
46 p1_tracked: bool,
47 p2_info: bool,
47 p2_info: bool,
48 mode_size: Option<(i32, i32)>,
48 mode_size: Option<(i32, i32)>,
49 mtime: Option<i32>,
49 mtime: Option<i32>,
50 ) -> Self {
50 ) -> Self {
51 let mut flags = Flags::empty();
51 let mut flags = Flags::empty();
52 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
52 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
53 flags.set(Flags::P1_TRACKED, p1_tracked);
53 flags.set(Flags::P1_TRACKED, p1_tracked);
54 flags.set(Flags::P2_INFO, p2_info);
54 flags.set(Flags::P2_INFO, p2_info);
55 Self {
55 Self {
56 flags,
56 flags,
57 mode_size,
57 mode_size,
58 mtime,
58 mtime,
59 }
59 }
60 }
60 }
61
61
62 pub fn from_v1_data(
62 pub fn from_v1_data(
63 state: EntryState,
63 state: EntryState,
64 mode: i32,
64 mode: i32,
65 size: i32,
65 size: i32,
66 mtime: i32,
66 mtime: i32,
67 ) -> Self {
67 ) -> Self {
68 match state {
68 match state {
69 EntryState::Normal => {
69 EntryState::Normal => {
70 if size == SIZE_FROM_OTHER_PARENT {
70 if size == SIZE_FROM_OTHER_PARENT {
71 Self::new_from_p2()
71 Self::new_from_p2()
72 } else if size == SIZE_NON_NORMAL {
72 } else if size == SIZE_NON_NORMAL {
73 Self::new_possibly_dirty()
73 Self::new_possibly_dirty()
74 } else if mtime == MTIME_UNSET {
74 } else if mtime == MTIME_UNSET {
75 Self {
75 Self {
76 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
76 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
77 mode_size: Some((mode, size)),
77 mode_size: Some((mode, size)),
78 mtime: None,
78 mtime: None,
79 }
79 }
80 } else {
80 } else {
81 Self::new_normal(mode, size, mtime)
81 Self::new_normal(mode, size, mtime)
82 }
82 }
83 }
83 }
84 EntryState::Added => Self::new_added(),
84 EntryState::Added => Self {
85 flags: Flags::WDIR_TRACKED,
86 mode_size: None,
87 mtime: None,
88 },
85 EntryState::Removed => Self {
89 EntryState::Removed => Self {
86 flags: if size == SIZE_NON_NORMAL {
90 flags: if size == SIZE_NON_NORMAL {
87 Flags::P1_TRACKED | Flags::P2_INFO
91 Flags::P1_TRACKED | Flags::P2_INFO
88 } else if size == SIZE_FROM_OTHER_PARENT {
92 } else if size == SIZE_FROM_OTHER_PARENT {
89 // We don’t know if P1_TRACKED should be set (file history)
93 // We don’t know if P1_TRACKED should be set (file history)
90 Flags::P2_INFO
94 Flags::P2_INFO
91 } else {
95 } else {
92 Flags::P1_TRACKED
96 Flags::P1_TRACKED
93 },
97 },
94 mode_size: None,
98 mode_size: None,
95 mtime: None,
99 mtime: None,
96 },
100 },
97 EntryState::Merged => Self {
101 EntryState::Merged => Self {
98 flags: Flags::WDIR_TRACKED
102 flags: Flags::WDIR_TRACKED
99 | Flags::P1_TRACKED // might not be true because of rename ?
103 | Flags::P1_TRACKED // might not be true because of rename ?
100 | Flags::P2_INFO, // might not be true because of rename ?
104 | Flags::P2_INFO, // might not be true because of rename ?
101 mode_size: None,
105 mode_size: None,
102 mtime: None,
106 mtime: None,
103 },
107 },
104 }
108 }
105 }
109 }
106
110
107 pub fn new_from_p2() -> Self {
111 pub fn new_from_p2() -> Self {
108 Self {
112 Self {
109 // might be missing P1_TRACKED
113 // might be missing P1_TRACKED
110 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
114 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
111 mode_size: None,
115 mode_size: None,
112 mtime: None,
116 mtime: None,
113 }
117 }
114 }
118 }
115
119
116 pub fn new_possibly_dirty() -> Self {
120 pub fn new_possibly_dirty() -> Self {
117 Self {
121 Self {
118 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
122 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
119 mode_size: None,
123 mode_size: None,
120 mtime: None,
124 mtime: None,
121 }
125 }
122 }
126 }
123
127
124 pub fn new_added() -> Self {
128 pub fn new_added() -> Self {
125 Self {
129 Self {
126 flags: Flags::WDIR_TRACKED,
130 flags: Flags::WDIR_TRACKED,
127 mode_size: None,
131 mode_size: None,
128 mtime: None,
132 mtime: None,
129 }
133 }
130 }
134 }
131
135
132 pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self {
136 pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self {
133 Self {
137 Self {
134 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
138 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
135 mode_size: Some((mode, size)),
139 mode_size: Some((mode, size)),
136 mtime: Some(mtime),
140 mtime: Some(mtime),
137 }
141 }
138 }
142 }
139
143
140 /// Creates a new entry in "removed" state.
144 /// Creates a new entry in "removed" state.
141 ///
145 ///
142 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
146 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
143 /// `SIZE_FROM_OTHER_PARENT`
147 /// `SIZE_FROM_OTHER_PARENT`
144 pub fn new_removed(size: i32) -> Self {
148 pub fn new_removed(size: i32) -> Self {
145 Self::from_v1_data(EntryState::Removed, 0, size, 0)
149 Self::from_v1_data(EntryState::Removed, 0, size, 0)
146 }
150 }
147
151
148 pub fn tracked(&self) -> bool {
152 pub fn tracked(&self) -> bool {
149 self.flags.contains(Flags::WDIR_TRACKED)
153 self.flags.contains(Flags::WDIR_TRACKED)
150 }
154 }
151
155
152 pub fn p1_tracked(&self) -> bool {
156 pub fn p1_tracked(&self) -> bool {
153 self.flags.contains(Flags::P1_TRACKED)
157 self.flags.contains(Flags::P1_TRACKED)
154 }
158 }
155
159
156 fn in_either_parent(&self) -> bool {
160 fn in_either_parent(&self) -> bool {
157 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
161 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
158 }
162 }
159
163
160 pub fn removed(&self) -> bool {
164 pub fn removed(&self) -> bool {
161 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
165 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
162 }
166 }
163
167
164 pub fn p2_info(&self) -> bool {
168 pub fn p2_info(&self) -> bool {
165 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
169 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
166 }
170 }
167
171
168 pub fn added(&self) -> bool {
172 pub fn added(&self) -> bool {
169 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
173 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
170 }
174 }
171
175
172 pub fn maybe_clean(&self) -> bool {
176 pub fn maybe_clean(&self) -> bool {
173 if !self.flags.contains(Flags::WDIR_TRACKED) {
177 if !self.flags.contains(Flags::WDIR_TRACKED) {
174 false
178 false
175 } else if !self.flags.contains(Flags::P1_TRACKED) {
179 } else if !self.flags.contains(Flags::P1_TRACKED) {
176 false
180 false
177 } else if self.flags.contains(Flags::P2_INFO) {
181 } else if self.flags.contains(Flags::P2_INFO) {
178 false
182 false
179 } else {
183 } else {
180 true
184 true
181 }
185 }
182 }
186 }
183
187
184 pub fn any_tracked(&self) -> bool {
188 pub fn any_tracked(&self) -> bool {
185 self.flags.intersects(
189 self.flags.intersects(
186 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
190 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
187 )
191 )
188 }
192 }
189
193
190 /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)`
194 /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)`
191 pub(crate) fn v2_data(
195 pub(crate) fn v2_data(
192 &self,
196 &self,
193 ) -> (bool, bool, bool, Option<(i32, i32)>, Option<i32>) {
197 ) -> (bool, bool, bool, Option<(i32, i32)>, Option<i32>) {
194 if !self.any_tracked() {
198 if !self.any_tracked() {
195 // TODO: return an Option instead?
199 // TODO: return an Option instead?
196 panic!("Accessing v1_state of an untracked DirstateEntry")
200 panic!("Accessing v1_state of an untracked DirstateEntry")
197 }
201 }
198 let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
202 let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
199 let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
203 let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
200 let p2_info = self.flags.contains(Flags::P2_INFO);
204 let p2_info = self.flags.contains(Flags::P2_INFO);
201 let mode_size = self.mode_size;
205 let mode_size = self.mode_size;
202 let mtime = self.mtime;
206 let mtime = self.mtime;
203 (wdir_tracked, p1_tracked, p2_info, mode_size, mtime)
207 (wdir_tracked, p1_tracked, p2_info, mode_size, mtime)
204 }
208 }

    fn v1_state(&self) -> EntryState {
        if !self.any_tracked() {
            // TODO: return an Option instead?
            panic!("Accessing v1_state of an untracked DirstateEntry")
        }
        if self.removed() {
            EntryState::Removed
        } else if self
            .flags
            .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
        {
            EntryState::Merged
        } else if self.added() {
            EntryState::Added
        } else {
            EntryState::Normal
        }
    }

    fn v1_mode(&self) -> i32 {
        if let Some((mode, _size)) = self.mode_size {
            mode
        } else {
            0
        }
    }

    fn v1_size(&self) -> i32 {
        if !self.any_tracked() {
            // TODO: return an Option instead?
            panic!("Accessing v1_size of an untracked DirstateEntry")
        }
        if self.removed()
            && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO)
        {
            SIZE_NON_NORMAL
        } else if self.flags.contains(Flags::P2_INFO) {
            SIZE_FROM_OTHER_PARENT
        } else if self.removed() {
            0
        } else if self.added() {
            SIZE_NON_NORMAL
        } else if let Some((_mode, size)) = self.mode_size {
            size
        } else {
            SIZE_NON_NORMAL
        }
    }

    fn v1_mtime(&self) -> i32 {
        if !self.any_tracked() {
            // TODO: return an Option instead?
            panic!("Accessing v1_mtime of an untracked DirstateEntry")
        }
        if self.removed() {
            0
        } else if self.flags.contains(Flags::P2_INFO) {
            MTIME_UNSET
        } else if !self.flags.contains(Flags::P1_TRACKED) {
            MTIME_UNSET
        } else {
            self.mtime.unwrap_or(MTIME_UNSET)
        }
    }

    // TODO: return `Option<EntryState>`? None when `!self.any_tracked`
    pub fn state(&self) -> EntryState {
        self.v1_state()
    }

    // TODO: return Option?
    pub fn mode(&self) -> i32 {
        self.v1_mode()
    }

    // TODO: return Option?
    pub fn size(&self) -> i32 {
        self.v1_size()
    }

    // TODO: return Option?
    pub fn mtime(&self) -> i32 {
        self.v1_mtime()
    }

    pub fn drop_merge_data(&mut self) {
        if self.flags.contains(Flags::P2_INFO) {
            self.flags.remove(Flags::P2_INFO);
            self.mode_size = None;
            self.mtime = None;
        }
    }

    pub fn set_possibly_dirty(&mut self) {
        self.mtime = None
    }

    pub fn set_clean(&mut self, mode: i32, size: i32, mtime: i32) {
        self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED);
        self.mode_size = Some((mode, size));
        self.mtime = Some(mtime);
    }

    pub fn set_tracked(&mut self) {
        self.flags.insert(Flags::WDIR_TRACKED);
        // `set_tracked` replaces various `normallookup` calls, so we mark
        // the file as needing lookup.
        //
        // Consider dropping this in the future in favor of something less
        // broad.
        self.mtime = None;
    }

    pub fn set_untracked(&mut self) {
        self.flags.remove(Flags::WDIR_TRACKED);
        self.mode_size = None;
        self.mtime = None;
    }

    /// Returns `(state, mode, size, mtime)` for the purpose of serialization
    /// in the dirstate-v1 format.
    ///
    /// This includes marker values such as `mtime == -1`. In the future we
    /// may want to stop representing these cases this way in memory, but
    /// serialization will need to keep the same format.
    pub fn v1_data(&self) -> (u8, i32, i32, i32) {
        (
            self.v1_state().into(),
            self.v1_mode(),
            self.v1_size(),
            self.v1_mtime(),
        )
    }
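
The tuple produced by `v1_data` is what ends up in each on-disk dirstate-v1 record. As a rough standalone sketch of that layout — not Mercurial's actual writer; it assumes the classic one-byte state followed by big-endian 32-bit mode, size, mtime and path length (the `>cllll` pattern of the pure-Python parser), and the `pack_v1_entry` helper and example path are made up for the illustration:

// Hypothetical sketch, not part of entry.rs: pack one dirstate-v1 record.
// Assumed layout: state byte, then mode, size, mtime and path length as
// big-endian i32, followed by the path bytes.
fn pack_v1_entry(state: u8, mode: i32, size: i32, mtime: i32, path: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(1 + 4 * 4 + path.len());
    out.push(state);
    out.extend_from_slice(&mode.to_be_bytes());
    out.extend_from_slice(&size.to_be_bytes());
    out.extend_from_slice(&mtime.to_be_bytes());
    out.extend_from_slice(&(path.len() as i32).to_be_bytes());
    out.extend_from_slice(path);
    out
}

fn main() {
    // b'n' is the byte for a "normal" entry; -1 is the MTIME_UNSET marker.
    let record = pack_v1_entry(b'n', 0o644, 12, -1, b"dir/file.txt");
    assert_eq!(record.len(), 17 + b"dir/file.txt".len());
}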

    pub(crate) fn is_from_other_parent(&self) -> bool {
        self.state() == EntryState::Normal
            && self.size() == SIZE_FROM_OTHER_PARENT
    }

    // TODO: other platforms
    #[cfg(unix)]
    pub fn mode_changed(
        &self,
        filesystem_metadata: &std::fs::Metadata,
    ) -> bool {
        use std::os::unix::fs::MetadataExt;
        const EXEC_BIT_MASK: u32 = 0o100;
        let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK;
        let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
        dirstate_exec_bit != fs_exec_bit
    }
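
Only the executable bit is compared here, not the full mode. A self-contained illustration of the same comparison against a file on disk — the `exec_bit_differs` helper name and the `/bin/sh` path are only examples for this sketch:

// Standalone sketch: does the exec bit recorded in a dirstate-style mode
// differ from the one currently on disk? Unix-only, like mode_changed().
#[cfg(unix)]
fn exec_bit_differs(recorded_mode: u32, path: &std::path::Path) -> std::io::Result<bool> {
    use std::os::unix::fs::MetadataExt;
    const EXEC_BIT_MASK: u32 = 0o100; // owner-executable bit
    let fs_mode = std::fs::metadata(path)?.mode();
    Ok((recorded_mode & EXEC_BIT_MASK) != (fs_mode & EXEC_BIT_MASK))
}

#[cfg(unix)]
fn main() -> std::io::Result<()> {
    // A mode recorded without the exec bit, checked against an executable.
    let changed = exec_bit_differs(0o644, std::path::Path::new("/bin/sh"))?;
    println!("exec bit changed: {}", changed);
    Ok(())
}

#[cfg(not(unix))]
fn main() {}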

    /// Returns a `(state, mode, size, mtime)` tuple as for
    /// `DirstateMapMethods::debug_iter`.
    pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
        (self.state().into(), self.mode(), self.size(), self.mtime())
    }

    pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
        self.state() == EntryState::Normal && self.mtime() == now
    }

    pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
        let ambiguous = self.mtime_is_ambiguous(now);
        if ambiguous {
            // The file was last modified "simultaneously" with the current
            // write to dirstate (i.e. within the same second for file-
            // systems with a granularity of 1 sec). This commonly happens
            // for at least a couple of files on 'update'.
            // The user could change the file without changing its size
            // within the same second. Invalidate the file's mtime in
            // dirstate, forcing future 'status' calls to compare the
            // contents of the file if the size is the same. This prevents
            // mistakenly treating such files as clean.
            self.set_possibly_dirty()
        }
        ambiguous
    }
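
Stripped of the `DirstateEntry` plumbing, the rule in the comment above is simply: an mtime equal to the timestamp of the dirstate write cannot be trusted, so it is forgotten and the next `status` falls back to comparing contents. A minimal standalone model — the `clear_if_ambiguous` name and second-resolution integers exist only for this sketch:

// `None` plays the role of "possibly dirty": no cached mtime, re-check contents.
fn clear_if_ambiguous(cached_mtime: Option<i32>, now: i32) -> Option<i32> {
    match cached_mtime {
        // Recorded in the same second as the dirstate write: the file could
        // still change within that second without the mtime moving, so drop it.
        Some(mtime) if mtime == now => None,
        other => other,
    }
}

fn main() {
    assert_eq!(clear_if_ambiguous(Some(1_000_000), 1_000_000), None);
    assert_eq!(clear_if_ambiguous(Some(999_999), 1_000_000), Some(999_999));
    assert_eq!(clear_if_ambiguous(None, 1_000_000), None);
}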
}

impl EntryState {
    pub fn is_tracked(self) -> bool {
        use EntryState::*;
        match self {
            Normal | Added | Merged => true,
            Removed => false,
        }
    }
}

impl TryFrom<u8> for EntryState {
    type Error = HgError;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            b'n' => Ok(EntryState::Normal),
            b'a' => Ok(EntryState::Added),
            b'r' => Ok(EntryState::Removed),
            b'm' => Ok(EntryState::Merged),
            _ => Err(HgError::CorruptedRepository(format!(
                "Incorrect dirstate entry state {}",
                value
            ))),
        }
    }
}

impl Into<u8> for EntryState {
    fn into(self) -> u8 {
        match self {
            EntryState::Normal => b'n',
            EntryState::Added => b'a',
            EntryState::Removed => b'r',
            EntryState::Merged => b'm',
        }
    }
}
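
The `TryFrom<u8>` and `Into<u8>` implementations above are inverses for the four valid state bytes. A self-contained sketch of the same pattern with a throwaway `State` enum (not the real `EntryState`); it implements `From<State> for u8` and gets `Into` for free, which is the more common direction today:

use std::convert::TryFrom;

#[derive(Clone, Copy, Debug, PartialEq)]
enum State {
    Normal,
    Added,
    Removed,
    Merged,
}

impl TryFrom<u8> for State {
    type Error = String;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            b'n' => Ok(State::Normal),
            b'a' => Ok(State::Added),
            b'r' => Ok(State::Removed),
            b'm' => Ok(State::Merged),
            _ => Err(format!("Incorrect dirstate entry state {}", value)),
        }
    }
}

impl From<State> for u8 {
    fn from(state: State) -> u8 {
        match state {
            State::Normal => b'n',
            State::Added => b'a',
            State::Removed => b'r',
            State::Merged => b'm',
        }
    }
}

fn main() {
    // Round-trip every valid state byte and reject an unknown one.
    for &byte in &[b'n', b'a', b'r', b'm'] {
        let state = State::try_from(byte).unwrap();
        assert_eq!(u8::from(state), byte);
    }
    assert!(State::try_from(b'x').is_err());
}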