dirstate-item: directly use `p2_info` in `v1_size`...
marmoute
r48961:6ac2b417 default
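Both hunks below make the same simplification to `v1_size`: the first is the pure-Python `parsers.py` implementation, the second the Rust `DirstateEntry`. Once the `removed and p1_tracked and p2_info` case has returned NONNORMAL, every remaining tracked entry with `p2_info` set ended up returning FROM_P2 anyway, via the old `removed and p2_info`, `merged`, or `from_p2` branches, so a single direct `p2_info` check can replace all three, and the now-unreachable `merged`/`from_p2` arms are dropped. A minimal, self-contained Python sketch of that equivalence; only the branch order is taken from the hunks, the `old_size`/`new_size` helpers are hypothetical:

    FROM_P2 = -2
    NONNORMAL = -1

    def old_size(wc, p1, p2, size):
        removed = not wc and (p1 or p2)
        merged = wc and p1 and p2
        added = wc and not (p1 or p2)
        from_p2 = wc and not p1 and p2
        if removed and p1 and p2:
            return NONNORMAL
        elif removed and p2:
            return FROM_P2
        elif removed:
            return 0
        elif merged:
            return FROM_P2
        elif added:
            return NONNORMAL
        elif from_p2:
            return FROM_P2
        elif size is None:
            return NONNORMAL
        return size

    def new_size(wc, p1, p2, size):
        removed = not wc and (p1 or p2)
        added = wc and not (p1 or p2)
        if removed and p1 and p2:
            return NONNORMAL
        elif p2:  # the single check introduced by this change
            return FROM_P2
        elif removed:
            return 0
        elif added:
            return NONNORMAL
        elif size is None:
            return NONNORMAL
        return size

    # every tracked combination of flags gives the same answer
    for wc in (False, True):
        for p1 in (False, True):
            for p2 in (False, True):
                if not (wc or p1 or p2):
                    continue  # untracked items raise instead of returning a size
                for size in (None, 12):
                    assert old_size(wc, p1, p2, size) == new_size(wc, p1, p2, size)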
@@ -1,797 +1,793
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from ..thirdparty import attr
17 from ..thirdparty import attr
18 from .. import (
18 from .. import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 revlogutils,
21 revlogutils,
22 util,
22 util,
23 )
23 )
24
24
25 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import constants as revlog_constants
26 from ..revlogutils import constants as revlog_constants
27
27
28 stringio = pycompat.bytesio
28 stringio = pycompat.bytesio
29
29
30
30
31 _pack = struct.pack
31 _pack = struct.pack
32 _unpack = struct.unpack
32 _unpack = struct.unpack
33 _compress = zlib.compress
33 _compress = zlib.compress
34 _decompress = zlib.decompress
34 _decompress = zlib.decompress
35
35
36
36
37 # a special value used internally for `size` if the file comes from the other parent
37 # a special value used internally for `size` if the file comes from the other parent
38 FROM_P2 = -2
38 FROM_P2 = -2
39
39
40 # a special value used internally for `size` if the file is modified/merged/added
40 # a special value used internally for `size` if the file is modified/merged/added
41 NONNORMAL = -1
41 NONNORMAL = -1
42
42
43 # a special value used internally for `time` if the time is ambiguous
43 # a special value used internally for `time` if the time is ambiguous
44 AMBIGUOUS_TIME = -1
44 AMBIGUOUS_TIME = -1
45
45
46
46
47 @attr.s(slots=True, init=False)
47 @attr.s(slots=True, init=False)
48 class DirstateItem(object):
48 class DirstateItem(object):
49 """represent a dirstate entry
49 """represent a dirstate entry
50
50
51 It holds multiple attributes
51 It holds multiple attributes
52
52
53 # about file tracking
53 # about file tracking
54 - wc_tracked: is the file tracked by the working copy
54 - wc_tracked: is the file tracked by the working copy
55 - p1_tracked: is the file tracked in working copy first parent
55 - p1_tracked: is the file tracked in working copy first parent
56 - p2_info: the file has been involved in some merge operation. Either
56 - p2_info: the file has been involved in some merge operation. Either
57 because it was actually merged, or because the p2 version was
57 because it was actually merged, or because the p2 version was
58 ahead, or because some rename moved it there. In either case
58 ahead, or because some rename moved it there. In either case
59 `hg status` will want it displayed as modified.
59 `hg status` will want it displayed as modified.
60
60
61 # about the file state expected from p1 manifest:
61 # about the file state expected from p1 manifest:
62 - mode: the file mode in p1
62 - mode: the file mode in p1
63 - size: the file size in p1
63 - size: the file size in p1
64
64
65 These values can be set to None, which means we don't have a meaningful value
65 These values can be set to None, which means we don't have a meaningful value
66 to compare with, either because we don't really care about them as their
66 to compare with, either because we don't really care about them as their
67 `status` is known without having to look at the disk, or because we don't
67 `status` is known without having to look at the disk, or because we don't
68 know them right now and a full comparison will be needed to find out if
68 know them right now and a full comparison will be needed to find out if
69 the file is clean.
69 the file is clean.
70
70
71 # about the file state on disk last time we saw it:
71 # about the file state on disk last time we saw it:
72 - mtime: the last known clean mtime for the file.
72 - mtime: the last known clean mtime for the file.
73
73
74 This value can be set to None if no cacheable state exists, either because we
74 This value can be set to None if no cacheable state exists, either because we
75 do not care (see previous section) or because we could not cache something
75 do not care (see previous section) or because we could not cache something
76 yet.
76 yet.
77 """
77 """
78
78
79 _wc_tracked = attr.ib()
79 _wc_tracked = attr.ib()
80 _p1_tracked = attr.ib()
80 _p1_tracked = attr.ib()
81 _p2_info = attr.ib()
81 _p2_info = attr.ib()
82 _mode = attr.ib()
82 _mode = attr.ib()
83 _size = attr.ib()
83 _size = attr.ib()
84 _mtime = attr.ib()
84 _mtime = attr.ib()
85
85
86 def __init__(
86 def __init__(
87 self,
87 self,
88 wc_tracked=False,
88 wc_tracked=False,
89 p1_tracked=False,
89 p1_tracked=False,
90 p2_info=False,
90 p2_info=False,
91 has_meaningful_data=True,
91 has_meaningful_data=True,
92 has_meaningful_mtime=True,
92 has_meaningful_mtime=True,
93 parentfiledata=None,
93 parentfiledata=None,
94 ):
94 ):
95 self._wc_tracked = wc_tracked
95 self._wc_tracked = wc_tracked
96 self._p1_tracked = p1_tracked
96 self._p1_tracked = p1_tracked
97 self._p2_info = p2_info
97 self._p2_info = p2_info
98
98
99 self._mode = None
99 self._mode = None
100 self._size = None
100 self._size = None
101 self._mtime = None
101 self._mtime = None
102 if parentfiledata is None:
102 if parentfiledata is None:
103 has_meaningful_mtime = False
103 has_meaningful_mtime = False
104 has_meaningful_data = False
104 has_meaningful_data = False
105 if has_meaningful_data:
105 if has_meaningful_data:
106 self._mode = parentfiledata[0]
106 self._mode = parentfiledata[0]
107 self._size = parentfiledata[1]
107 self._size = parentfiledata[1]
108 if has_meaningful_mtime:
108 if has_meaningful_mtime:
109 self._mtime = parentfiledata[2]
109 self._mtime = parentfiledata[2]
110
110
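A short usage sketch of the constructor above, with illustrative mode/size/mtime values; the import path is an assumption about where this pure-Python module lives:

    # assumption: the pure-Python implementation is importable like this
    from mercurial.pure.parsers import AMBIGUOUS_TIME, DirstateItem

    # a clean file known to p1 and the working copy, with cached stat data
    item = DirstateItem(
        wc_tracked=True,
        p1_tracked=True,
        parentfiledata=(0o644, 12, 1638531810),
    )
    assert (item.mode, item.size, item.mtime) == (0o644, 12, 1638531810)

    # the same file, but the cached mtime is not trusted: only mode and size are kept
    lookup = DirstateItem(
        wc_tracked=True,
        p1_tracked=True,
        has_meaningful_mtime=False,
        parentfiledata=(0o644, 12, 1638531810),
    )
    assert lookup.mtime == AMBIGUOUS_TIME  # v1_mtime() falls back to -1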
111 @classmethod
111 @classmethod
112 def new_added(cls):
112 def new_added(cls):
113 """constructor to help legacy API to build a new "added" item
113 """constructor to help legacy API to build a new "added" item
114
114
115 Should eventually be removed
115 Should eventually be removed
116 """
116 """
117 return cls(wc_tracked=True)
117 return cls(wc_tracked=True)
118
118
119 @classmethod
119 @classmethod
120 def new_merged(cls):
120 def new_merged(cls):
121 """constructor to help legacy API to build a new "merged" item
121 """constructor to help legacy API to build a new "merged" item
122
122
123 Should eventually be removed
123 Should eventually be removed
124 """
124 """
125 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
125 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
126
126
127 @classmethod
127 @classmethod
128 def new_from_p2(cls):
128 def new_from_p2(cls):
129 """constructor to help legacy API to build a new "from_p2" item
129 """constructor to help legacy API to build a new "from_p2" item
130
130
131 Should eventually be removed
131 Should eventually be removed
132 """
132 """
133 return cls(wc_tracked=True, p2_info=True)
133 return cls(wc_tracked=True, p2_info=True)
134
134
135 @classmethod
135 @classmethod
136 def new_possibly_dirty(cls):
136 def new_possibly_dirty(cls):
137 """constructor to help legacy API to build a new "possibly_dirty" item
137 """constructor to help legacy API to build a new "possibly_dirty" item
138
138
139 Should eventually be removed
139 Should eventually be removed
140 """
140 """
141 return cls(wc_tracked=True, p1_tracked=True)
141 return cls(wc_tracked=True, p1_tracked=True)
142
142
143 @classmethod
143 @classmethod
144 def new_normal(cls, mode, size, mtime):
144 def new_normal(cls, mode, size, mtime):
145 """constructor to help legacy API to build a new "normal" item
145 """constructor to help legacy API to build a new "normal" item
146
146
147 Should eventually be removed
147 Should eventually be removed
148 """
148 """
149 assert size != FROM_P2
149 assert size != FROM_P2
150 assert size != NONNORMAL
150 assert size != NONNORMAL
151 return cls(
151 return cls(
152 wc_tracked=True,
152 wc_tracked=True,
153 p1_tracked=True,
153 p1_tracked=True,
154 parentfiledata=(mode, size, mtime),
154 parentfiledata=(mode, size, mtime),
155 )
155 )
156
156
157 @classmethod
157 @classmethod
158 def from_v1_data(cls, state, mode, size, mtime):
158 def from_v1_data(cls, state, mode, size, mtime):
159 """Build a new DirstateItem object from V1 data
159 """Build a new DirstateItem object from V1 data
160
160
161 Since the dirstate-v1 format is frozen, the signature of this function
161 Since the dirstate-v1 format is frozen, the signature of this function
162 is not expected to change, unlike the __init__ one.
162 is not expected to change, unlike the __init__ one.
163 """
163 """
164 if state == b'm':
164 if state == b'm':
165 return cls.new_merged()
165 return cls.new_merged()
166 elif state == b'a':
166 elif state == b'a':
167 return cls.new_added()
167 return cls.new_added()
168 elif state == b'r':
168 elif state == b'r':
169 if size == NONNORMAL:
169 if size == NONNORMAL:
170 p1_tracked = True
170 p1_tracked = True
171 p2_info = True
171 p2_info = True
172 elif size == FROM_P2:
172 elif size == FROM_P2:
173 p1_tracked = False
173 p1_tracked = False
174 p2_info = True
174 p2_info = True
175 else:
175 else:
176 p1_tracked = True
176 p1_tracked = True
177 p2_info = False
177 p2_info = False
178 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
178 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
179 elif state == b'n':
179 elif state == b'n':
180 if size == FROM_P2:
180 if size == FROM_P2:
181 return cls.new_from_p2()
181 return cls.new_from_p2()
182 elif size == NONNORMAL:
182 elif size == NONNORMAL:
183 return cls.new_possibly_dirty()
183 return cls.new_possibly_dirty()
184 elif mtime == AMBIGUOUS_TIME:
184 elif mtime == AMBIGUOUS_TIME:
185 instance = cls.new_normal(mode, size, 42)
185 instance = cls.new_normal(mode, size, 42)
186 instance._mtime = None
186 instance._mtime = None
187 return instance
187 return instance
188 else:
188 else:
189 return cls.new_normal(mode, size, mtime)
189 return cls.new_normal(mode, size, mtime)
190 else:
190 else:
191 raise RuntimeError(b'unknown state: %s' % state)
191 raise RuntimeError(b'unknown state: %s' % state)
192
192
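A few doctest-style checks of the v1 decoding above, assuming the module-level names are in scope; the mode/size/mtime values are illustrative, the sentinels are the FROM_P2/NONNORMAL constants defined earlier:

    # 'n' with a real size: a plain, possibly clean entry
    e = DirstateItem.from_v1_data(b'n', 0o644, 12, 1638531810)
    assert e.tracked and e.p1_tracked and not e.p2_info

    # 'n' with size == FROM_P2: the file was fetched from the second parent
    assert DirstateItem.from_v1_data(b'n', 0, FROM_P2, 0).from_p2

    # 'r' with size == NONNORMAL: removed, but relevant to both parents
    r = DirstateItem.from_v1_data(b'r', 0, NONNORMAL, 0)
    assert r.removed and r.p1_tracked

    # 'a': added, i.e. only tracked in the working copy
    assert DirstateItem.from_v1_data(b'a', 0, 0, 0).added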
193 def set_possibly_dirty(self):
193 def set_possibly_dirty(self):
194 """Mark a file as "possibly dirty"
194 """Mark a file as "possibly dirty"
195
195
196 This means the next status call will have to actually check its content
196 This means the next status call will have to actually check its content
197 to make sure it is correct.
197 to make sure it is correct.
198 """
198 """
199 self._mtime = None
199 self._mtime = None
200
200
201 def set_clean(self, mode, size, mtime):
201 def set_clean(self, mode, size, mtime):
202 """mark a file as "clean" cancelling potential "possibly dirty call"
202 """mark a file as "clean" cancelling potential "possibly dirty call"
203
203
204 Note: this function is a descendant of `dirstate.normal` and is
204 Note: this function is a descendant of `dirstate.normal` and is
205 currently expected to be called on "normal" entries only. There is no
205 currently expected to be called on "normal" entries only. There is no
206 reason for this not to change in the future as long as the code is
206 reason for this not to change in the future as long as the code is
207 updated to preserve the proper state of the non-normal files.
207 updated to preserve the proper state of the non-normal files.
208 """
208 """
209 self._wc_tracked = True
209 self._wc_tracked = True
210 self._p1_tracked = True
210 self._p1_tracked = True
211 self._mode = mode
211 self._mode = mode
212 self._size = size
212 self._size = size
213 self._mtime = mtime
213 self._mtime = mtime
214
214
215 def set_tracked(self):
215 def set_tracked(self):
216 """mark a file as tracked in the working copy
216 """mark a file as tracked in the working copy
217
217
218 This will ultimately be called by commands like `hg add`.
218 This will ultimately be called by commands like `hg add`.
219 """
219 """
220 self._wc_tracked = True
220 self._wc_tracked = True
221 # `set_tracked` is replacing various `normallookup` calls. So we mark
221 # `set_tracked` is replacing various `normallookup` calls. So we mark
222 # the files as needing lookup
222 # the files as needing lookup
223 #
223 #
224 # Consider dropping this in the future in favor of something less broad.
224 # Consider dropping this in the future in favor of something less broad.
225 self._mtime = None
225 self._mtime = None
226
226
227 def set_untracked(self):
227 def set_untracked(self):
228 """mark a file as untracked in the working copy
228 """mark a file as untracked in the working copy
229
229
230 This will ultimately be called by commands like `hg remove`.
230 This will ultimately be called by commands like `hg remove`.
231 """
231 """
232 self._wc_tracked = False
232 self._wc_tracked = False
233 self._mode = None
233 self._mode = None
234 self._size = None
234 self._size = None
235 self._mtime = None
235 self._mtime = None
236
236
237 def drop_merge_data(self):
237 def drop_merge_data(self):
238 """remove all "merge-only" from a DirstateItem
238 """remove all "merge-only" from a DirstateItem
239
239
240 This is to be called by the dirstatemap code when the second parent is dropped
240 This is to be called by the dirstatemap code when the second parent is dropped
241 """
241 """
242 if self._p2_info:
242 if self._p2_info:
243 self._p2_info = False
243 self._p2_info = False
244 self._mode = None
244 self._mode = None
245 self._size = None
245 self._size = None
246 self._mtime = None
246 self._mtime = None
247
247
248 @property
248 @property
249 def mode(self):
249 def mode(self):
250 return self.v1_mode()
250 return self.v1_mode()
251
251
252 @property
252 @property
253 def size(self):
253 def size(self):
254 return self.v1_size()
254 return self.v1_size()
255
255
256 @property
256 @property
257 def mtime(self):
257 def mtime(self):
258 return self.v1_mtime()
258 return self.v1_mtime()
259
259
260 @property
260 @property
261 def state(self):
261 def state(self):
262 """
262 """
263 States are:
263 States are:
264 n normal
264 n normal
265 m needs merging
265 m needs merging
266 r marked for removal
266 r marked for removal
267 a marked for addition
267 a marked for addition
268
268
269 XXX This "state" is a bit obscure and mostly a direct expression of the
269 XXX This "state" is a bit obscure and mostly a direct expression of the
270 dirstatev1 format. It would make sense to ultimately deprecate it in
270 dirstatev1 format. It would make sense to ultimately deprecate it in
271 favor of the more "semantic" attributes.
271 favor of the more "semantic" attributes.
272 """
272 """
273 if not self.any_tracked:
273 if not self.any_tracked:
274 return b'?'
274 return b'?'
275 return self.v1_state()
275 return self.v1_state()
276
276
277 @property
277 @property
278 def tracked(self):
278 def tracked(self):
279 """True is the file is tracked in the working copy"""
279 """True is the file is tracked in the working copy"""
280 return self._wc_tracked
280 return self._wc_tracked
281
281
282 @property
282 @property
283 def any_tracked(self):
283 def any_tracked(self):
284 """True is the file is tracked anywhere (wc or parents)"""
284 """True is the file is tracked anywhere (wc or parents)"""
285 return self._wc_tracked or self._p1_tracked or self._p2_info
285 return self._wc_tracked or self._p1_tracked or self._p2_info
286
286
287 @property
287 @property
288 def added(self):
288 def added(self):
289 """True if the file has been added"""
289 """True if the file has been added"""
290 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
290 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
291
291
292 @property
292 @property
293 def maybe_clean(self):
293 def maybe_clean(self):
294 """True if the file has a chance to be in the "clean" state"""
294 """True if the file has a chance to be in the "clean" state"""
295 if not self._wc_tracked:
295 if not self._wc_tracked:
296 return False
296 return False
297 elif not self._p1_tracked:
297 elif not self._p1_tracked:
298 return False
298 return False
299 elif self._p2_info:
299 elif self._p2_info:
300 return False
300 return False
301 return True
301 return True
302
302
303 @property
303 @property
304 def p1_tracked(self):
304 def p1_tracked(self):
305 """True if the file is tracked in the first parent manifest"""
305 """True if the file is tracked in the first parent manifest"""
306 return self._p1_tracked
306 return self._p1_tracked
307
307
308 @property
308 @property
309 def p2_info(self):
309 def p2_info(self):
310 """True if the file needed to merge or apply any input from p2
310 """True if the file needed to merge or apply any input from p2
311
311
312 See the class documentation for details.
312 See the class documentation for details.
313 """
313 """
314 return self._wc_tracked and self._p2_info
314 return self._wc_tracked and self._p2_info
315
315
316 @property
316 @property
317 def merged(self):
317 def merged(self):
318 """True if the file has been merged
318 """True if the file has been merged
319
319
320 Should only be set if a merge is in progress in the dirstate
320 Should only be set if a merge is in progress in the dirstate
321 """
321 """
322 return self._wc_tracked and self._p1_tracked and self._p2_info
322 return self._wc_tracked and self._p1_tracked and self._p2_info
323
323
324 @property
324 @property
325 def from_p2(self):
325 def from_p2(self):
326 """True if the file have been fetched from p2 during the current merge
326 """True if the file have been fetched from p2 during the current merge
327
327
328 This is only True if the file is currently tracked.
328 This is only True if the file is currently tracked.
329
329
330 Should only be set if a merge is in progress in the dirstate
330 Should only be set if a merge is in progress in the dirstate
331 """
331 """
332 return self._wc_tracked and (not self._p1_tracked) and self._p2_info
332 return self._wc_tracked and (not self._p1_tracked) and self._p2_info
333
333
334 @property
334 @property
335 def removed(self):
335 def removed(self):
336 """True if the file has been removed"""
336 """True if the file has been removed"""
337 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
337 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
338
338
339 def v1_state(self):
339 def v1_state(self):
340 """return a "state" suitable for v1 serialization"""
340 """return a "state" suitable for v1 serialization"""
341 if not self.any_tracked:
341 if not self.any_tracked:
342 # the object has no state to record, this is -currently-
342 # the object has no state to record, this is -currently-
343 # unsupported
343 # unsupported
344 raise RuntimeError('untracked item')
344 raise RuntimeError('untracked item')
345 elif self.removed:
345 elif self.removed:
346 return b'r'
346 return b'r'
347 elif self.merged:
347 elif self.merged:
348 return b'm'
348 return b'm'
349 elif self.added:
349 elif self.added:
350 return b'a'
350 return b'a'
351 else:
351 else:
352 return b'n'
352 return b'n'
353
353
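And the serialization direction, sketched with the legacy constructors defined earlier (again assuming the module-level names are in scope):

    assert DirstateItem.new_added().v1_state() == b'a'
    assert DirstateItem.new_merged().v1_state() == b'm'
    assert DirstateItem.new_from_p2().v1_state() == b'n'
    assert DirstateItem.new_possibly_dirty().v1_state() == b'n'
    assert DirstateItem(p1_tracked=True).v1_state() == b'r'  # tracked only in p1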
354 def v1_mode(self):
354 def v1_mode(self):
355 """return a "mode" suitable for v1 serialization"""
355 """return a "mode" suitable for v1 serialization"""
356 return self._mode if self._mode is not None else 0
356 return self._mode if self._mode is not None else 0
357
357
358 def v1_size(self):
358 def v1_size(self):
359 """return a "size" suitable for v1 serialization"""
359 """return a "size" suitable for v1 serialization"""
360 if not self.any_tracked:
360 if not self.any_tracked:
361 # the object has no state to record, this is -currently-
361 # the object has no state to record, this is -currently-
362 # unsupported
362 # unsupported
363 raise RuntimeError('untracked item')
363 raise RuntimeError('untracked item')
364 elif self.removed and self._p1_tracked and self._p2_info:
364 elif self.removed and self._p1_tracked and self._p2_info:
365 return NONNORMAL
365 return NONNORMAL
366 elif self.removed and self._p2_info:
366 elif self._p2_info:
367 return FROM_P2
367 return FROM_P2
368 elif self.removed:
368 elif self.removed:
369 return 0
369 return 0
370 elif self.merged:
371 return FROM_P2
372 elif self.added:
370 elif self.added:
373 return NONNORMAL
371 return NONNORMAL
374 elif self.from_p2:
375 return FROM_P2
376 elif self._size is None:
372 elif self._size is None:
377 return NONNORMAL
373 return NONNORMAL
378 else:
374 else:
379 return self._size
375 return self._size
380
376
381 def v1_mtime(self):
377 def v1_mtime(self):
382 """return a "mtime" suitable for v1 serialization"""
378 """return a "mtime" suitable for v1 serialization"""
383 if not self.any_tracked:
379 if not self.any_tracked:
384 # the object has no state to record, this is -currently-
380 # the object has no state to record, this is -currently-
385 # unsupported
381 # unsupported
386 raise RuntimeError('untracked item')
382 raise RuntimeError('untracked item')
387 elif self.removed:
383 elif self.removed:
388 return 0
384 return 0
389 elif self._mtime is None:
385 elif self._mtime is None:
390 return AMBIGUOUS_TIME
386 return AMBIGUOUS_TIME
391 elif self._p2_info:
387 elif self._p2_info:
392 return AMBIGUOUS_TIME
388 return AMBIGUOUS_TIME
393 elif not self._p1_tracked:
389 elif not self._p1_tracked:
394 return AMBIGUOUS_TIME
390 return AMBIGUOUS_TIME
395 else:
391 else:
396 return self._mtime
392 return self._mtime
397
393
398 def need_delay(self, now):
394 def need_delay(self, now):
399 """True if the stored mtime would be ambiguous with the current time"""
395 """True if the stored mtime would be ambiguous with the current time"""
400 return self.v1_state() == b'n' and self.v1_mtime() == now
396 return self.v1_state() == b'n' and self.v1_mtime() == now
401
397
402
398
403 def gettype(q):
399 def gettype(q):
404 return int(q & 0xFFFF)
400 return int(q & 0xFFFF)
405
401
406
402
407 class BaseIndexObject(object):
403 class BaseIndexObject(object):
408 # Can I be passed to an algorithm implemented in Rust?
404 # Can I be passed to an algorithm implemented in Rust?
409 rust_ext_compat = 0
405 rust_ext_compat = 0
410 # Format of an index entry according to Python's `struct` language
406 # Format of an index entry according to Python's `struct` language
411 index_format = revlog_constants.INDEX_ENTRY_V1
407 index_format = revlog_constants.INDEX_ENTRY_V1
412 # Size of a C unsigned long long int, platform independent
408 # Size of a C unsigned long long int, platform independent
413 big_int_size = struct.calcsize(b'>Q')
409 big_int_size = struct.calcsize(b'>Q')
414 # Size of a C long int, platform independent
410 # Size of a C long int, platform independent
415 int_size = struct.calcsize(b'>i')
411 int_size = struct.calcsize(b'>i')
416 # An empty index entry, used as a default value to be overridden, or nullrev
412 # An empty index entry, used as a default value to be overridden, or nullrev
417 null_item = (
413 null_item = (
418 0,
414 0,
419 0,
415 0,
420 0,
416 0,
421 -1,
417 -1,
422 -1,
418 -1,
423 -1,
419 -1,
424 -1,
420 -1,
425 sha1nodeconstants.nullid,
421 sha1nodeconstants.nullid,
426 0,
422 0,
427 0,
423 0,
428 revlog_constants.COMP_MODE_INLINE,
424 revlog_constants.COMP_MODE_INLINE,
429 revlog_constants.COMP_MODE_INLINE,
425 revlog_constants.COMP_MODE_INLINE,
430 )
426 )
431
427
432 @util.propertycache
428 @util.propertycache
433 def entry_size(self):
429 def entry_size(self):
434 return self.index_format.size
430 return self.index_format.size
435
431
436 @property
432 @property
437 def nodemap(self):
433 def nodemap(self):
438 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
434 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
439 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
435 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
440 return self._nodemap
436 return self._nodemap
441
437
442 @util.propertycache
438 @util.propertycache
443 def _nodemap(self):
439 def _nodemap(self):
444 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
440 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
445 for r in range(0, len(self)):
441 for r in range(0, len(self)):
446 n = self[r][7]
442 n = self[r][7]
447 nodemap[n] = r
443 nodemap[n] = r
448 return nodemap
444 return nodemap
449
445
450 def has_node(self, node):
446 def has_node(self, node):
451 """return True if the node exist in the index"""
447 """return True if the node exist in the index"""
452 return node in self._nodemap
448 return node in self._nodemap
453
449
454 def rev(self, node):
450 def rev(self, node):
455 """return a revision for a node
451 """return a revision for a node
456
452
457 If the node is unknown, raise a RevlogError"""
453 If the node is unknown, raise a RevlogError"""
458 return self._nodemap[node]
454 return self._nodemap[node]
459
455
460 def get_rev(self, node):
456 def get_rev(self, node):
461 """return a revision for a node
457 """return a revision for a node
462
458
463 If the node is unknown, return None"""
459 If the node is unknown, return None"""
464 return self._nodemap.get(node)
460 return self._nodemap.get(node)
465
461
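A minimal runnable check of the three lookup entry points above, assuming the module-level names (IndexObject, sha1nodeconstants, nullrev) are in scope; even an empty v1 index knows the null node:

    idx = IndexObject(b'')                       # zero revisions
    assert idx.has_node(sha1nodeconstants.nullid)
    assert idx.rev(sha1nodeconstants.nullid) == nullrev
    assert idx.get_rev(b'\x01' * 20) is None     # unknown node: None, not an error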
466 def _stripnodes(self, start):
462 def _stripnodes(self, start):
467 if '_nodemap' in vars(self):
463 if '_nodemap' in vars(self):
468 for r in range(start, len(self)):
464 for r in range(start, len(self)):
469 n = self[r][7]
465 n = self[r][7]
470 del self._nodemap[n]
466 del self._nodemap[n]
471
467
472 def clearcaches(self):
468 def clearcaches(self):
473 self.__dict__.pop('_nodemap', None)
469 self.__dict__.pop('_nodemap', None)
474
470
475 def __len__(self):
471 def __len__(self):
476 return self._lgt + len(self._extra)
472 return self._lgt + len(self._extra)
477
473
478 def append(self, tup):
474 def append(self, tup):
479 if '_nodemap' in vars(self):
475 if '_nodemap' in vars(self):
480 self._nodemap[tup[7]] = len(self)
476 self._nodemap[tup[7]] = len(self)
481 data = self._pack_entry(len(self), tup)
477 data = self._pack_entry(len(self), tup)
482 self._extra.append(data)
478 self._extra.append(data)
483
479
484 def _pack_entry(self, rev, entry):
480 def _pack_entry(self, rev, entry):
485 assert entry[8] == 0
481 assert entry[8] == 0
486 assert entry[9] == 0
482 assert entry[9] == 0
487 return self.index_format.pack(*entry[:8])
483 return self.index_format.pack(*entry[:8])
488
484
489 def _check_index(self, i):
485 def _check_index(self, i):
490 if not isinstance(i, int):
486 if not isinstance(i, int):
491 raise TypeError(b"expecting int indexes")
487 raise TypeError(b"expecting int indexes")
492 if i < 0 or i >= len(self):
488 if i < 0 or i >= len(self):
493 raise IndexError
489 raise IndexError
494
490
495 def __getitem__(self, i):
491 def __getitem__(self, i):
496 if i == -1:
492 if i == -1:
497 return self.null_item
493 return self.null_item
498 self._check_index(i)
494 self._check_index(i)
499 if i >= self._lgt:
495 if i >= self._lgt:
500 data = self._extra[i - self._lgt]
496 data = self._extra[i - self._lgt]
501 else:
497 else:
502 index = self._calculate_index(i)
498 index = self._calculate_index(i)
503 data = self._data[index : index + self.entry_size]
499 data = self._data[index : index + self.entry_size]
504 r = self._unpack_entry(i, data)
500 r = self._unpack_entry(i, data)
505 if self._lgt and i == 0:
501 if self._lgt and i == 0:
506 offset = revlogutils.offset_type(0, gettype(r[0]))
502 offset = revlogutils.offset_type(0, gettype(r[0]))
507 r = (offset,) + r[1:]
503 r = (offset,) + r[1:]
508 return r
504 return r
509
505
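For context on the rev-0 special case above: in revlog v1 the version header overlaps the first entry's offset field, so `__getitem__` rebuilds entry 0 with an offset of zero while keeping its flag bits. The packing convention sketched below (offset in the high bits, 16 bits of flags in the low bits) is an assumption consistent with `gettype` above; `revlogutils.offset_type` itself is not shown in this hunk:

    def offset_type(offset, flags):
        # assumed layout, matching gettype(): high bits = byte offset, low 16 bits = flags
        return (int(offset) << 16) | flags

    def gettype(q):
        return int(q & 0xFFFF)

    packed = offset_type(4096, 0x0001)
    assert gettype(packed) == 0x0001
    assert packed >> 16 == 4096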
510 def _unpack_entry(self, rev, data):
506 def _unpack_entry(self, rev, data):
511 r = self.index_format.unpack(data)
507 r = self.index_format.unpack(data)
512 r = r + (
508 r = r + (
513 0,
509 0,
514 0,
510 0,
515 revlog_constants.COMP_MODE_INLINE,
511 revlog_constants.COMP_MODE_INLINE,
516 revlog_constants.COMP_MODE_INLINE,
512 revlog_constants.COMP_MODE_INLINE,
517 )
513 )
518 return r
514 return r
519
515
520 def pack_header(self, header):
516 def pack_header(self, header):
521 """pack header information as binary"""
517 """pack header information as binary"""
522 v_fmt = revlog_constants.INDEX_HEADER
518 v_fmt = revlog_constants.INDEX_HEADER
523 return v_fmt.pack(header)
519 return v_fmt.pack(header)
524
520
525 def entry_binary(self, rev):
521 def entry_binary(self, rev):
526 """return the raw binary string representing a revision"""
522 """return the raw binary string representing a revision"""
527 entry = self[rev]
523 entry = self[rev]
528 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
524 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
529 if rev == 0:
525 if rev == 0:
530 p = p[revlog_constants.INDEX_HEADER.size :]
526 p = p[revlog_constants.INDEX_HEADER.size :]
531 return p
527 return p
532
528
533
529
534 class IndexObject(BaseIndexObject):
530 class IndexObject(BaseIndexObject):
535 def __init__(self, data):
531 def __init__(self, data):
536 assert len(data) % self.entry_size == 0, (
532 assert len(data) % self.entry_size == 0, (
537 len(data),
533 len(data),
538 self.entry_size,
534 self.entry_size,
539 len(data) % self.entry_size,
535 len(data) % self.entry_size,
540 )
536 )
541 self._data = data
537 self._data = data
542 self._lgt = len(data) // self.entry_size
538 self._lgt = len(data) // self.entry_size
543 self._extra = []
539 self._extra = []
544
540
545 def _calculate_index(self, i):
541 def _calculate_index(self, i):
546 return i * self.entry_size
542 return i * self.entry_size
547
543
548 def __delitem__(self, i):
544 def __delitem__(self, i):
549 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
545 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
550 raise ValueError(b"deleting slices only supports a:-1 with step 1")
546 raise ValueError(b"deleting slices only supports a:-1 with step 1")
551 i = i.start
547 i = i.start
552 self._check_index(i)
548 self._check_index(i)
553 self._stripnodes(i)
549 self._stripnodes(i)
554 if i < self._lgt:
550 if i < self._lgt:
555 self._data = self._data[: i * self.entry_size]
551 self._data = self._data[: i * self.entry_size]
556 self._lgt = i
552 self._lgt = i
557 self._extra = []
553 self._extra = []
558 else:
554 else:
559 self._extra = self._extra[: i - self._lgt]
555 self._extra = self._extra[: i - self._lgt]
560
556
561
557
562 class PersistentNodeMapIndexObject(IndexObject):
558 class PersistentNodeMapIndexObject(IndexObject):
563 """a Debug oriented class to test persistent nodemap
559 """a Debug oriented class to test persistent nodemap
564
560
565 We need a simple python object to test API and higher level behavior. See
561 We need a simple python object to test API and higher level behavior. See
566 the Rust implementation for more serious usage. This should be used only
562 the Rust implementation for more serious usage. This should be used only
567 through the dedicated `devel.persistent-nodemap` config.
563 through the dedicated `devel.persistent-nodemap` config.
568 """
564 """
569
565
570 def nodemap_data_all(self):
566 def nodemap_data_all(self):
571 """Return bytes containing a full serialization of a nodemap
567 """Return bytes containing a full serialization of a nodemap
572
568
573 The nodemap should be valid for the full set of revisions in the
569 The nodemap should be valid for the full set of revisions in the
574 index."""
570 index."""
575 return nodemaputil.persistent_data(self)
571 return nodemaputil.persistent_data(self)
576
572
577 def nodemap_data_incremental(self):
573 def nodemap_data_incremental(self):
578 """Return bytes containing a incremental update to persistent nodemap
574 """Return bytes containing a incremental update to persistent nodemap
579
575
580 This contains the data for an append-only update of the data provided
576 This contains the data for an append-only update of the data provided
581 in the last call to `update_nodemap_data`.
577 in the last call to `update_nodemap_data`.
582 """
578 """
583 if self._nm_root is None:
579 if self._nm_root is None:
584 return None
580 return None
585 docket = self._nm_docket
581 docket = self._nm_docket
586 changed, data = nodemaputil.update_persistent_data(
582 changed, data = nodemaputil.update_persistent_data(
587 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
583 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
588 )
584 )
589
585
590 self._nm_root = self._nm_max_idx = self._nm_docket = None
586 self._nm_root = self._nm_max_idx = self._nm_docket = None
591 return docket, changed, data
587 return docket, changed, data
592
588
593 def update_nodemap_data(self, docket, nm_data):
589 def update_nodemap_data(self, docket, nm_data):
594 """provide full block of persisted binary data for a nodemap
590 """provide full block of persisted binary data for a nodemap
595
591
596 The data are expected to come from disk. See `nodemap_data_all` for a
592 The data are expected to come from disk. See `nodemap_data_all` for a
597 producer of such data."""
593 producer of such data."""
598 if nm_data is not None:
594 if nm_data is not None:
599 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
595 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
600 if self._nm_root:
596 if self._nm_root:
601 self._nm_docket = docket
597 self._nm_docket = docket
602 else:
598 else:
603 self._nm_root = self._nm_max_idx = self._nm_docket = None
599 self._nm_root = self._nm_max_idx = self._nm_docket = None
604
600
605
601
606 class InlinedIndexObject(BaseIndexObject):
602 class InlinedIndexObject(BaseIndexObject):
607 def __init__(self, data, inline=0):
603 def __init__(self, data, inline=0):
608 self._data = data
604 self._data = data
609 self._lgt = self._inline_scan(None)
605 self._lgt = self._inline_scan(None)
610 self._inline_scan(self._lgt)
606 self._inline_scan(self._lgt)
611 self._extra = []
607 self._extra = []
612
608
613 def _inline_scan(self, lgt):
609 def _inline_scan(self, lgt):
614 off = 0
610 off = 0
615 if lgt is not None:
611 if lgt is not None:
616 self._offsets = [0] * lgt
612 self._offsets = [0] * lgt
617 count = 0
613 count = 0
618 while off <= len(self._data) - self.entry_size:
614 while off <= len(self._data) - self.entry_size:
619 start = off + self.big_int_size
615 start = off + self.big_int_size
620 (s,) = struct.unpack(
616 (s,) = struct.unpack(
621 b'>i',
617 b'>i',
622 self._data[start : start + self.int_size],
618 self._data[start : start + self.int_size],
623 )
619 )
624 if lgt is not None:
620 if lgt is not None:
625 self._offsets[count] = off
621 self._offsets[count] = off
626 count += 1
622 count += 1
627 off += self.entry_size + s
623 off += self.entry_size + s
628 if off != len(self._data):
624 if off != len(self._data):
629 raise ValueError(b"corrupted data")
625 raise ValueError(b"corrupted data")
630 return count
626 return count
631
627
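The scan above walks an inline revlog, where each fixed-size index entry is immediately followed by that revision's compressed data; the chunk length is the 4-byte big-endian int stored `big_int_size` bytes into the entry. A standalone sketch of the same arithmetic, with the v1 entry size hard-coded:

    import struct

    entry_size = 64                          # size of a v1 index entry
    big_int_size = struct.calcsize(b'>Q')    # 8: skip the 64-bit offset/flags field
    int_size = struct.calcsize(b'>i')        # 4: the compressed-length field

    def scan_offsets(data):
        """return the start offset of every entry in inline revlog bytes `data`"""
        offsets = []
        off = 0
        while off <= len(data) - entry_size:
            start = off + big_int_size
            (length,) = struct.unpack(b'>i', data[start : start + int_size])
            offsets.append(off)
            off += entry_size + length       # jump over the entry and its data chunk
        return offsets

    assert scan_offsets(b'') == []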
632 def __delitem__(self, i):
628 def __delitem__(self, i):
633 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
629 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
634 raise ValueError(b"deleting slices only supports a:-1 with step 1")
630 raise ValueError(b"deleting slices only supports a:-1 with step 1")
635 i = i.start
631 i = i.start
636 self._check_index(i)
632 self._check_index(i)
637 self._stripnodes(i)
633 self._stripnodes(i)
638 if i < self._lgt:
634 if i < self._lgt:
639 self._offsets = self._offsets[:i]
635 self._offsets = self._offsets[:i]
640 self._lgt = i
636 self._lgt = i
641 self._extra = []
637 self._extra = []
642 else:
638 else:
643 self._extra = self._extra[: i - self._lgt]
639 self._extra = self._extra[: i - self._lgt]
644
640
645 def _calculate_index(self, i):
641 def _calculate_index(self, i):
646 return self._offsets[i]
642 return self._offsets[i]
647
643
648
644
649 def parse_index2(data, inline, revlogv2=False):
645 def parse_index2(data, inline, revlogv2=False):
650 if not inline:
646 if not inline:
651 cls = IndexObject2 if revlogv2 else IndexObject
647 cls = IndexObject2 if revlogv2 else IndexObject
652 return cls(data), None
648 return cls(data), None
653 cls = InlinedIndexObject
649 cls = InlinedIndexObject
654 return cls(data, inline), (0, data)
650 return cls(data, inline), (0, data)
655
651
656
652
657 def parse_index_cl_v2(data):
653 def parse_index_cl_v2(data):
658 return IndexChangelogV2(data), None
654 return IndexChangelogV2(data), None
659
655
660
656
661 class IndexObject2(IndexObject):
657 class IndexObject2(IndexObject):
662 index_format = revlog_constants.INDEX_ENTRY_V2
658 index_format = revlog_constants.INDEX_ENTRY_V2
663
659
664 def replace_sidedata_info(
660 def replace_sidedata_info(
665 self,
661 self,
666 rev,
662 rev,
667 sidedata_offset,
663 sidedata_offset,
668 sidedata_length,
664 sidedata_length,
669 offset_flags,
665 offset_flags,
670 compression_mode,
666 compression_mode,
671 ):
667 ):
672 """
668 """
673 Replace an existing index entry's sidedata offset and length with new
669 Replace an existing index entry's sidedata offset and length with new
674 ones.
670 ones.
675 This cannot be used outside of the context of sidedata rewriting,
671 This cannot be used outside of the context of sidedata rewriting,
676 inside the transaction that creates the revision `rev`.
672 inside the transaction that creates the revision `rev`.
677 """
673 """
678 if rev < 0:
674 if rev < 0:
679 raise KeyError
675 raise KeyError
680 self._check_index(rev)
676 self._check_index(rev)
681 if rev < self._lgt:
677 if rev < self._lgt:
682 msg = b"cannot rewrite entries outside of this transaction"
678 msg = b"cannot rewrite entries outside of this transaction"
683 raise KeyError(msg)
679 raise KeyError(msg)
684 else:
680 else:
685 entry = list(self[rev])
681 entry = list(self[rev])
686 entry[0] = offset_flags
682 entry[0] = offset_flags
687 entry[8] = sidedata_offset
683 entry[8] = sidedata_offset
688 entry[9] = sidedata_length
684 entry[9] = sidedata_length
689 entry[11] = compression_mode
685 entry[11] = compression_mode
690 entry = tuple(entry)
686 entry = tuple(entry)
691 new = self._pack_entry(rev, entry)
687 new = self._pack_entry(rev, entry)
692 self._extra[rev - self._lgt] = new
688 self._extra[rev - self._lgt] = new
693
689
694 def _unpack_entry(self, rev, data):
690 def _unpack_entry(self, rev, data):
695 data = self.index_format.unpack(data)
691 data = self.index_format.unpack(data)
696 entry = data[:10]
692 entry = data[:10]
697 data_comp = data[10] & 3
693 data_comp = data[10] & 3
698 sidedata_comp = (data[10] & (3 << 2)) >> 2
694 sidedata_comp = (data[10] & (3 << 2)) >> 2
699 return entry + (data_comp, sidedata_comp)
695 return entry + (data_comp, sidedata_comp)
700
696
701 def _pack_entry(self, rev, entry):
697 def _pack_entry(self, rev, entry):
702 data = entry[:10]
698 data = entry[:10]
703 data_comp = entry[10] & 3
699 data_comp = entry[10] & 3
704 sidedata_comp = (entry[11] & 3) << 2
700 sidedata_comp = (entry[11] & 3) << 2
705 data += (data_comp | sidedata_comp,)
701 data += (data_comp | sidedata_comp,)
706
702
707 return self.index_format.pack(*data)
703 return self.index_format.pack(*data)
708
704
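Both `_pack_entry` and `_unpack_entry` above fold the two 2-bit compression modes (revision data and sidedata) into one small integer: bits 0-1 carry the data compression mode, bits 2-3 the sidedata compression mode. A standalone round-trip sketch of that packing:

    def pack_comp_modes(data_comp, sidedata_comp):
        # each mode fits in two bits
        return (data_comp & 3) | ((sidedata_comp & 3) << 2)

    def unpack_comp_modes(field):
        return field & 3, (field >> 2) & 3

    for d in range(4):
        for s in range(4):
            assert unpack_comp_modes(pack_comp_modes(d, s)) == (d, s)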
709 def entry_binary(self, rev):
705 def entry_binary(self, rev):
710 """return the raw binary string representing a revision"""
706 """return the raw binary string representing a revision"""
711 entry = self[rev]
707 entry = self[rev]
712 return self._pack_entry(rev, entry)
708 return self._pack_entry(rev, entry)
713
709
714 def pack_header(self, header):
710 def pack_header(self, header):
715 """pack header information as binary"""
711 """pack header information as binary"""
716 msg = 'version header should go in the docket, not the index: %d'
712 msg = 'version header should go in the docket, not the index: %d'
717 msg %= header
713 msg %= header
718 raise error.ProgrammingError(msg)
714 raise error.ProgrammingError(msg)
719
715
720
716
721 class IndexChangelogV2(IndexObject2):
717 class IndexChangelogV2(IndexObject2):
722 index_format = revlog_constants.INDEX_ENTRY_CL_V2
718 index_format = revlog_constants.INDEX_ENTRY_CL_V2
723
719
724 def _unpack_entry(self, rev, data, r=True):
720 def _unpack_entry(self, rev, data, r=True):
725 items = self.index_format.unpack(data)
721 items = self.index_format.unpack(data)
726 entry = items[:3] + (rev, rev) + items[3:8]
722 entry = items[:3] + (rev, rev) + items[3:8]
727 data_comp = items[8] & 3
723 data_comp = items[8] & 3
728 sidedata_comp = (items[8] >> 2) & 3
724 sidedata_comp = (items[8] >> 2) & 3
729 return entry + (data_comp, sidedata_comp)
725 return entry + (data_comp, sidedata_comp)
730
726
731 def _pack_entry(self, rev, entry):
727 def _pack_entry(self, rev, entry):
732 assert entry[3] == rev, entry[3]
728 assert entry[3] == rev, entry[3]
733 assert entry[4] == rev, entry[4]
729 assert entry[4] == rev, entry[4]
734 data = entry[:3] + entry[5:10]
730 data = entry[:3] + entry[5:10]
735 data_comp = entry[10] & 3
731 data_comp = entry[10] & 3
736 sidedata_comp = (entry[11] & 3) << 2
732 sidedata_comp = (entry[11] & 3) << 2
737 data += (data_comp | sidedata_comp,)
733 data += (data_comp | sidedata_comp,)
738 return self.index_format.pack(*data)
734 return self.index_format.pack(*data)
739
735
740
736
741 def parse_index_devel_nodemap(data, inline):
737 def parse_index_devel_nodemap(data, inline):
742 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
738 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
743 return PersistentNodeMapIndexObject(data), None
739 return PersistentNodeMapIndexObject(data), None
744
740
745
741
746 def parse_dirstate(dmap, copymap, st):
742 def parse_dirstate(dmap, copymap, st):
747 parents = [st[:20], st[20:40]]
743 parents = [st[:20], st[20:40]]
748 # dereference fields so they will be local in loop
744 # dereference fields so they will be local in loop
749 format = b">cllll"
745 format = b">cllll"
750 e_size = struct.calcsize(format)
746 e_size = struct.calcsize(format)
751 pos1 = 40
747 pos1 = 40
752 l = len(st)
748 l = len(st)
753
749
754 # the inner loop
750 # the inner loop
755 while pos1 < l:
751 while pos1 < l:
756 pos2 = pos1 + e_size
752 pos2 = pos1 + e_size
757 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
753 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
758 pos1 = pos2 + e[4]
754 pos1 = pos2 + e[4]
759 f = st[pos2:pos1]
755 f = st[pos2:pos1]
760 if b'\0' in f:
756 if b'\0' in f:
761 f, c = f.split(b'\0')
757 f, c = f.split(b'\0')
762 copymap[f] = c
758 copymap[f] = c
763 dmap[f] = DirstateItem.from_v1_data(*e[:4])
759 dmap[f] = DirstateItem.from_v1_data(*e[:4])
764 return parents
760 return parents
765
761
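For reference, the v1 dirstate layout parsed above: 40 bytes of parent nodes, then a sequence of records, each a `>cllll` header (state, mode, size, mtime, filename length) followed by the filename, with an optional NUL-separated copy source appended to the name. A standalone round-trip of a single record, with illustrative values:

    import struct

    name = b'foo.txt'
    record = struct.pack(b'>cllll', b'n', 0o644, 12, 1638531810, len(name)) + name

    state, mode, size, mtime, flen = struct.unpack(b'>cllll', record[:17])
    entry_name = record[17 : 17 + flen]
    if b'\0' in entry_name:                  # copy information rides on the filename
        entry_name, copy_source = entry_name.split(b'\0')
    assert (state, entry_name) == (b'n', b'foo.txt')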
766
762
767 def pack_dirstate(dmap, copymap, pl, now):
763 def pack_dirstate(dmap, copymap, pl, now):
768 now = int(now)
764 now = int(now)
769 cs = stringio()
765 cs = stringio()
770 write = cs.write
766 write = cs.write
771 write(b"".join(pl))
767 write(b"".join(pl))
772 for f, e in pycompat.iteritems(dmap):
768 for f, e in pycompat.iteritems(dmap):
773 if e.need_delay(now):
769 if e.need_delay(now):
774 # The file was last modified "simultaneously" with the current
770 # The file was last modified "simultaneously" with the current
775 # write to dirstate (i.e. within the same second for file-
771 # write to dirstate (i.e. within the same second for file-
776 # systems with a granularity of 1 sec). This commonly happens
772 # systems with a granularity of 1 sec). This commonly happens
777 # for at least a couple of files on 'update'.
773 # for at least a couple of files on 'update'.
778 # The user could change the file without changing its size
774 # The user could change the file without changing its size
779 # within the same second. Invalidate the file's mtime in
775 # within the same second. Invalidate the file's mtime in
780 # dirstate, forcing future 'status' calls to compare the
776 # dirstate, forcing future 'status' calls to compare the
781 # contents of the file if the size is the same. This prevents
777 # contents of the file if the size is the same. This prevents
782 # mistakenly treating such files as clean.
778 # mistakenly treating such files as clean.
783 e.set_possibly_dirty()
779 e.set_possibly_dirty()
784
780
785 if f in copymap:
781 if f in copymap:
786 f = b"%s\0%s" % (f, copymap[f])
782 f = b"%s\0%s" % (f, copymap[f])
787 e = _pack(
783 e = _pack(
788 b">cllll",
784 b">cllll",
789 e.v1_state(),
785 e.v1_state(),
790 e.v1_mode(),
786 e.v1_mode(),
791 e.v1_size(),
787 e.v1_size(),
792 e.v1_mtime(),
788 e.v1_mtime(),
793 len(f),
789 len(f),
794 )
790 )
795 write(e)
791 write(e)
796 write(f)
792 write(f)
797 return cs.getvalue()
793 return cs.getvalue()
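A short sketch of the ambiguity handled in `pack_dirstate` above, assuming the module-level names are in scope: when an entry's recorded mtime equals the timestamp of the dirstate write itself, the mtime is discarded so the next `status` has to compare file contents (the timestamp is illustrative):

    now = 1638531810

    e = DirstateItem.new_normal(0o644, 12, now)   # written in the same second
    assert e.need_delay(now)

    e.set_possibly_dirty()                        # what pack_dirstate does
    assert e.v1_mtime() == AMBIGUOUS_TIME         # serialized as "needs lookup"
    assert not e.need_delay(now)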
@@ -1,437 +1,433
1 use crate::errors::HgError;
1 use crate::errors::HgError;
2 use bitflags::bitflags;
2 use bitflags::bitflags;
3 use std::convert::TryFrom;
3 use std::convert::TryFrom;
4
4
5 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
5 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
6 pub enum EntryState {
6 pub enum EntryState {
7 Normal,
7 Normal,
8 Added,
8 Added,
9 Removed,
9 Removed,
10 Merged,
10 Merged,
11 }
11 }
12
12
13 /// The C implementation uses all signed types. This will be an issue
13 /// The C implementation uses all signed types. This will be an issue
14 /// either when 4GB+ source files are commonplace or in 2038, whichever
14 /// either when 4GB+ source files are commonplace or in 2038, whichever
15 /// comes first.
15 /// comes first.
16 #[derive(Debug, PartialEq, Copy, Clone)]
16 #[derive(Debug, PartialEq, Copy, Clone)]
17 pub struct DirstateEntry {
17 pub struct DirstateEntry {
18 pub(crate) flags: Flags,
18 pub(crate) flags: Flags,
19 mode_size: Option<(i32, i32)>,
19 mode_size: Option<(i32, i32)>,
20 mtime: Option<i32>,
20 mtime: Option<i32>,
21 }
21 }
22
22
23 bitflags! {
23 bitflags! {
24 pub(crate) struct Flags: u8 {
24 pub(crate) struct Flags: u8 {
25 const WDIR_TRACKED = 1 << 0;
25 const WDIR_TRACKED = 1 << 0;
26 const P1_TRACKED = 1 << 1;
26 const P1_TRACKED = 1 << 1;
27 const P2_INFO = 1 << 2;
27 const P2_INFO = 1 << 2;
28 }
28 }
29 }
29 }
30
30
31 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
31 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
32
32
33 pub const MTIME_UNSET: i32 = -1;
33 pub const MTIME_UNSET: i32 = -1;
34
34
35 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
35 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
36 /// other parent. This allows revert to pick the right status back during a
36 /// other parent. This allows revert to pick the right status back during a
37 /// merge.
37 /// merge.
38 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
38 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
39 /// A special value used for internal representation of special case in
39 /// A special value used for internal representation of special case in
40 /// dirstate v1 format.
40 /// dirstate v1 format.
41 pub const SIZE_NON_NORMAL: i32 = -1;
41 pub const SIZE_NON_NORMAL: i32 = -1;
42
42
43 impl DirstateEntry {
43 impl DirstateEntry {
44 pub fn from_v2_data(
44 pub fn from_v2_data(
45 wdir_tracked: bool,
45 wdir_tracked: bool,
46 p1_tracked: bool,
46 p1_tracked: bool,
47 p2_info: bool,
47 p2_info: bool,
48 mode_size: Option<(i32, i32)>,
48 mode_size: Option<(i32, i32)>,
49 mtime: Option<i32>,
49 mtime: Option<i32>,
50 ) -> Self {
50 ) -> Self {
51 let mut flags = Flags::empty();
51 let mut flags = Flags::empty();
52 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
52 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
53 flags.set(Flags::P1_TRACKED, p1_tracked);
53 flags.set(Flags::P1_TRACKED, p1_tracked);
54 flags.set(Flags::P2_INFO, p2_info);
54 flags.set(Flags::P2_INFO, p2_info);
55 Self {
55 Self {
56 flags,
56 flags,
57 mode_size,
57 mode_size,
58 mtime,
58 mtime,
59 }
59 }
60 }
60 }
61
61
62 pub fn from_v1_data(
62 pub fn from_v1_data(
63 state: EntryState,
63 state: EntryState,
64 mode: i32,
64 mode: i32,
65 size: i32,
65 size: i32,
66 mtime: i32,
66 mtime: i32,
67 ) -> Self {
67 ) -> Self {
68 match state {
68 match state {
69 EntryState::Normal => {
69 EntryState::Normal => {
70 if size == SIZE_FROM_OTHER_PARENT {
70 if size == SIZE_FROM_OTHER_PARENT {
71 Self::new_from_p2()
71 Self::new_from_p2()
72 } else if size == SIZE_NON_NORMAL {
72 } else if size == SIZE_NON_NORMAL {
73 Self::new_possibly_dirty()
73 Self::new_possibly_dirty()
74 } else if mtime == MTIME_UNSET {
74 } else if mtime == MTIME_UNSET {
75 Self {
75 Self {
76 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
76 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
77 mode_size: Some((mode, size)),
77 mode_size: Some((mode, size)),
78 mtime: None,
78 mtime: None,
79 }
79 }
80 } else {
80 } else {
81 Self::new_normal(mode, size, mtime)
81 Self::new_normal(mode, size, mtime)
82 }
82 }
83 }
83 }
84 EntryState::Added => Self::new_added(),
84 EntryState::Added => Self::new_added(),
85 EntryState::Removed => Self {
85 EntryState::Removed => Self {
86 flags: if size == SIZE_NON_NORMAL {
86 flags: if size == SIZE_NON_NORMAL {
87 Flags::P1_TRACKED | Flags::P2_INFO
87 Flags::P1_TRACKED | Flags::P2_INFO
88 } else if size == SIZE_FROM_OTHER_PARENT {
88 } else if size == SIZE_FROM_OTHER_PARENT {
89 // We don’t know if P1_TRACKED should be set (file history)
89 // We don’t know if P1_TRACKED should be set (file history)
90 Flags::P2_INFO
90 Flags::P2_INFO
91 } else {
91 } else {
92 Flags::P1_TRACKED
92 Flags::P1_TRACKED
93 },
93 },
94 mode_size: None,
94 mode_size: None,
95 mtime: None,
95 mtime: None,
96 },
96 },
97 EntryState::Merged => Self::new_merged(),
97 EntryState::Merged => Self::new_merged(),
98 }
98 }
99 }
99 }
100
100
101 pub fn new_from_p2() -> Self {
101 pub fn new_from_p2() -> Self {
102 Self {
102 Self {
103 // might be missing P1_TRACKED
103 // might be missing P1_TRACKED
104 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
104 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
105 mode_size: None,
105 mode_size: None,
106 mtime: None,
106 mtime: None,
107 }
107 }
108 }
108 }
109
109
110 pub fn new_possibly_dirty() -> Self {
110 pub fn new_possibly_dirty() -> Self {
111 Self {
111 Self {
112 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
112 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
113 mode_size: None,
113 mode_size: None,
114 mtime: None,
114 mtime: None,
115 }
115 }
116 }
116 }
117
117
118 pub fn new_added() -> Self {
118 pub fn new_added() -> Self {
119 Self {
119 Self {
120 flags: Flags::WDIR_TRACKED,
120 flags: Flags::WDIR_TRACKED,
121 mode_size: None,
121 mode_size: None,
122 mtime: None,
122 mtime: None,
123 }
123 }
124 }
124 }
125
125
126 pub fn new_merged() -> Self {
126 pub fn new_merged() -> Self {
127 Self {
127 Self {
128 flags: Flags::WDIR_TRACKED
128 flags: Flags::WDIR_TRACKED
129 | Flags::P1_TRACKED // might not be true because of rename ?
129 | Flags::P1_TRACKED // might not be true because of rename ?
130 | Flags::P2_INFO, // might not be true because of rename ?
130 | Flags::P2_INFO, // might not be true because of rename ?
131 mode_size: None,
131 mode_size: None,
132 mtime: None,
132 mtime: None,
133 }
133 }
134 }
134 }
135
135
136 pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self {
136 pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self {
137 Self {
137 Self {
138 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
138 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
139 mode_size: Some((mode, size)),
139 mode_size: Some((mode, size)),
140 mtime: Some(mtime),
140 mtime: Some(mtime),
141 }
141 }
142 }
142 }
143
143
144 /// Creates a new entry in "removed" state.
144 /// Creates a new entry in "removed" state.
145 ///
145 ///
146 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
146 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
147 /// `SIZE_FROM_OTHER_PARENT`
147 /// `SIZE_FROM_OTHER_PARENT`
148 pub fn new_removed(size: i32) -> Self {
148 pub fn new_removed(size: i32) -> Self {
149 Self::from_v1_data(EntryState::Removed, 0, size, 0)
149 Self::from_v1_data(EntryState::Removed, 0, size, 0)
150 }
150 }
151
151
152 pub fn tracked(&self) -> bool {
152 pub fn tracked(&self) -> bool {
153 self.flags.contains(Flags::WDIR_TRACKED)
153 self.flags.contains(Flags::WDIR_TRACKED)
154 }
154 }
155
155
156 pub fn p1_tracked(&self) -> bool {
156 pub fn p1_tracked(&self) -> bool {
157 self.flags.contains(Flags::P1_TRACKED)
157 self.flags.contains(Flags::P1_TRACKED)
158 }
158 }
159
159
160 fn in_either_parent(&self) -> bool {
160 fn in_either_parent(&self) -> bool {
161 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
161 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
162 }
162 }
163
163
164 pub fn removed(&self) -> bool {
164 pub fn removed(&self) -> bool {
165 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
165 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
166 }
166 }
167
167
168 pub fn p2_info(&self) -> bool {
168 pub fn p2_info(&self) -> bool {
169 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
169 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
170 }
170 }
171
171
172 pub fn merged(&self) -> bool {
172 pub fn merged(&self) -> bool {
173 self.flags
173 self.flags
174 .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
174 .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
175 }
175 }
176
176
177 pub fn added(&self) -> bool {
177 pub fn added(&self) -> bool {
178 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
178 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
179 }
179 }
180
180
181 pub fn from_p2(&self) -> bool {
181 pub fn from_p2(&self) -> bool {
182 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
182 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
183 && !self.flags.contains(Flags::P1_TRACKED)
183 && !self.flags.contains(Flags::P1_TRACKED)
184 }
184 }
185
185
186 pub fn maybe_clean(&self) -> bool {
186 pub fn maybe_clean(&self) -> bool {
187 if !self.flags.contains(Flags::WDIR_TRACKED) {
187 if !self.flags.contains(Flags::WDIR_TRACKED) {
188 false
188 false
189 } else if !self.flags.contains(Flags::P1_TRACKED) {
189 } else if !self.flags.contains(Flags::P1_TRACKED) {
190 false
190 false
191 } else if self.flags.contains(Flags::P2_INFO) {
191 } else if self.flags.contains(Flags::P2_INFO) {
192 false
192 false
193 } else {
193 } else {
194 true
194 true
195 }
195 }
196 }
196 }
197
197
198 pub fn any_tracked(&self) -> bool {
198 pub fn any_tracked(&self) -> bool {
199 self.flags.intersects(
199 self.flags.intersects(
200 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
200 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
201 )
201 )
202 }
202 }
203
203
204 /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)`
204 /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)`
205 pub(crate) fn v2_data(
205 pub(crate) fn v2_data(
206 &self,
206 &self,
207 ) -> (bool, bool, bool, Option<(i32, i32)>, Option<i32>) {
207 ) -> (bool, bool, bool, Option<(i32, i32)>, Option<i32>) {
208 if !self.any_tracked() {
208 if !self.any_tracked() {
209 // TODO: return an Option instead?
209 // TODO: return an Option instead?
210 panic!("Accessing v1_state of an untracked DirstateEntry")
210 panic!("Accessing v1_state of an untracked DirstateEntry")
211 }
211 }
212 let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
212 let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
213 let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
213 let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
214 let p2_info = self.flags.contains(Flags::P2_INFO);
214 let p2_info = self.flags.contains(Flags::P2_INFO);
215 let mode_size = self.mode_size;
215 let mode_size = self.mode_size;
216 let mtime = self.mtime;
216 let mtime = self.mtime;
217 (wdir_tracked, p1_tracked, p2_info, mode_size, mtime)
217 (wdir_tracked, p1_tracked, p2_info, mode_size, mtime)
218 }
218 }

    fn v1_state(&self) -> EntryState {
        if !self.any_tracked() {
            // TODO: return an Option instead?
            panic!("Accessing v1_state of an untracked DirstateEntry")
        }
        if self.removed() {
            EntryState::Removed
        } else if self.merged() {
            EntryState::Merged
        } else if self.added() {
            EntryState::Added
        } else {
            EntryState::Normal
        }
    }

    fn v1_mode(&self) -> i32 {
        if let Some((mode, _size)) = self.mode_size {
            mode
        } else {
            0
        }
    }

    fn v1_size(&self) -> i32 {
        if !self.any_tracked() {
            // TODO: return an Option instead?
            panic!("Accessing v1_size of an untracked DirstateEntry")
        }
        if self.removed()
            && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO)
        {
            SIZE_NON_NORMAL
        // Changed by this revision: `P2_INFO` is now checked directly. This
        // single branch replaces the previous
        // `self.removed() && self.flags.contains(Flags::P2_INFO)`,
        // `self.merged()` and `self.from_p2()` branches, all of which
        // returned SIZE_FROM_OTHER_PARENT.
        } else if self.flags.contains(Flags::P2_INFO) {
            SIZE_FROM_OTHER_PARENT
        } else if self.removed() {
            0
        } else if self.added() {
            SIZE_NON_NORMAL
        } else if let Some((_mode, size)) = self.mode_size {
            size
        } else {
            SIZE_NON_NORMAL
        }
    }
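
    // Illustrative sketch, not part of the original file: the same decision
    // table as `v1_size` above, written over plain booleans so the effect of
    // the direct `P2_INFO` check is easy to see. The marker values mirror the
    // v1 constants used in this file (non-normal = -1, from-other-parent =
    // -2); the function name is hypothetical.
    #[allow(dead_code)]
    fn v1_size_marker_sketch(
        wdir_tracked: bool,
        p1_tracked: bool,
        p2_info: bool,
        recorded_size: Option<i32>,
    ) -> i32 {
        const NON_NORMAL: i32 = -1;
        const FROM_OTHER_PARENT: i32 = -2;
        let removed = !wdir_tracked && (p1_tracked || p2_info);
        let added = wdir_tracked && !p1_tracked && !p2_info;
        if removed && p1_tracked && p2_info {
            NON_NORMAL
        } else if p2_info {
            // Reached by the entries that the old code matched with its
            // `removed() && P2_INFO`, `merged()` or `from_p2()` branches.
            FROM_OTHER_PARENT
        } else if removed {
            0
        } else if added {
            NON_NORMAL
        } else if let Some(size) = recorded_size {
            size
        } else {
            NON_NORMAL
        }
    }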

    fn v1_mtime(&self) -> i32 {
        if !self.any_tracked() {
            // TODO: return an Option instead?
            panic!("Accessing v1_mtime of an untracked DirstateEntry")
        }
        if self.removed() {
            0
        } else if self.flags.contains(Flags::P2_INFO) {
            MTIME_UNSET
        } else if !self.flags.contains(Flags::P1_TRACKED) {
            MTIME_UNSET
        } else {
            self.mtime.unwrap_or(MTIME_UNSET)
        }
    }

    // TODO: return `Option<EntryState>`? None when `!self.any_tracked`
    pub fn state(&self) -> EntryState {
        self.v1_state()
    }

    // TODO: return Option?
    pub fn mode(&self) -> i32 {
        self.v1_mode()
    }

    // TODO: return Option?
    pub fn size(&self) -> i32 {
        self.v1_size()
    }

    // TODO: return Option?
    pub fn mtime(&self) -> i32 {
        self.v1_mtime()
    }

    pub fn drop_merge_data(&mut self) {
        if self.flags.contains(Flags::P2_INFO) {
            self.flags.remove(Flags::P2_INFO);
            self.mode_size = None;
            self.mtime = None;
        }
    }

    pub fn set_possibly_dirty(&mut self) {
        self.mtime = None
    }

    pub fn set_clean(&mut self, mode: i32, size: i32, mtime: i32) {
        self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED);
        self.mode_size = Some((mode, size));
        self.mtime = Some(mtime);
    }

    pub fn set_tracked(&mut self) {
        self.flags.insert(Flags::WDIR_TRACKED);
        // `set_tracked` replaces various `normallookup` calls, so we mark
        // the file as needing lookup.
        //
        // Consider dropping this in the future in favor of something less
        // broad.
        self.mtime = None;
    }

    pub fn set_untracked(&mut self) {
        self.flags.remove(Flags::WDIR_TRACKED);
        self.mode_size = None;
        self.mtime = None;
    }

    /// Returns `(state, mode, size, mtime)` for the purpose of serialization
    /// in the dirstate-v1 format.
    ///
    /// This includes marker values such as `mtime == -1`. In the future we may
    /// want to not represent these cases that way in memory, but serialization
    /// will need to keep the same format.
    pub fn v1_data(&self) -> (u8, i32, i32, i32) {
        (
            self.v1_state().into(),
            self.v1_mode(),
            self.v1_size(),
            self.v1_mtime(),
        )
    }
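
    // Illustrative sketch, not part of the original file: how the
    // `(state, mode, size, mtime)` tuple above could be laid out as one
    // dirstate-v1 on-disk record. This assumes the v1 layout of a one-byte
    // state followed by three big-endian i32 fields, a big-endian i32
    // filename length, and then the filename bytes; the helper name is
    // hypothetical.
    #[allow(dead_code)]
    fn pack_v1_record(data: (u8, i32, i32, i32), filename: &[u8]) -> Vec<u8> {
        let (state, mode, size, mtime) = data;
        let mut record = Vec::with_capacity(17 + filename.len());
        record.push(state);
        record.extend_from_slice(&mode.to_be_bytes());
        record.extend_from_slice(&size.to_be_bytes());
        record.extend_from_slice(&mtime.to_be_bytes());
        record.extend_from_slice(&(filename.len() as i32).to_be_bytes());
        record.extend_from_slice(filename);
        record
    }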

    pub(crate) fn is_from_other_parent(&self) -> bool {
        self.state() == EntryState::Normal
            && self.size() == SIZE_FROM_OTHER_PARENT
    }

    // TODO: other platforms
    #[cfg(unix)]
    pub fn mode_changed(
        &self,
        filesystem_metadata: &std::fs::Metadata,
    ) -> bool {
        use std::os::unix::fs::MetadataExt;
        const EXEC_BIT_MASK: u32 = 0o100;
        let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK;
        let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
        dirstate_exec_bit != fs_exec_bit
    }
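
    // Illustrative sketch, not part of the original file: the exec-bit
    // comparison performed by `mode_changed` above, applied to plain mode
    // integers. Only the owner-executable bit (0o100) is compared; other
    // permission differences are ignored. The function name is hypothetical.
    #[allow(dead_code)]
    fn exec_bit_differs(dirstate_mode: u32, fs_mode: u32) -> bool {
        const EXEC_BIT_MASK: u32 = 0o100;
        (dirstate_mode & EXEC_BIT_MASK) != (fs_mode & EXEC_BIT_MASK)
    }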

    /// Returns a `(state, mode, size, mtime)` tuple as for
    /// `DirstateMapMethods::debug_iter`.
    pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
        (self.state().into(), self.mode(), self.size(), self.mtime())
    }

    pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
        self.state() == EntryState::Normal && self.mtime() == now
    }

    pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
        let ambiguous = self.mtime_is_ambiguous(now);
        if ambiguous {
            // The file was last modified "simultaneously" with the current
            // write to dirstate (i.e. within the same second for file-
            // systems with a granularity of 1 sec). This commonly happens
            // for at least a couple of files on 'update'.
            // The user could change the file without changing its size
            // within the same second. Invalidate the file's mtime in
            // dirstate, forcing future 'status' calls to compare the
            // contents of the file if the size is the same. This prevents
            // mistakenly treating such files as clean.
            self.set_possibly_dirty()
        }
        ambiguous
    }
}
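
// Illustrative sketch, not part of the original file: the rule described in
// the comment inside `clear_ambiguous_mtime`, written as a standalone helper.
// A cached mtime equal to the time the dirstate is written ("now") cannot
// prove the file clean, because the file may still change within that same
// second. The function name is hypothetical.
#[allow(dead_code)]
fn cached_mtime_is_trustworthy(cached_mtime: Option<i32>, now: i32) -> bool {
    match cached_mtime {
        // No cached mtime: the caller has to fall back to comparing contents.
        None => false,
        // Same second as the dirstate write: ambiguous, do not trust it.
        Some(mtime) if mtime == now => false,
        Some(_) => true,
    }
}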

impl EntryState {
    pub fn is_tracked(self) -> bool {
        use EntryState::*;
        match self {
            Normal | Added | Merged => true,
            Removed => false,
        }
    }
}

impl TryFrom<u8> for EntryState {
    type Error = HgError;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            b'n' => Ok(EntryState::Normal),
            b'a' => Ok(EntryState::Added),
            b'r' => Ok(EntryState::Removed),
            b'm' => Ok(EntryState::Merged),
            _ => Err(HgError::CorruptedRepository(format!(
                "Incorrect dirstate entry state {}",
                value
            ))),
        }
    }
}

impl Into<u8> for EntryState {
    fn into(self) -> u8 {
        match self {
            EntryState::Normal => b'n',
            EntryState::Added => b'a',
            EntryState::Removed => b'r',
            EntryState::Merged => b'm',
        }
    }
}
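
// A small illustrative test, not present in the original file, exercising the
// byte <-> EntryState mapping defined above. It relies only on the
// `TryFrom<u8>` and `Into<u8>` impls shown in this file; the module name is
// hypothetical.
#[cfg(test)]
mod entry_state_round_trip {
    use super::EntryState;
    use std::convert::TryFrom;

    #[test]
    fn round_trips_known_state_bytes() {
        for &byte in &[b'n', b'a', b'r', b'm'] {
            let state = EntryState::try_from(byte).expect("known state byte");
            let back: u8 = state.into();
            assert_eq!(back, byte);
        }
        // Any other byte is reported as repository corruption.
        assert!(EntryState::try_from(b'x').is_err());
    }
}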