dirstate-item: replace another usage of `merged`...
marmoute
r48964:8f88307f default
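This changeset replaces a read of the composite `merged` property in `v1_state` with an explicit check of the underlying `_p1_tracked` and `_p2_info` booleans (the Rust `DirstateEntry::v1_state` gets the mirror change below). A minimal sketch of why the two spellings agree at that branch, for illustration only: once the untracked and removed cases have been handled, the item is necessarily tracked in the working copy, so `merged` collapses to the two remaining booleans.

    # illustration only: enumerate the combinations reachable at that branch
    for p1_tracked in (False, True):
        for p2_info in (False, True):
            item = DirstateItem(
                wc_tracked=True, p1_tracked=p1_tracked, p2_info=p2_info
            )
            assert item.merged == (item.p1_tracked and item.p2_info)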
@@ -1,783 +1,783 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from ..thirdparty import attr
17 from ..thirdparty import attr
18 from .. import (
18 from .. import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 revlogutils,
21 revlogutils,
22 util,
22 util,
23 )
23 )
24
24
25 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import constants as revlog_constants
26 from ..revlogutils import constants as revlog_constants
27
27
28 stringio = pycompat.bytesio
28 stringio = pycompat.bytesio
29
29
30
30
31 _pack = struct.pack
31 _pack = struct.pack
32 _unpack = struct.unpack
32 _unpack = struct.unpack
33 _compress = zlib.compress
33 _compress = zlib.compress
34 _decompress = zlib.decompress
34 _decompress = zlib.decompress
35
35
36
36
37 # a special value used internally for `size` if the file comes from the other parent
37 # a special value used internally for `size` if the file comes from the other parent
38 FROM_P2 = -2
38 FROM_P2 = -2
39
39
40 # a special value used internally for `size` if the file is modified/merged/added
40 # a special value used internally for `size` if the file is modified/merged/added
41 NONNORMAL = -1
41 NONNORMAL = -1
42
42
43 # a special value used internally for `time` if the time is ambiguous
43 # a special value used internally for `time` if the time is ambiguous
44 AMBIGUOUS_TIME = -1
44 AMBIGUOUS_TIME = -1
45
45
46
46
47 @attr.s(slots=True, init=False)
47 @attr.s(slots=True, init=False)
48 class DirstateItem(object):
48 class DirstateItem(object):
49 """represent a dirstate entry
49 """represent a dirstate entry
50
50
51 It holds multiple attributes
51 It holds multiple attributes
52
52
53 # about file tracking
53 # about file tracking
54 - wc_tracked: is the file tracked by the working copy
54 - wc_tracked: is the file tracked by the working copy
55 - p1_tracked: is the file tracked in working copy first parent
55 - p1_tracked: is the file tracked in working copy first parent
56 - p2_info: the file has been involved in some merge operation. Either
56 - p2_info: the file has been involved in some merge operation. Either
57 because it was actually merged, or because the p2 version was
57 because it was actually merged, or because the p2 version was
58 ahead, or because some rename moved it there. In any case
58 ahead, or because some rename moved it there. In any case
59 `hg status` will want it displayed as modified.
59 `hg status` will want it displayed as modified.
60
60
61 # about the file state expected from p1 manifest:
61 # about the file state expected from p1 manifest:
62 - mode: the file mode in p1
62 - mode: the file mode in p1
63 - size: the file size in p1
63 - size: the file size in p1
64
64
65 These values can be set to None, which means we don't have a meaningful value
65 These values can be set to None, which means we don't have a meaningful value
66 to compare with. Either because we don't really care about them as their
66 to compare with. Either because we don't really care about them as their
67 `status` is known without having to look at the disk, or because we don't
67 `status` is known without having to look at the disk, or because we don't
68 know them right now and a full comparison will be needed to find out if
68 know them right now and a full comparison will be needed to find out if
69 the file is clean.
69 the file is clean.
70
70
71 # about the file state on disk last time we saw it:
71 # about the file state on disk last time we saw it:
72 - mtime: the last known clean mtime for the file.
72 - mtime: the last known clean mtime for the file.
73
73
74 This value can be set to None if no cacheable state exists. Either because we
74 This value can be set to None if no cacheable state exists. Either because we
75 do not care (see previous section) or because we could not cache something
75 do not care (see previous section) or because we could not cache something
76 yet.
76 yet.
77 """
77 """
78
78
79 _wc_tracked = attr.ib()
79 _wc_tracked = attr.ib()
80 _p1_tracked = attr.ib()
80 _p1_tracked = attr.ib()
81 _p2_info = attr.ib()
81 _p2_info = attr.ib()
82 _mode = attr.ib()
82 _mode = attr.ib()
83 _size = attr.ib()
83 _size = attr.ib()
84 _mtime = attr.ib()
84 _mtime = attr.ib()
85
85
86 def __init__(
86 def __init__(
87 self,
87 self,
88 wc_tracked=False,
88 wc_tracked=False,
89 p1_tracked=False,
89 p1_tracked=False,
90 p2_info=False,
90 p2_info=False,
91 has_meaningful_data=True,
91 has_meaningful_data=True,
92 has_meaningful_mtime=True,
92 has_meaningful_mtime=True,
93 parentfiledata=None,
93 parentfiledata=None,
94 ):
94 ):
95 self._wc_tracked = wc_tracked
95 self._wc_tracked = wc_tracked
96 self._p1_tracked = p1_tracked
96 self._p1_tracked = p1_tracked
97 self._p2_info = p2_info
97 self._p2_info = p2_info
98
98
99 self._mode = None
99 self._mode = None
100 self._size = None
100 self._size = None
101 self._mtime = None
101 self._mtime = None
102 if parentfiledata is None:
102 if parentfiledata is None:
103 has_meaningful_mtime = False
103 has_meaningful_mtime = False
104 has_meaningful_data = False
104 has_meaningful_data = False
105 if has_meaningful_data:
105 if has_meaningful_data:
106 self._mode = parentfiledata[0]
106 self._mode = parentfiledata[0]
107 self._size = parentfiledata[1]
107 self._size = parentfiledata[1]
108 if has_meaningful_mtime:
108 if has_meaningful_mtime:
109 self._mtime = parentfiledata[2]
109 self._mtime = parentfiledata[2]
110
110
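For illustration, this is how the constructor arguments interact (hypothetical values, using only the API defined in this file): passing `parentfiledata` records mode/size/mtime, while omitting it leaves them unset so the entry will need a real lookup.

    # hypothetical example values
    clean = DirstateItem(
        wc_tracked=True,
        p1_tracked=True,
        parentfiledata=(0o644, 12, 1700000000),
    )
    assert clean.maybe_clean and clean.mode == 0o644

    # without parentfiledata, mode/size/mtime stay at None internally, so the
    # v1 mtime degrades to AMBIGUOUS_TIME and a content check will be needed
    lookup = DirstateItem(wc_tracked=True, p1_tracked=True)
    assert lookup.v1_mtime() == AMBIGUOUS_TIME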
111 @classmethod
111 @classmethod
112 def new_added(cls):
112 def new_added(cls):
113 """constructor to help legacy API to build a new "added" item
113 """constructor to help legacy API to build a new "added" item
114
114
115 Should eventually be removed
115 Should eventually be removed
116 """
116 """
117 return cls(wc_tracked=True)
117 return cls(wc_tracked=True)
118
118
119 @classmethod
119 @classmethod
120 def new_merged(cls):
120 def new_merged(cls):
121 """constructor to help legacy API to build a new "merged" item
121 """constructor to help legacy API to build a new "merged" item
122
122
123 Should eventually be removed
123 Should eventually be removed
124 """
124 """
125 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
125 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
126
126
127 @classmethod
127 @classmethod
128 def new_from_p2(cls):
128 def new_from_p2(cls):
129 """constructor to help legacy API to build a new "from_p2" item
129 """constructor to help legacy API to build a new "from_p2" item
130
130
131 Should eventually be removed
131 Should eventually be removed
132 """
132 """
133 return cls(wc_tracked=True, p2_info=True)
133 return cls(wc_tracked=True, p2_info=True)
134
134
135 @classmethod
135 @classmethod
136 def new_possibly_dirty(cls):
136 def new_possibly_dirty(cls):
137 """constructor to help legacy API to build a new "possibly_dirty" item
137 """constructor to help legacy API to build a new "possibly_dirty" item
138
138
139 Should eventually be removed
139 Should eventually be removed
140 """
140 """
141 return cls(wc_tracked=True, p1_tracked=True)
141 return cls(wc_tracked=True, p1_tracked=True)
142
142
143 @classmethod
143 @classmethod
144 def new_normal(cls, mode, size, mtime):
144 def new_normal(cls, mode, size, mtime):
145 """constructor to help legacy API to build a new "normal" item
145 """constructor to help legacy API to build a new "normal" item
146
146
147 Should eventually be removed
147 Should eventually be removed
148 """
148 """
149 assert size != FROM_P2
149 assert size != FROM_P2
150 assert size != NONNORMAL
150 assert size != NONNORMAL
151 return cls(
151 return cls(
152 wc_tracked=True,
152 wc_tracked=True,
153 p1_tracked=True,
153 p1_tracked=True,
154 parentfiledata=(mode, size, mtime),
154 parentfiledata=(mode, size, mtime),
155 )
155 )
156
156
157 @classmethod
157 @classmethod
158 def from_v1_data(cls, state, mode, size, mtime):
158 def from_v1_data(cls, state, mode, size, mtime):
159 """Build a new DirstateItem object from V1 data
159 """Build a new DirstateItem object from V1 data
160
160
161 Since the dirstate-v1 format is frozen, the signature of this function
161 Since the dirstate-v1 format is frozen, the signature of this function
162 is not expected to change, unlike the __init__ one.
162 is not expected to change, unlike the __init__ one.
163 """
163 """
164 if state == b'm':
164 if state == b'm':
165 return cls.new_merged()
165 return cls.new_merged()
166 elif state == b'a':
166 elif state == b'a':
167 return cls.new_added()
167 return cls.new_added()
168 elif state == b'r':
168 elif state == b'r':
169 if size == NONNORMAL:
169 if size == NONNORMAL:
170 p1_tracked = True
170 p1_tracked = True
171 p2_info = True
171 p2_info = True
172 elif size == FROM_P2:
172 elif size == FROM_P2:
173 p1_tracked = False
173 p1_tracked = False
174 p2_info = True
174 p2_info = True
175 else:
175 else:
176 p1_tracked = True
176 p1_tracked = True
177 p2_info = False
177 p2_info = False
178 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
178 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
179 elif state == b'n':
179 elif state == b'n':
180 if size == FROM_P2:
180 if size == FROM_P2:
181 return cls.new_from_p2()
181 return cls.new_from_p2()
182 elif size == NONNORMAL:
182 elif size == NONNORMAL:
183 return cls.new_possibly_dirty()
183 return cls.new_possibly_dirty()
184 elif mtime == AMBIGUOUS_TIME:
184 elif mtime == AMBIGUOUS_TIME:
185 instance = cls.new_normal(mode, size, 42)
185 instance = cls.new_normal(mode, size, 42)
186 instance._mtime = None
186 instance._mtime = None
187 return instance
187 return instance
188 else:
188 else:
189 return cls.new_normal(mode, size, mtime)
189 return cls.new_normal(mode, size, mtime)
190 else:
190 else:
191 raise RuntimeError(b'unknown state: %s' % state)
191 raise RuntimeError(b'unknown state: %s' % state)
192
192
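A few hypothetical examples of the v1 mapping implemented above (values chosen purely for illustration):

    merged = DirstateItem.from_v1_data(b'm', 0, 0, 0)
    assert merged.merged  # wc_tracked, p1_tracked and p2_info are all set

    from_p2 = DirstateItem.from_v1_data(b'n', 0, FROM_P2, 0)
    assert from_p2.p2_info and not from_p2.p1_tracked

    ambiguous = DirstateItem.from_v1_data(b'n', 0o644, 5, AMBIGUOUS_TIME)
    assert ambiguous.v1_mtime() == AMBIGUOUS_TIME  # the mtime is not kept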
193 def set_possibly_dirty(self):
193 def set_possibly_dirty(self):
194 """Mark a file as "possibly dirty"
194 """Mark a file as "possibly dirty"
195
195
196 This means the next status call will have to actually check its content
196 This means the next status call will have to actually check its content
197 to make sure it is correct.
197 to make sure it is correct.
198 """
198 """
199 self._mtime = None
199 self._mtime = None
200
200
201 def set_clean(self, mode, size, mtime):
201 def set_clean(self, mode, size, mtime):
202 """mark a file as "clean" cancelling potential "possibly dirty call"
202 """mark a file as "clean" cancelling potential "possibly dirty call"
203
203
204 Note: this function is a descendant of `dirstate.normal` and is
204 Note: this function is a descendant of `dirstate.normal` and is
205 currently expected to be called on "normal" entries only. There is no
205 currently expected to be called on "normal" entries only. There is no
206 reason for this not to change in the future as long as the code is
206 reason for this not to change in the future as long as the code is
207 updated to preserve the proper state of the non-normal files.
207 updated to preserve the proper state of the non-normal files.
208 """
208 """
209 self._wc_tracked = True
209 self._wc_tracked = True
210 self._p1_tracked = True
210 self._p1_tracked = True
211 self._mode = mode
211 self._mode = mode
212 self._size = size
212 self._size = size
213 self._mtime = mtime
213 self._mtime = mtime
214
214
215 def set_tracked(self):
215 def set_tracked(self):
216 """mark a file as tracked in the working copy
216 """mark a file as tracked in the working copy
217
217
218 This will ultimately be called by commands like `hg add`.
218 This will ultimately be called by commands like `hg add`.
219 """
219 """
220 self._wc_tracked = True
220 self._wc_tracked = True
221 # `set_tracked` is replacing various `normallookup` calls. So we mark
221 # `set_tracked` is replacing various `normallookup` calls. So we mark
222 # the files as needing lookup
222 # the files as needing lookup
223 #
223 #
224 # Consider dropping this in the future in favor of something less broad.
224 # Consider dropping this in the future in favor of something less broad.
225 self._mtime = None
225 self._mtime = None
226
226
227 def set_untracked(self):
227 def set_untracked(self):
228 """mark a file as untracked in the working copy
228 """mark a file as untracked in the working copy
229
229
230 This will ultimately be called by commands like `hg remove`.
230 This will ultimately be called by commands like `hg remove`.
231 """
231 """
232 self._wc_tracked = False
232 self._wc_tracked = False
233 self._mode = None
233 self._mode = None
234 self._size = None
234 self._size = None
235 self._mtime = None
235 self._mtime = None
236
236
237 def drop_merge_data(self):
237 def drop_merge_data(self):
238 """remove all "merge-only" from a DirstateItem
238 """remove all "merge-only" from a DirstateItem
239
239
240 This is to be called by the dirstatemap code when the second parent is dropped
240 This is to be called by the dirstatemap code when the second parent is dropped
241 """
241 """
242 if self._p2_info:
242 if self._p2_info:
243 self._p2_info = False
243 self._p2_info = False
244 self._mode = None
244 self._mode = None
245 self._size = None
245 self._size = None
246 self._mtime = None
246 self._mtime = None
247
247
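For illustration, dropping the merge data turns a merged entry back into a plain tracked entry whose next status will need a fresh check (hypothetical sketch):

    item = DirstateItem.new_merged()
    assert item.merged
    item.drop_merge_data()
    assert item.tracked and not item.p2_info
    assert item.v1_mtime() == AMBIGUOUS_TIME  # nothing is cached any more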
248 @property
248 @property
249 def mode(self):
249 def mode(self):
250 return self.v1_mode()
250 return self.v1_mode()
251
251
252 @property
252 @property
253 def size(self):
253 def size(self):
254 return self.v1_size()
254 return self.v1_size()
255
255
256 @property
256 @property
257 def mtime(self):
257 def mtime(self):
258 return self.v1_mtime()
258 return self.v1_mtime()
259
259
260 @property
260 @property
261 def state(self):
261 def state(self):
262 """
262 """
263 States are:
263 States are:
264 n normal
264 n normal
265 m needs merging
265 m needs merging
266 r marked for removal
266 r marked for removal
267 a marked for addition
267 a marked for addition
268
268
269 XXX This "state" is a bit obscure and mostly a direct expression of the
269 XXX This "state" is a bit obscure and mostly a direct expression of the
270 dirstatev1 format. It would make sense to ultimately deprecate it in
270 dirstatev1 format. It would make sense to ultimately deprecate it in
271 favor of the more "semantic" attributes.
271 favor of the more "semantic" attributes.
272 """
272 """
273 if not self.any_tracked:
273 if not self.any_tracked:
274 return b'?'
274 return b'?'
275 return self.v1_state()
275 return self.v1_state()
276
276
277 @property
277 @property
278 def tracked(self):
278 def tracked(self):
279 """True is the file is tracked in the working copy"""
279 """True is the file is tracked in the working copy"""
280 return self._wc_tracked
280 return self._wc_tracked
281
281
282 @property
282 @property
283 def any_tracked(self):
283 def any_tracked(self):
284 """True is the file is tracked anywhere (wc or parents)"""
284 """True is the file is tracked anywhere (wc or parents)"""
285 return self._wc_tracked or self._p1_tracked or self._p2_info
285 return self._wc_tracked or self._p1_tracked or self._p2_info
286
286
287 @property
287 @property
288 def added(self):
288 def added(self):
289 """True if the file has been added"""
289 """True if the file has been added"""
290 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
290 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
291
291
292 @property
292 @property
293 def maybe_clean(self):
293 def maybe_clean(self):
294 """True if the file has a chance to be in the "clean" state"""
294 """True if the file has a chance to be in the "clean" state"""
295 if not self._wc_tracked:
295 if not self._wc_tracked:
296 return False
296 return False
297 elif not self._p1_tracked:
297 elif not self._p1_tracked:
298 return False
298 return False
299 elif self._p2_info:
299 elif self._p2_info:
300 return False
300 return False
301 return True
301 return True
302
302
303 @property
303 @property
304 def p1_tracked(self):
304 def p1_tracked(self):
305 """True if the file is tracked in the first parent manifest"""
305 """True if the file is tracked in the first parent manifest"""
306 return self._p1_tracked
306 return self._p1_tracked
307
307
308 @property
308 @property
309 def p2_info(self):
309 def p2_info(self):
310 """True if the file needed to merge or apply any input from p2
310 """True if the file needed to merge or apply any input from p2
311
311
312 See the class documentation for details.
312 See the class documentation for details.
313 """
313 """
314 return self._wc_tracked and self._p2_info
314 return self._wc_tracked and self._p2_info
315
315
316 @property
316 @property
317 def merged(self):
317 def merged(self):
318 """True if the file has been merged
318 """True if the file has been merged
319
319
320 Should only be set if a merge is in progress in the dirstate
320 Should only be set if a merge is in progress in the dirstate
321 """
321 """
322 return self._wc_tracked and self._p1_tracked and self._p2_info
322 return self._wc_tracked and self._p1_tracked and self._p2_info
323
323
324 @property
324 @property
325 def removed(self):
325 def removed(self):
326 """True if the file has been removed"""
326 """True if the file has been removed"""
327 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
327 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
328
328
329 def v1_state(self):
329 def v1_state(self):
330 """return a "state" suitable for v1 serialization"""
330 """return a "state" suitable for v1 serialization"""
331 if not self.any_tracked:
331 if not self.any_tracked:
332 # the object has no state to record, this is -currently-
332 # the object has no state to record, this is -currently-
333 # unsupported
333 # unsupported
334 raise RuntimeError('untracked item')
334 raise RuntimeError('untracked item')
335 elif self.removed:
335 elif self.removed:
336 return b'r'
336 return b'r'
337 elif self.merged:
337 elif self._p1_tracked and self._p2_info:
338 return b'm'
338 return b'm'
339 elif self.added:
339 elif self.added:
340 return b'a'
340 return b'a'
341 else:
341 else:
342 return b'n'
342 return b'n'
343
343
344 def v1_mode(self):
344 def v1_mode(self):
345 """return a "mode" suitable for v1 serialization"""
345 """return a "mode" suitable for v1 serialization"""
346 return self._mode if self._mode is not None else 0
346 return self._mode if self._mode is not None else 0
347
347
348 def v1_size(self):
348 def v1_size(self):
349 """return a "size" suitable for v1 serialization"""
349 """return a "size" suitable for v1 serialization"""
350 if not self.any_tracked:
350 if not self.any_tracked:
351 # the object has no state to record, this is -currently-
351 # the object has no state to record, this is -currently-
352 # unsupported
352 # unsupported
353 raise RuntimeError('untracked item')
353 raise RuntimeError('untracked item')
354 elif self.removed and self._p1_tracked and self._p2_info:
354 elif self.removed and self._p1_tracked and self._p2_info:
355 return NONNORMAL
355 return NONNORMAL
356 elif self._p2_info:
356 elif self._p2_info:
357 return FROM_P2
357 return FROM_P2
358 elif self.removed:
358 elif self.removed:
359 return 0
359 return 0
360 elif self.added:
360 elif self.added:
361 return NONNORMAL
361 return NONNORMAL
362 elif self._size is None:
362 elif self._size is None:
363 return NONNORMAL
363 return NONNORMAL
364 else:
364 else:
365 return self._size
365 return self._size
366
366
367 def v1_mtime(self):
367 def v1_mtime(self):
368 """return a "mtime" suitable for v1 serialization"""
368 """return a "mtime" suitable for v1 serialization"""
369 if not self.any_tracked:
369 if not self.any_tracked:
370 # the object has no state to record, this is -currently-
370 # the object has no state to record, this is -currently-
371 # unsupported
371 # unsupported
372 raise RuntimeError('untracked item')
372 raise RuntimeError('untracked item')
373 elif self.removed:
373 elif self.removed:
374 return 0
374 return 0
375 elif self._mtime is None:
375 elif self._mtime is None:
376 return AMBIGUOUS_TIME
376 return AMBIGUOUS_TIME
377 elif self._p2_info:
377 elif self._p2_info:
378 return AMBIGUOUS_TIME
378 return AMBIGUOUS_TIME
379 elif not self._p1_tracked:
379 elif not self._p1_tracked:
380 return AMBIGUOUS_TIME
380 return AMBIGUOUS_TIME
381 else:
381 else:
382 return self._mtime
382 return self._mtime
383
383
384 def need_delay(self, now):
384 def need_delay(self, now):
385 """True if the stored mtime would be ambiguous with the current time"""
385 """True if the stored mtime would be ambiguous with the current time"""
386 return self.v1_state() == b'n' and self.v1_mtime() == now
386 return self.v1_state() == b'n' and self.v1_mtime() == now
387
387
388
388
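The v1_* accessors are what ends up in the dirstate-v1 serialization. A hypothetical illustration of the special values and of `need_delay`:

    added = DirstateItem.new_added()
    assert added.v1_state() == b'a'
    assert added.v1_size() == NONNORMAL
    assert added.v1_mtime() == AMBIGUOUS_TIME

    clean = DirstateItem.new_normal(0o644, 12, 1700000000)
    assert clean.v1_state() == b'n'
    assert not clean.need_delay(0)
    assert clean.need_delay(1700000000)  # written in the same second: ambiguous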
389 def gettype(q):
389 def gettype(q):
390 return int(q & 0xFFFF)
390 return int(q & 0xFFFF)
391
391
392
392
393 class BaseIndexObject(object):
393 class BaseIndexObject(object):
394 # Can I be passed to an algorithm implemented in Rust?
394 # Can I be passed to an algorithm implemented in Rust?
395 rust_ext_compat = 0
395 rust_ext_compat = 0
396 # Format of an index entry according to Python's `struct` language
396 # Format of an index entry according to Python's `struct` language
397 index_format = revlog_constants.INDEX_ENTRY_V1
397 index_format = revlog_constants.INDEX_ENTRY_V1
398 # Size of a C unsigned long long int, platform independent
398 # Size of a C unsigned long long int, platform independent
399 big_int_size = struct.calcsize(b'>Q')
399 big_int_size = struct.calcsize(b'>Q')
400 # Size of a C long int, platform independent
400 # Size of a C long int, platform independent
401 int_size = struct.calcsize(b'>i')
401 int_size = struct.calcsize(b'>i')
402 # An empty index entry, used as a default value to be overridden, or nullrev
402 # An empty index entry, used as a default value to be overridden, or nullrev
403 null_item = (
403 null_item = (
404 0,
404 0,
405 0,
405 0,
406 0,
406 0,
407 -1,
407 -1,
408 -1,
408 -1,
409 -1,
409 -1,
410 -1,
410 -1,
411 sha1nodeconstants.nullid,
411 sha1nodeconstants.nullid,
412 0,
412 0,
413 0,
413 0,
414 revlog_constants.COMP_MODE_INLINE,
414 revlog_constants.COMP_MODE_INLINE,
415 revlog_constants.COMP_MODE_INLINE,
415 revlog_constants.COMP_MODE_INLINE,
416 )
416 )
417
417
418 @util.propertycache
418 @util.propertycache
419 def entry_size(self):
419 def entry_size(self):
420 return self.index_format.size
420 return self.index_format.size
421
421
422 @property
422 @property
423 def nodemap(self):
423 def nodemap(self):
424 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
424 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
425 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
425 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
426 return self._nodemap
426 return self._nodemap
427
427
428 @util.propertycache
428 @util.propertycache
429 def _nodemap(self):
429 def _nodemap(self):
430 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
430 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
431 for r in range(0, len(self)):
431 for r in range(0, len(self)):
432 n = self[r][7]
432 n = self[r][7]
433 nodemap[n] = r
433 nodemap[n] = r
434 return nodemap
434 return nodemap
435
435
436 def has_node(self, node):
436 def has_node(self, node):
437 """return True if the node exist in the index"""
437 """return True if the node exist in the index"""
438 return node in self._nodemap
438 return node in self._nodemap
439
439
440 def rev(self, node):
440 def rev(self, node):
441 """return a revision for a node
441 """return a revision for a node
442
442
443 If the node is unknown, raise a RevlogError"""
443 If the node is unknown, raise a RevlogError"""
444 return self._nodemap[node]
444 return self._nodemap[node]
445
445
446 def get_rev(self, node):
446 def get_rev(self, node):
447 """return a revision for a node
447 """return a revision for a node
448
448
449 If the node is unknown, return None"""
449 If the node is unknown, return None"""
450 return self._nodemap.get(node)
450 return self._nodemap.get(node)
451
451
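`has_node`, `rev` and `get_rev` all go through the lazily built `_nodemap` above. A rough usage sketch; the 64-byte v1 entry layout used here (offset/flags, lengths, base and link revisions, both parents, node id, padding) is an assumption made for the example, not taken from this file:

    import struct

    node = b'\x11' * 20
    # assumed v1 entry layout, padded to 64 bytes
    entry = struct.pack(b'>Qiiiiii20s', 0, 10, 10, 0, 0, -1, -1, node) + b'\x00' * 12
    index = IndexObject(entry)
    assert index.has_node(node)
    assert index.rev(node) == 0
    assert index.get_rev(b'\x22' * 20) is None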
452 def _stripnodes(self, start):
452 def _stripnodes(self, start):
453 if '_nodemap' in vars(self):
453 if '_nodemap' in vars(self):
454 for r in range(start, len(self)):
454 for r in range(start, len(self)):
455 n = self[r][7]
455 n = self[r][7]
456 del self._nodemap[n]
456 del self._nodemap[n]
457
457
458 def clearcaches(self):
458 def clearcaches(self):
459 self.__dict__.pop('_nodemap', None)
459 self.__dict__.pop('_nodemap', None)
460
460
461 def __len__(self):
461 def __len__(self):
462 return self._lgt + len(self._extra)
462 return self._lgt + len(self._extra)
463
463
464 def append(self, tup):
464 def append(self, tup):
465 if '_nodemap' in vars(self):
465 if '_nodemap' in vars(self):
466 self._nodemap[tup[7]] = len(self)
466 self._nodemap[tup[7]] = len(self)
467 data = self._pack_entry(len(self), tup)
467 data = self._pack_entry(len(self), tup)
468 self._extra.append(data)
468 self._extra.append(data)
469
469
470 def _pack_entry(self, rev, entry):
470 def _pack_entry(self, rev, entry):
471 assert entry[8] == 0
471 assert entry[8] == 0
472 assert entry[9] == 0
472 assert entry[9] == 0
473 return self.index_format.pack(*entry[:8])
473 return self.index_format.pack(*entry[:8])
474
474
475 def _check_index(self, i):
475 def _check_index(self, i):
476 if not isinstance(i, int):
476 if not isinstance(i, int):
477 raise TypeError(b"expecting int indexes")
477 raise TypeError(b"expecting int indexes")
478 if i < 0 or i >= len(self):
478 if i < 0 or i >= len(self):
479 raise IndexError
479 raise IndexError
480
480
481 def __getitem__(self, i):
481 def __getitem__(self, i):
482 if i == -1:
482 if i == -1:
483 return self.null_item
483 return self.null_item
484 self._check_index(i)
484 self._check_index(i)
485 if i >= self._lgt:
485 if i >= self._lgt:
486 data = self._extra[i - self._lgt]
486 data = self._extra[i - self._lgt]
487 else:
487 else:
488 index = self._calculate_index(i)
488 index = self._calculate_index(i)
489 data = self._data[index : index + self.entry_size]
489 data = self._data[index : index + self.entry_size]
490 r = self._unpack_entry(i, data)
490 r = self._unpack_entry(i, data)
491 if self._lgt and i == 0:
491 if self._lgt and i == 0:
492 offset = revlogutils.offset_type(0, gettype(r[0]))
492 offset = revlogutils.offset_type(0, gettype(r[0]))
493 r = (offset,) + r[1:]
493 r = (offset,) + r[1:]
494 return r
494 return r
495
495
496 def _unpack_entry(self, rev, data):
496 def _unpack_entry(self, rev, data):
497 r = self.index_format.unpack(data)
497 r = self.index_format.unpack(data)
498 r = r + (
498 r = r + (
499 0,
499 0,
500 0,
500 0,
501 revlog_constants.COMP_MODE_INLINE,
501 revlog_constants.COMP_MODE_INLINE,
502 revlog_constants.COMP_MODE_INLINE,
502 revlog_constants.COMP_MODE_INLINE,
503 )
503 )
504 return r
504 return r
505
505
506 def pack_header(self, header):
506 def pack_header(self, header):
507 """pack header information as binary"""
507 """pack header information as binary"""
508 v_fmt = revlog_constants.INDEX_HEADER
508 v_fmt = revlog_constants.INDEX_HEADER
509 return v_fmt.pack(header)
509 return v_fmt.pack(header)
510
510
511 def entry_binary(self, rev):
511 def entry_binary(self, rev):
512 """return the raw binary string representing a revision"""
512 """return the raw binary string representing a revision"""
513 entry = self[rev]
513 entry = self[rev]
514 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
514 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
515 if rev == 0:
515 if rev == 0:
516 p = p[revlog_constants.INDEX_HEADER.size :]
516 p = p[revlog_constants.INDEX_HEADER.size :]
517 return p
517 return p
518
518
519
519
520 class IndexObject(BaseIndexObject):
520 class IndexObject(BaseIndexObject):
521 def __init__(self, data):
521 def __init__(self, data):
522 assert len(data) % self.entry_size == 0, (
522 assert len(data) % self.entry_size == 0, (
523 len(data),
523 len(data),
524 self.entry_size,
524 self.entry_size,
525 len(data) % self.entry_size,
525 len(data) % self.entry_size,
526 )
526 )
527 self._data = data
527 self._data = data
528 self._lgt = len(data) // self.entry_size
528 self._lgt = len(data) // self.entry_size
529 self._extra = []
529 self._extra = []
530
530
531 def _calculate_index(self, i):
531 def _calculate_index(self, i):
532 return i * self.entry_size
532 return i * self.entry_size
533
533
534 def __delitem__(self, i):
534 def __delitem__(self, i):
535 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
535 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
536 raise ValueError(b"deleting slices only supports a:-1 with step 1")
536 raise ValueError(b"deleting slices only supports a:-1 with step 1")
537 i = i.start
537 i = i.start
538 self._check_index(i)
538 self._check_index(i)
539 self._stripnodes(i)
539 self._stripnodes(i)
540 if i < self._lgt:
540 if i < self._lgt:
541 self._data = self._data[: i * self.entry_size]
541 self._data = self._data[: i * self.entry_size]
542 self._lgt = i
542 self._lgt = i
543 self._extra = []
543 self._extra = []
544 else:
544 else:
545 self._extra = self._extra[: i - self._lgt]
545 self._extra = self._extra[: i - self._lgt]
546
546
547
547
548 class PersistentNodeMapIndexObject(IndexObject):
548 class PersistentNodeMapIndexObject(IndexObject):
549 """a Debug oriented class to test persistent nodemap
549 """a Debug oriented class to test persistent nodemap
550
550
551 We need a simple python object to test API and higher level behavior. See
551 We need a simple python object to test API and higher level behavior. See
552 the Rust implementation for more serious usage. This should be used only
552 the Rust implementation for more serious usage. This should be used only
553 through the dedicated `devel.persistent-nodemap` config.
553 through the dedicated `devel.persistent-nodemap` config.
554 """
554 """
555
555
556 def nodemap_data_all(self):
556 def nodemap_data_all(self):
557 """Return bytes containing a full serialization of a nodemap
557 """Return bytes containing a full serialization of a nodemap
558
558
559 The nodemap should be valid for the full set of revisions in the
559 The nodemap should be valid for the full set of revisions in the
560 index."""
560 index."""
561 return nodemaputil.persistent_data(self)
561 return nodemaputil.persistent_data(self)
562
562
563 def nodemap_data_incremental(self):
563 def nodemap_data_incremental(self):
564 """Return bytes containing a incremental update to persistent nodemap
564 """Return bytes containing a incremental update to persistent nodemap
565
565
566 This contains the data for an append-only update of the data provided
566 This contains the data for an append-only update of the data provided
567 in the last call to `update_nodemap_data`.
567 in the last call to `update_nodemap_data`.
568 """
568 """
569 if self._nm_root is None:
569 if self._nm_root is None:
570 return None
570 return None
571 docket = self._nm_docket
571 docket = self._nm_docket
572 changed, data = nodemaputil.update_persistent_data(
572 changed, data = nodemaputil.update_persistent_data(
573 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
573 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
574 )
574 )
575
575
576 self._nm_root = self._nm_max_idx = self._nm_docket = None
576 self._nm_root = self._nm_max_idx = self._nm_docket = None
577 return docket, changed, data
577 return docket, changed, data
578
578
579 def update_nodemap_data(self, docket, nm_data):
579 def update_nodemap_data(self, docket, nm_data):
580 """provide full block of persisted binary data for a nodemap
580 """provide full block of persisted binary data for a nodemap
581
581
582 The data are expected to come from disk. See `nodemap_data_all` for a
582 The data are expected to come from disk. See `nodemap_data_all` for a
583 producer of such data."""
583 producer of such data."""
584 if nm_data is not None:
584 if nm_data is not None:
585 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
585 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
586 if self._nm_root:
586 if self._nm_root:
587 self._nm_docket = docket
587 self._nm_docket = docket
588 else:
588 else:
589 self._nm_root = self._nm_max_idx = self._nm_docket = None
589 self._nm_root = self._nm_max_idx = self._nm_docket = None
590
590
591
591
592 class InlinedIndexObject(BaseIndexObject):
592 class InlinedIndexObject(BaseIndexObject):
593 def __init__(self, data, inline=0):
593 def __init__(self, data, inline=0):
594 self._data = data
594 self._data = data
595 self._lgt = self._inline_scan(None)
595 self._lgt = self._inline_scan(None)
596 self._inline_scan(self._lgt)
596 self._inline_scan(self._lgt)
597 self._extra = []
597 self._extra = []
598
598
599 def _inline_scan(self, lgt):
599 def _inline_scan(self, lgt):
600 off = 0
600 off = 0
601 if lgt is not None:
601 if lgt is not None:
602 self._offsets = [0] * lgt
602 self._offsets = [0] * lgt
603 count = 0
603 count = 0
604 while off <= len(self._data) - self.entry_size:
604 while off <= len(self._data) - self.entry_size:
605 start = off + self.big_int_size
605 start = off + self.big_int_size
606 (s,) = struct.unpack(
606 (s,) = struct.unpack(
607 b'>i',
607 b'>i',
608 self._data[start : start + self.int_size],
608 self._data[start : start + self.int_size],
609 )
609 )
610 if lgt is not None:
610 if lgt is not None:
611 self._offsets[count] = off
611 self._offsets[count] = off
612 count += 1
612 count += 1
613 off += self.entry_size + s
613 off += self.entry_size + s
614 if off != len(self._data):
614 if off != len(self._data):
615 raise ValueError(b"corrupted data")
615 raise ValueError(b"corrupted data")
616 return count
616 return count
617
617
618 def __delitem__(self, i):
618 def __delitem__(self, i):
619 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
619 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
620 raise ValueError(b"deleting slices only supports a:-1 with step 1")
620 raise ValueError(b"deleting slices only supports a:-1 with step 1")
621 i = i.start
621 i = i.start
622 self._check_index(i)
622 self._check_index(i)
623 self._stripnodes(i)
623 self._stripnodes(i)
624 if i < self._lgt:
624 if i < self._lgt:
625 self._offsets = self._offsets[:i]
625 self._offsets = self._offsets[:i]
626 self._lgt = i
626 self._lgt = i
627 self._extra = []
627 self._extra = []
628 else:
628 else:
629 self._extra = self._extra[: i - self._lgt]
629 self._extra = self._extra[: i - self._lgt]
630
630
631 def _calculate_index(self, i):
631 def _calculate_index(self, i):
632 return self._offsets[i]
632 return self._offsets[i]
633
633
634
634
635 def parse_index2(data, inline, revlogv2=False):
635 def parse_index2(data, inline, revlogv2=False):
636 if not inline:
636 if not inline:
637 cls = IndexObject2 if revlogv2 else IndexObject
637 cls = IndexObject2 if revlogv2 else IndexObject
638 return cls(data), None
638 return cls(data), None
639 cls = InlinedIndexObject
639 cls = InlinedIndexObject
640 return cls(data, inline), (0, data)
640 return cls(data, inline), (0, data)
641
641
642
642
643 def parse_index_cl_v2(data):
643 def parse_index_cl_v2(data):
644 return IndexChangelogV2(data), None
644 return IndexChangelogV2(data), None
645
645
646
646
647 class IndexObject2(IndexObject):
647 class IndexObject2(IndexObject):
648 index_format = revlog_constants.INDEX_ENTRY_V2
648 index_format = revlog_constants.INDEX_ENTRY_V2
649
649
650 def replace_sidedata_info(
650 def replace_sidedata_info(
651 self,
651 self,
652 rev,
652 rev,
653 sidedata_offset,
653 sidedata_offset,
654 sidedata_length,
654 sidedata_length,
655 offset_flags,
655 offset_flags,
656 compression_mode,
656 compression_mode,
657 ):
657 ):
658 """
658 """
659 Replace an existing index entry's sidedata offset and length with new
659 Replace an existing index entry's sidedata offset and length with new
660 ones.
660 ones.
661 This cannot be used outside of the context of sidedata rewriting,
661 This cannot be used outside of the context of sidedata rewriting,
662 inside the transaction that creates the revision `rev`.
662 inside the transaction that creates the revision `rev`.
663 """
663 """
664 if rev < 0:
664 if rev < 0:
665 raise KeyError
665 raise KeyError
666 self._check_index(rev)
666 self._check_index(rev)
667 if rev < self._lgt:
667 if rev < self._lgt:
668 msg = b"cannot rewrite entries outside of this transaction"
668 msg = b"cannot rewrite entries outside of this transaction"
669 raise KeyError(msg)
669 raise KeyError(msg)
670 else:
670 else:
671 entry = list(self[rev])
671 entry = list(self[rev])
672 entry[0] = offset_flags
672 entry[0] = offset_flags
673 entry[8] = sidedata_offset
673 entry[8] = sidedata_offset
674 entry[9] = sidedata_length
674 entry[9] = sidedata_length
675 entry[11] = compression_mode
675 entry[11] = compression_mode
676 entry = tuple(entry)
676 entry = tuple(entry)
677 new = self._pack_entry(rev, entry)
677 new = self._pack_entry(rev, entry)
678 self._extra[rev - self._lgt] = new
678 self._extra[rev - self._lgt] = new
679
679
680 def _unpack_entry(self, rev, data):
680 def _unpack_entry(self, rev, data):
681 data = self.index_format.unpack(data)
681 data = self.index_format.unpack(data)
682 entry = data[:10]
682 entry = data[:10]
683 data_comp = data[10] & 3
683 data_comp = data[10] & 3
684 sidedata_comp = (data[10] & (3 << 2)) >> 2
684 sidedata_comp = (data[10] & (3 << 2)) >> 2
685 return entry + (data_comp, sidedata_comp)
685 return entry + (data_comp, sidedata_comp)
686
686
687 def _pack_entry(self, rev, entry):
687 def _pack_entry(self, rev, entry):
688 data = entry[:10]
688 data = entry[:10]
689 data_comp = entry[10] & 3
689 data_comp = entry[10] & 3
690 sidedata_comp = (entry[11] & 3) << 2
690 sidedata_comp = (entry[11] & 3) << 2
691 data += (data_comp | sidedata_comp,)
691 data += (data_comp | sidedata_comp,)
692
692
693 return self.index_format.pack(*data)
693 return self.index_format.pack(*data)
694
694
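Both compression modes are squeezed into one integer field: the data compression mode in the low two bits and the sidedata compression mode in the next two. A small illustration of the packing arithmetic used by `_pack_entry` and `_unpack_entry` (example values):

    data_comp = 2       # example value, fits in two bits
    sidedata_comp = 1   # example value, fits in two bits
    packed = (data_comp & 3) | ((sidedata_comp & 3) << 2)
    assert packed & 3 == data_comp
    assert (packed & (3 << 2)) >> 2 == sidedata_comp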
695 def entry_binary(self, rev):
695 def entry_binary(self, rev):
696 """return the raw binary string representing a revision"""
696 """return the raw binary string representing a revision"""
697 entry = self[rev]
697 entry = self[rev]
698 return self._pack_entry(rev, entry)
698 return self._pack_entry(rev, entry)
699
699
700 def pack_header(self, header):
700 def pack_header(self, header):
701 """pack header information as binary"""
701 """pack header information as binary"""
702 msg = 'version header should go in the docket, not the index: %d'
702 msg = 'version header should go in the docket, not the index: %d'
703 msg %= header
703 msg %= header
704 raise error.ProgrammingError(msg)
704 raise error.ProgrammingError(msg)
705
705
706
706
707 class IndexChangelogV2(IndexObject2):
707 class IndexChangelogV2(IndexObject2):
708 index_format = revlog_constants.INDEX_ENTRY_CL_V2
708 index_format = revlog_constants.INDEX_ENTRY_CL_V2
709
709
710 def _unpack_entry(self, rev, data, r=True):
710 def _unpack_entry(self, rev, data, r=True):
711 items = self.index_format.unpack(data)
711 items = self.index_format.unpack(data)
712 entry = items[:3] + (rev, rev) + items[3:8]
712 entry = items[:3] + (rev, rev) + items[3:8]
713 data_comp = items[8] & 3
713 data_comp = items[8] & 3
714 sidedata_comp = (items[8] >> 2) & 3
714 sidedata_comp = (items[8] >> 2) & 3
715 return entry + (data_comp, sidedata_comp)
715 return entry + (data_comp, sidedata_comp)
716
716
717 def _pack_entry(self, rev, entry):
717 def _pack_entry(self, rev, entry):
718 assert entry[3] == rev, entry[3]
718 assert entry[3] == rev, entry[3]
719 assert entry[4] == rev, entry[4]
719 assert entry[4] == rev, entry[4]
720 data = entry[:3] + entry[5:10]
720 data = entry[:3] + entry[5:10]
721 data_comp = entry[10] & 3
721 data_comp = entry[10] & 3
722 sidedata_comp = (entry[11] & 3) << 2
722 sidedata_comp = (entry[11] & 3) << 2
723 data += (data_comp | sidedata_comp,)
723 data += (data_comp | sidedata_comp,)
724 return self.index_format.pack(*data)
724 return self.index_format.pack(*data)
725
725
726
726
727 def parse_index_devel_nodemap(data, inline):
727 def parse_index_devel_nodemap(data, inline):
728 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
728 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
729 return PersistentNodeMapIndexObject(data), None
729 return PersistentNodeMapIndexObject(data), None
730
730
731
731
732 def parse_dirstate(dmap, copymap, st):
732 def parse_dirstate(dmap, copymap, st):
733 parents = [st[:20], st[20:40]]
733 parents = [st[:20], st[20:40]]
734 # dereference fields so they will be local in loop
734 # dereference fields so they will be local in loop
735 format = b">cllll"
735 format = b">cllll"
736 e_size = struct.calcsize(format)
736 e_size = struct.calcsize(format)
737 pos1 = 40
737 pos1 = 40
738 l = len(st)
738 l = len(st)
739
739
740 # the inner loop
740 # the inner loop
741 while pos1 < l:
741 while pos1 < l:
742 pos2 = pos1 + e_size
742 pos2 = pos1 + e_size
743 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
743 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
744 pos1 = pos2 + e[4]
744 pos1 = pos2 + e[4]
745 f = st[pos2:pos1]
745 f = st[pos2:pos1]
746 if b'\0' in f:
746 if b'\0' in f:
747 f, c = f.split(b'\0')
747 f, c = f.split(b'\0')
748 copymap[f] = c
748 copymap[f] = c
749 dmap[f] = DirstateItem.from_v1_data(*e[:4])
749 dmap[f] = DirstateItem.from_v1_data(*e[:4])
750 return parents
750 return parents
751
751
752
752
753 def pack_dirstate(dmap, copymap, pl, now):
753 def pack_dirstate(dmap, copymap, pl, now):
754 now = int(now)
754 now = int(now)
755 cs = stringio()
755 cs = stringio()
756 write = cs.write
756 write = cs.write
757 write(b"".join(pl))
757 write(b"".join(pl))
758 for f, e in pycompat.iteritems(dmap):
758 for f, e in pycompat.iteritems(dmap):
759 if e.need_delay(now):
759 if e.need_delay(now):
760 # The file was last modified "simultaneously" with the current
760 # The file was last modified "simultaneously" with the current
761 # write to dirstate (i.e. within the same second for file-
761 # write to dirstate (i.e. within the same second for file-
762 # systems with a granularity of 1 sec). This commonly happens
762 # systems with a granularity of 1 sec). This commonly happens
763 # for at least a couple of files on 'update'.
763 # for at least a couple of files on 'update'.
764 # The user could change the file without changing its size
764 # The user could change the file without changing its size
765 # within the same second. Invalidate the file's mtime in
765 # within the same second. Invalidate the file's mtime in
766 # dirstate, forcing future 'status' calls to compare the
766 # dirstate, forcing future 'status' calls to compare the
767 # contents of the file if the size is the same. This prevents
767 # contents of the file if the size is the same. This prevents
768 # mistakenly treating such files as clean.
768 # mistakenly treating such files as clean.
769 e.set_possibly_dirty()
769 e.set_possibly_dirty()
770
770
771 if f in copymap:
771 if f in copymap:
772 f = b"%s\0%s" % (f, copymap[f])
772 f = b"%s\0%s" % (f, copymap[f])
773 e = _pack(
773 e = _pack(
774 b">cllll",
774 b">cllll",
775 e.v1_state(),
775 e.v1_state(),
776 e.v1_mode(),
776 e.v1_mode(),
777 e.v1_size(),
777 e.v1_size(),
778 e.v1_mtime(),
778 e.v1_mtime(),
779 len(f),
779 len(f),
780 )
780 )
781 write(e)
781 write(e)
782 write(f)
782 write(f)
783 return cs.getvalue()
783 return cs.getvalue()
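A hypothetical round trip through `pack_dirstate` and `parse_dirstate` (made-up parents and file data), showing that the copy map and the v1 fields survive serialization:

    p1, p2 = b'\x11' * 20, b'\x22' * 20
    dmap = {b'foo': DirstateItem.new_normal(0o644, 3, 1700000000)}
    copymap = {b'foo': b'bar'}
    blob = pack_dirstate(dmap, copymap, [p1, p2], now=0)

    dmap2, copymap2 = {}, {}
    assert parse_dirstate(dmap2, copymap2, blob) == [p1, p2]
    assert copymap2 == {b'foo': b'bar'}
    assert dmap2[b'foo'].v1_size() == 3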
@@ -1,428 +1,431 b''
1 use crate::errors::HgError;
1 use crate::errors::HgError;
2 use bitflags::bitflags;
2 use bitflags::bitflags;
3 use std::convert::TryFrom;
3 use std::convert::TryFrom;
4
4
5 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
5 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
6 pub enum EntryState {
6 pub enum EntryState {
7 Normal,
7 Normal,
8 Added,
8 Added,
9 Removed,
9 Removed,
10 Merged,
10 Merged,
11 }
11 }
12
12
13 /// The C implementation uses all signed types. This will be an issue
13 /// The C implementation uses all signed types. This will be an issue
14 /// either when 4GB+ source files are commonplace or in 2038, whichever
14 /// either when 4GB+ source files are commonplace or in 2038, whichever
15 /// comes first.
15 /// comes first.
16 #[derive(Debug, PartialEq, Copy, Clone)]
16 #[derive(Debug, PartialEq, Copy, Clone)]
17 pub struct DirstateEntry {
17 pub struct DirstateEntry {
18 pub(crate) flags: Flags,
18 pub(crate) flags: Flags,
19 mode_size: Option<(i32, i32)>,
19 mode_size: Option<(i32, i32)>,
20 mtime: Option<i32>,
20 mtime: Option<i32>,
21 }
21 }
22
22
23 bitflags! {
23 bitflags! {
24 pub(crate) struct Flags: u8 {
24 pub(crate) struct Flags: u8 {
25 const WDIR_TRACKED = 1 << 0;
25 const WDIR_TRACKED = 1 << 0;
26 const P1_TRACKED = 1 << 1;
26 const P1_TRACKED = 1 << 1;
27 const P2_INFO = 1 << 2;
27 const P2_INFO = 1 << 2;
28 }
28 }
29 }
29 }
30
30
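For readers more familiar with the Python side, the flag layout above corresponds roughly to the following sketch (illustrative only, not code from either implementation):

    from enum import IntFlag

    class Flags(IntFlag):
        WDIR_TRACKED = 1 << 0
        P1_TRACKED = 1 << 1
        P2_INFO = 1 << 2

    # a "merged" entry in v1 terms has all three bits set
    merged = Flags.WDIR_TRACKED | Flags.P1_TRACKED | Flags.P2_INFO
    assert int(merged) == 0b111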
31 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
31 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
32
32
33 pub const MTIME_UNSET: i32 = -1;
33 pub const MTIME_UNSET: i32 = -1;
34
34
35 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
35 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
36 /// other parent. This allows revert to pick the right status back during a
36 /// other parent. This allows revert to pick the right status back during a
37 /// merge.
37 /// merge.
38 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
38 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
39 /// A special value used for internal representation of special case in
39 /// A special value used for internal representation of special case in
40 /// dirstate v1 format.
40 /// dirstate v1 format.
41 pub const SIZE_NON_NORMAL: i32 = -1;
41 pub const SIZE_NON_NORMAL: i32 = -1;
42
42
43 impl DirstateEntry {
43 impl DirstateEntry {
44 pub fn from_v2_data(
44 pub fn from_v2_data(
45 wdir_tracked: bool,
45 wdir_tracked: bool,
46 p1_tracked: bool,
46 p1_tracked: bool,
47 p2_info: bool,
47 p2_info: bool,
48 mode_size: Option<(i32, i32)>,
48 mode_size: Option<(i32, i32)>,
49 mtime: Option<i32>,
49 mtime: Option<i32>,
50 ) -> Self {
50 ) -> Self {
51 let mut flags = Flags::empty();
51 let mut flags = Flags::empty();
52 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
52 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
53 flags.set(Flags::P1_TRACKED, p1_tracked);
53 flags.set(Flags::P1_TRACKED, p1_tracked);
54 flags.set(Flags::P2_INFO, p2_info);
54 flags.set(Flags::P2_INFO, p2_info);
55 Self {
55 Self {
56 flags,
56 flags,
57 mode_size,
57 mode_size,
58 mtime,
58 mtime,
59 }
59 }
60 }
60 }
61
61
62 pub fn from_v1_data(
62 pub fn from_v1_data(
63 state: EntryState,
63 state: EntryState,
64 mode: i32,
64 mode: i32,
65 size: i32,
65 size: i32,
66 mtime: i32,
66 mtime: i32,
67 ) -> Self {
67 ) -> Self {
68 match state {
68 match state {
69 EntryState::Normal => {
69 EntryState::Normal => {
70 if size == SIZE_FROM_OTHER_PARENT {
70 if size == SIZE_FROM_OTHER_PARENT {
71 Self::new_from_p2()
71 Self::new_from_p2()
72 } else if size == SIZE_NON_NORMAL {
72 } else if size == SIZE_NON_NORMAL {
73 Self::new_possibly_dirty()
73 Self::new_possibly_dirty()
74 } else if mtime == MTIME_UNSET {
74 } else if mtime == MTIME_UNSET {
75 Self {
75 Self {
76 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
76 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
77 mode_size: Some((mode, size)),
77 mode_size: Some((mode, size)),
78 mtime: None,
78 mtime: None,
79 }
79 }
80 } else {
80 } else {
81 Self::new_normal(mode, size, mtime)
81 Self::new_normal(mode, size, mtime)
82 }
82 }
83 }
83 }
84 EntryState::Added => Self::new_added(),
84 EntryState::Added => Self::new_added(),
85 EntryState::Removed => Self {
85 EntryState::Removed => Self {
86 flags: if size == SIZE_NON_NORMAL {
86 flags: if size == SIZE_NON_NORMAL {
87 Flags::P1_TRACKED | Flags::P2_INFO
87 Flags::P1_TRACKED | Flags::P2_INFO
88 } else if size == SIZE_FROM_OTHER_PARENT {
88 } else if size == SIZE_FROM_OTHER_PARENT {
89 // We don’t know if P1_TRACKED should be set (file history)
89 // We don’t know if P1_TRACKED should be set (file history)
90 Flags::P2_INFO
90 Flags::P2_INFO
91 } else {
91 } else {
92 Flags::P1_TRACKED
92 Flags::P1_TRACKED
93 },
93 },
94 mode_size: None,
94 mode_size: None,
95 mtime: None,
95 mtime: None,
96 },
96 },
97 EntryState::Merged => Self::new_merged(),
97 EntryState::Merged => Self::new_merged(),
98 }
98 }
99 }
99 }
100
100
101 pub fn new_from_p2() -> Self {
101 pub fn new_from_p2() -> Self {
102 Self {
102 Self {
103 // might be missing P1_TRACKED
103 // might be missing P1_TRACKED
104 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
104 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
105 mode_size: None,
105 mode_size: None,
106 mtime: None,
106 mtime: None,
107 }
107 }
108 }
108 }
109
109
110 pub fn new_possibly_dirty() -> Self {
110 pub fn new_possibly_dirty() -> Self {
111 Self {
111 Self {
112 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
112 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
113 mode_size: None,
113 mode_size: None,
114 mtime: None,
114 mtime: None,
115 }
115 }
116 }
116 }
117
117
118 pub fn new_added() -> Self {
118 pub fn new_added() -> Self {
119 Self {
119 Self {
120 flags: Flags::WDIR_TRACKED,
120 flags: Flags::WDIR_TRACKED,
121 mode_size: None,
121 mode_size: None,
122 mtime: None,
122 mtime: None,
123 }
123 }
124 }
124 }
125
125
126 pub fn new_merged() -> Self {
126 pub fn new_merged() -> Self {
127 Self {
127 Self {
128 flags: Flags::WDIR_TRACKED
128 flags: Flags::WDIR_TRACKED
129 | Flags::P1_TRACKED // might not be true because of rename?
129 | Flags::P1_TRACKED // might not be true because of rename?
130 | Flags::P2_INFO, // might not be true because of rename?
130 | Flags::P2_INFO, // might not be true because of rename?
131 mode_size: None,
131 mode_size: None,
132 mtime: None,
132 mtime: None,
133 }
133 }
134 }
134 }
135
135
136 pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self {
136 pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self {
137 Self {
137 Self {
138 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
138 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
139 mode_size: Some((mode, size)),
139 mode_size: Some((mode, size)),
140 mtime: Some(mtime),
140 mtime: Some(mtime),
141 }
141 }
142 }
142 }
143
143
144 /// Creates a new entry in "removed" state.
144 /// Creates a new entry in "removed" state.
145 ///
145 ///
146 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
146 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
147 /// `SIZE_FROM_OTHER_PARENT`
147 /// `SIZE_FROM_OTHER_PARENT`
148 pub fn new_removed(size: i32) -> Self {
148 pub fn new_removed(size: i32) -> Self {
149 Self::from_v1_data(EntryState::Removed, 0, size, 0)
149 Self::from_v1_data(EntryState::Removed, 0, size, 0)
150 }
150 }
151
151
152 pub fn tracked(&self) -> bool {
152 pub fn tracked(&self) -> bool {
153 self.flags.contains(Flags::WDIR_TRACKED)
153 self.flags.contains(Flags::WDIR_TRACKED)
154 }
154 }
155
155
156 pub fn p1_tracked(&self) -> bool {
156 pub fn p1_tracked(&self) -> bool {
157 self.flags.contains(Flags::P1_TRACKED)
157 self.flags.contains(Flags::P1_TRACKED)
158 }
158 }
159
159
160 fn in_either_parent(&self) -> bool {
160 fn in_either_parent(&self) -> bool {
161 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
161 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
162 }
162 }
163
163
164 pub fn removed(&self) -> bool {
164 pub fn removed(&self) -> bool {
165 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
165 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
166 }
166 }
167
167
168 pub fn p2_info(&self) -> bool {
168 pub fn p2_info(&self) -> bool {
169 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
169 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
170 }
170 }
171
171
172 pub fn merged(&self) -> bool {
172 pub fn merged(&self) -> bool {
173 self.flags
173 self.flags
174 .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
174 .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
175 }
175 }
176
176
177 pub fn added(&self) -> bool {
177 pub fn added(&self) -> bool {
178 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
178 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
179 }
179 }
180
180
181 pub fn maybe_clean(&self) -> bool {
181 pub fn maybe_clean(&self) -> bool {
182 if !self.flags.contains(Flags::WDIR_TRACKED) {
182 if !self.flags.contains(Flags::WDIR_TRACKED) {
183 false
183 false
184 } else if !self.flags.contains(Flags::P1_TRACKED) {
184 } else if !self.flags.contains(Flags::P1_TRACKED) {
185 false
185 false
186 } else if self.flags.contains(Flags::P2_INFO) {
186 } else if self.flags.contains(Flags::P2_INFO) {
187 false
187 false
188 } else {
188 } else {
189 true
189 true
190 }
190 }
191 }
191 }
192
192
193 pub fn any_tracked(&self) -> bool {
193 pub fn any_tracked(&self) -> bool {
194 self.flags.intersects(
194 self.flags.intersects(
195 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
195 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
196 )
196 )
197 }
197 }
198
198
199 /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)`
199 /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)`
200 pub(crate) fn v2_data(
200 pub(crate) fn v2_data(
201 &self,
201 &self,
202 ) -> (bool, bool, bool, Option<(i32, i32)>, Option<i32>) {
202 ) -> (bool, bool, bool, Option<(i32, i32)>, Option<i32>) {
203 if !self.any_tracked() {
203 if !self.any_tracked() {
204 // TODO: return an Option instead?
204 // TODO: return an Option instead?
205 panic!("Accessing v1_state of an untracked DirstateEntry")
205 panic!("Accessing v1_state of an untracked DirstateEntry")
206 }
206 }
207 let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
207 let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
208 let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
208 let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
209 let p2_info = self.flags.contains(Flags::P2_INFO);
209 let p2_info = self.flags.contains(Flags::P2_INFO);
210 let mode_size = self.mode_size;
210 let mode_size = self.mode_size;
211 let mtime = self.mtime;
211 let mtime = self.mtime;
212 (wdir_tracked, p1_tracked, p2_info, mode_size, mtime)
212 (wdir_tracked, p1_tracked, p2_info, mode_size, mtime)
213 }
213 }
214
214
215 fn v1_state(&self) -> EntryState {
215 fn v1_state(&self) -> EntryState {
216 if !self.any_tracked() {
216 if !self.any_tracked() {
217 // TODO: return an Option instead?
217 // TODO: return an Option instead?
218 panic!("Accessing v1_state of an untracked DirstateEntry")
218 panic!("Accessing v1_state of an untracked DirstateEntry")
219 }
219 }
220 if self.removed() {
220 if self.removed() {
221 EntryState::Removed
221 EntryState::Removed
222 } else if self.merged() {
222 } else if self
223 .flags
224 .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
225 {
223 EntryState::Merged
226 EntryState::Merged
224 } else if self.added() {
227 } else if self.added() {
225 EntryState::Added
228 EntryState::Added
226 } else {
229 } else {
227 EntryState::Normal
230 EntryState::Normal
228 }
231 }
229 }
232 }
230
233
    fn v1_mode(&self) -> i32 {
        if let Some((mode, _size)) = self.mode_size {
            mode
        } else {
            0
        }
    }

    fn v1_size(&self) -> i32 {
        if !self.any_tracked() {
            // TODO: return an Option instead?
            panic!("Accessing v1_size of an untracked DirstateEntry")
        }
        if self.removed()
            && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO)
        {
            SIZE_NON_NORMAL
        } else if self.flags.contains(Flags::P2_INFO) {
            SIZE_FROM_OTHER_PARENT
        } else if self.removed() {
            0
        } else if self.added() {
            SIZE_NON_NORMAL
        } else if let Some((_mode, size)) = self.mode_size {
            size
        } else {
            SIZE_NON_NORMAL
        }
    }

    fn v1_mtime(&self) -> i32 {
        if !self.any_tracked() {
            // TODO: return an Option instead?
            panic!("Accessing v1_mtime of an untracked DirstateEntry")
        }
        if self.removed() {
            0
        } else if self.flags.contains(Flags::P2_INFO) {
            MTIME_UNSET
        } else if !self.flags.contains(Flags::P1_TRACKED) {
            MTIME_UNSET
        } else {
            self.mtime.unwrap_or(MTIME_UNSET)
        }
    }

    // TODO: return `Option<EntryState>`? None when `!self.any_tracked`
    pub fn state(&self) -> EntryState {
        self.v1_state()
    }

    // TODO: return Option?
    pub fn mode(&self) -> i32 {
        self.v1_mode()
    }

    // TODO: return Option?
    pub fn size(&self) -> i32 {
        self.v1_size()
    }

    // TODO: return Option?
    pub fn mtime(&self) -> i32 {
        self.v1_mtime()
    }

    pub fn drop_merge_data(&mut self) {
        if self.flags.contains(Flags::P2_INFO) {
            self.flags.remove(Flags::P2_INFO);
            self.mode_size = None;
            self.mtime = None;
        }
    }

    pub fn set_possibly_dirty(&mut self) {
        self.mtime = None
    }

    pub fn set_clean(&mut self, mode: i32, size: i32, mtime: i32) {
        self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED);
        self.mode_size = Some((mode, size));
        self.mtime = Some(mtime);
    }

    pub fn set_tracked(&mut self) {
        self.flags.insert(Flags::WDIR_TRACKED);
        // `set_tracked` replaces various `normallookup` calls, so we mark
        // the file as needing a lookup.
        //
        // Consider dropping this in the future in favor of something less
        // broad.
        self.mtime = None;
    }

    pub fn set_untracked(&mut self) {
        self.flags.remove(Flags::WDIR_TRACKED);
        self.mode_size = None;
        self.mtime = None;
    }

    /// Returns `(state, mode, size, mtime)` for the purpose of serialization
    /// in the dirstate-v1 format.
    ///
    /// This includes marker values such as `mtime == -1`. In the future we
    /// may want to stop representing these cases that way in memory, but
    /// serialization will need to keep the same format.
    pub fn v1_data(&self) -> (u8, i32, i32, i32) {
        (
            self.v1_state().into(),
            self.v1_mode(),
            self.v1_size(),
            self.v1_mtime(),
        )
    }

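To make the serialization context concrete, here is a self-contained sketch (not part of the changeset) of how a `(state, mode, size, mtime)` tuple from `v1_data` could be laid out as a dirstate-v1 entry: one state byte, three big-endian 32-bit integers, then the length of the path and the path bytes. The layout is recalled from the v1 parser and should be checked against `mercurial/pure/parsers.py` rather than taken as authoritative.

// Pack one dirstate-v1 entry header plus path (sketch, assumptions noted above).
fn pack_v1_entry(state: u8, mode: i32, size: i32, mtime: i32, path: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(17 + path.len());
    out.push(state);
    out.extend_from_slice(&mode.to_be_bytes());
    out.extend_from_slice(&size.to_be_bytes());
    out.extend_from_slice(&mtime.to_be_bytes());
    out.extend_from_slice(&(path.len() as i32).to_be_bytes());
    out.extend_from_slice(path);
    out
}

fn main() {
    // `v1_data()` would supply the first four fields; marker values such as
    // `mtime == -1` are written out unchanged.
    let entry = pack_v1_entry(b'n', 0o644, 12, -1, b"foo.txt");
    assert_eq!(entry.len(), 17 + 7);
    assert_eq!(entry[0], b'n');
}
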
    pub(crate) fn is_from_other_parent(&self) -> bool {
        self.state() == EntryState::Normal
            && self.size() == SIZE_FROM_OTHER_PARENT
    }

    // TODO: other platforms
    #[cfg(unix)]
    pub fn mode_changed(
        &self,
        filesystem_metadata: &std::fs::Metadata,
    ) -> bool {
        use std::os::unix::fs::MetadataExt;
        const EXEC_BIT_MASK: u32 = 0o100;
        let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK;
        let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
        dirstate_exec_bit != fs_exec_bit
    }

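Only the owner-executable bit is compared by `mode_changed`, so permission changes that leave that bit alone do not make a file look modified. A minimal standalone sketch of the same comparison, detached from `DirstateEntry` (the function name below is illustrative only):

// Compare just the 0o100 exec bit of a recorded mode and a filesystem mode.
fn exec_bit_changed(recorded_mode: u32, fs_mode: u32) -> bool {
    const EXEC_BIT_MASK: u32 = 0o100;
    (recorded_mode & EXEC_BIT_MASK) != (fs_mode & EXEC_BIT_MASK)
}

fn main() {
    assert!(exec_bit_changed(0o644, 0o755)); // exec bit gained
    assert!(!exec_bit_changed(0o644, 0o664)); // group-write change only, ignored
}
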
    /// Returns a `(state, mode, size, mtime)` tuple as for
    /// `DirstateMapMethods::debug_iter`.
    pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
        (self.state().into(), self.mode(), self.size(), self.mtime())
    }

    pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
        self.state() == EntryState::Normal && self.mtime() == now
    }

    pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
        let ambiguous = self.mtime_is_ambiguous(now);
        if ambiguous {
            // The file was last modified "simultaneously" with the current
            // write to dirstate (i.e. within the same second for file-
            // systems with a granularity of 1 sec). This commonly happens
            // for at least a couple of files on 'update'.
            // The user could change the file without changing its size
            // within the same second. Invalidate the file's mtime in
            // dirstate, forcing future 'status' calls to compare the
            // contents of the file if the size is the same. This prevents
            // mistakenly treating such files as clean.
            self.set_possibly_dirty()
        }
        ambiguous
    }
}

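The rule implemented above can be summed up as: a recorded mtime equal to the timestamp of the current dirstate write cannot be trusted, so it is dropped and the next `hg status` falls back to comparing file contents. A standalone sketch of that rule (the helper below is illustrative, not part of the source):

// Clear a recorded mtime when it matches "now"; return whether it was cleared.
fn clear_if_ambiguous(recorded_mtime: &mut Option<i32>, now: i32) -> bool {
    match *recorded_mtime {
        Some(mtime) if mtime == now => {
            *recorded_mtime = None; // force a content comparison later
            true
        }
        _ => false,
    }
}

fn main() {
    let mut same_second = Some(1_000);
    assert!(clear_if_ambiguous(&mut same_second, 1_000));
    assert_eq!(same_second, None);

    let mut older = Some(999);
    assert!(!clear_if_ambiguous(&mut older, 1_000));
    assert_eq!(older, Some(999));
}
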
impl EntryState {
    pub fn is_tracked(self) -> bool {
        use EntryState::*;
        match self {
            Normal | Added | Merged => true,
            Removed => false,
        }
    }
}

impl TryFrom<u8> for EntryState {
    type Error = HgError;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            b'n' => Ok(EntryState::Normal),
            b'a' => Ok(EntryState::Added),
            b'r' => Ok(EntryState::Removed),
            b'm' => Ok(EntryState::Merged),
            _ => Err(HgError::CorruptedRepository(format!(
                "Incorrect dirstate entry state {}",
                value
            ))),
        }
    }
}

impl Into<u8> for EntryState {
    fn into(self) -> u8 {
        match self {
            EntryState::Normal => b'n',
            EntryState::Added => b'a',
            EntryState::Removed => b'r',
            EntryState::Merged => b'm',
        }
    }
}
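
The two impls above round-trip the single-byte state codes used by dirstate-v1 ('n', 'a', 'r', 'm'). Below is a self-contained round-trip sketch; `State` is a stand-in for `EntryState`, and a plain `Option` replaces the `HgError`-based error handling.

#[derive(Clone, Copy, Debug, PartialEq)]
enum State {
    Normal,
    Added,
    Removed,
    Merged,
}

fn from_byte(value: u8) -> Option<State> {
    match value {
        b'n' => Some(State::Normal),
        b'a' => Some(State::Added),
        b'r' => Some(State::Removed),
        b'm' => Some(State::Merged),
        _ => None, // the real code reports a corrupted repository here
    }
}

fn to_byte(state: State) -> u8 {
    match state {
        State::Normal => b'n',
        State::Added => b'a',
        State::Removed => b'r',
        State::Merged => b'm',
    }
}

fn main() {
    for byte in [b'n', b'a', b'r', b'm'] {
        let state = from_byte(byte).expect("known state byte");
        assert_eq!(to_byte(state), byte);
    }
    assert_eq!(from_byte(b'x'), None);
}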