dirstate-item: replace call to new_merged...
marmoute - r48966:7a8c9869 default
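For orientation before the diff: this revision replaces a call to the legacy `DirstateItem.new_merged()` helper inside `from_v1_data` with an explicit constructor call, and inlines the matching Rust helper in `DirstateEntry::from_v1_data`. A minimal Python sketch of the two equivalent spellings (illustrative only, assuming the `DirstateItem` class from the diff below is in scope):

    # Sketch, not part of the patch: both forms build the same "merged" item,
    # tracked in the working copy and in p1, with p2 info set and
    # mode/size/mtime left unset (None).
    legacy = DirstateItem.new_merged()
    explicit = DirstateItem(wc_tracked=True, p1_tracked=True, p2_info=True)

    # @attr.s generates __eq__ over all six fields by default, so the two
    # objects should compare equal.
    assert legacy == explicit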
@@ -1,775 +1,775
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from ..thirdparty import attr
17 from ..thirdparty import attr
18 from .. import (
18 from .. import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 revlogutils,
21 revlogutils,
22 util,
22 util,
23 )
23 )
24
24
25 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import constants as revlog_constants
26 from ..revlogutils import constants as revlog_constants
27
27
28 stringio = pycompat.bytesio
28 stringio = pycompat.bytesio
29
29
30
30
31 _pack = struct.pack
31 _pack = struct.pack
32 _unpack = struct.unpack
32 _unpack = struct.unpack
33 _compress = zlib.compress
33 _compress = zlib.compress
34 _decompress = zlib.decompress
34 _decompress = zlib.decompress
35
35
36
36
37 # a special value used internally for `size` if the file comes from the other parent
37 # a special value used internally for `size` if the file comes from the other parent
38 FROM_P2 = -2
38 FROM_P2 = -2
39
39
40 # a special value used internally for `size` if the file is modified/merged/added
40 # a special value used internally for `size` if the file is modified/merged/added
41 NONNORMAL = -1
41 NONNORMAL = -1
42
42
43 # a special value used internally for `time` if the time is ambiguous
43 # a special value used internally for `time` if the time is ambiguous
44 AMBIGUOUS_TIME = -1
44 AMBIGUOUS_TIME = -1
45
45
46
46
47 @attr.s(slots=True, init=False)
47 @attr.s(slots=True, init=False)
48 class DirstateItem(object):
48 class DirstateItem(object):
49 """represent a dirstate entry
49 """represent a dirstate entry
50
50
51 It holds multiple attributes
51 It holds multiple attributes
52
52
53 # about file tracking
53 # about file tracking
54 - wc_tracked: is the file tracked by the working copy
54 - wc_tracked: is the file tracked by the working copy
55 - p1_tracked: is the file tracked in the working copy's first parent
55 - p1_tracked: is the file tracked in the working copy's first parent
56 - p2_info: the file has been involved in some merge operation. Either
56 - p2_info: the file has been involved in some merge operation. Either
57 because it was actually merged, or because the p2 version was
57 because it was actually merged, or because the p2 version was
58 ahead, or because a rename moved it there. In any of these cases
58 ahead, or because a rename moved it there. In any of these cases
59 `hg status` will want it displayed as modified.
59 `hg status` will want it displayed as modified.
60
60
61 # about the file state expected from p1 manifest:
61 # about the file state expected from p1 manifest:
62 - mode: the file mode in p1
62 - mode: the file mode in p1
63 - size: the file size in p1
63 - size: the file size in p1
64
64
65 These values can be set to None, which means we don't have a meaningful value
65 These values can be set to None, which means we don't have a meaningful value
66 to compare with, either because we don't really care about them as their
66 to compare with, either because we don't really care about them as their
67 `status` is known without having to look at the disk, or because we don't
67 `status` is known without having to look at the disk, or because we don't
68 know these right now and a full comparison will be needed to find out if
68 know these right now and a full comparison will be needed to find out if
69 the file is clean.
69 the file is clean.
70
70
71 # about the file state on disk last time we saw it:
71 # about the file state on disk last time we saw it:
72 - mtime: the last known clean mtime for the file.
72 - mtime: the last known clean mtime for the file.
73
73
74 This value can be set to None if no cacheable state exists, either because we
74 This value can be set to None if no cacheable state exists, either because we
75 do not care (see previous section) or because we could not cache something
75 do not care (see previous section) or because we could not cache something
76 yet.
76 yet.
77 """
77 """
78
78
79 _wc_tracked = attr.ib()
79 _wc_tracked = attr.ib()
80 _p1_tracked = attr.ib()
80 _p1_tracked = attr.ib()
81 _p2_info = attr.ib()
81 _p2_info = attr.ib()
82 _mode = attr.ib()
82 _mode = attr.ib()
83 _size = attr.ib()
83 _size = attr.ib()
84 _mtime = attr.ib()
84 _mtime = attr.ib()
85
85
86 def __init__(
86 def __init__(
87 self,
87 self,
88 wc_tracked=False,
88 wc_tracked=False,
89 p1_tracked=False,
89 p1_tracked=False,
90 p2_info=False,
90 p2_info=False,
91 has_meaningful_data=True,
91 has_meaningful_data=True,
92 has_meaningful_mtime=True,
92 has_meaningful_mtime=True,
93 parentfiledata=None,
93 parentfiledata=None,
94 ):
94 ):
95 self._wc_tracked = wc_tracked
95 self._wc_tracked = wc_tracked
96 self._p1_tracked = p1_tracked
96 self._p1_tracked = p1_tracked
97 self._p2_info = p2_info
97 self._p2_info = p2_info
98
98
99 self._mode = None
99 self._mode = None
100 self._size = None
100 self._size = None
101 self._mtime = None
101 self._mtime = None
102 if parentfiledata is None:
102 if parentfiledata is None:
103 has_meaningful_mtime = False
103 has_meaningful_mtime = False
104 has_meaningful_data = False
104 has_meaningful_data = False
105 if has_meaningful_data:
105 if has_meaningful_data:
106 self._mode = parentfiledata[0]
106 self._mode = parentfiledata[0]
107 self._size = parentfiledata[1]
107 self._size = parentfiledata[1]
108 if has_meaningful_mtime:
108 if has_meaningful_mtime:
109 self._mtime = parentfiledata[2]
109 self._mtime = parentfiledata[2]
110
110
111 @classmethod
111 @classmethod
112 def new_added(cls):
112 def new_added(cls):
113 """constructor to help legacy API to build a new "added" item
113 """constructor to help legacy API to build a new "added" item
114
114
115 Should eventually be removed
115 Should eventually be removed
116 """
116 """
117 return cls(wc_tracked=True)
117 return cls(wc_tracked=True)
118
118
119 @classmethod
119 @classmethod
120 def new_merged(cls):
120 def new_merged(cls):
121 """constructor to help legacy API to build a new "merged" item
121 """constructor to help legacy API to build a new "merged" item
122
122
123 Should eventually be removed
123 Should eventually be removed
124 """
124 """
125 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
125 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
126
126
127 @classmethod
127 @classmethod
128 def new_from_p2(cls):
128 def new_from_p2(cls):
129 """constructor to help legacy API to build a new "from_p2" item
129 """constructor to help legacy API to build a new "from_p2" item
130
130
131 Should eventually be removed
131 Should eventually be removed
132 """
132 """
133 return cls(wc_tracked=True, p2_info=True)
133 return cls(wc_tracked=True, p2_info=True)
134
134
135 @classmethod
135 @classmethod
136 def new_possibly_dirty(cls):
136 def new_possibly_dirty(cls):
137 """constructor to help legacy API to build a new "possibly_dirty" item
137 """constructor to help legacy API to build a new "possibly_dirty" item
138
138
139 Should eventually be removed
139 Should eventually be removed
140 """
140 """
141 return cls(wc_tracked=True, p1_tracked=True)
141 return cls(wc_tracked=True, p1_tracked=True)
142
142
143 @classmethod
143 @classmethod
144 def new_normal(cls, mode, size, mtime):
144 def new_normal(cls, mode, size, mtime):
145 """constructor to help legacy API to build a new "normal" item
145 """constructor to help legacy API to build a new "normal" item
146
146
147 Should eventually be removed
147 Should eventually be removed
148 """
148 """
149 assert size != FROM_P2
149 assert size != FROM_P2
150 assert size != NONNORMAL
150 assert size != NONNORMAL
151 return cls(
151 return cls(
152 wc_tracked=True,
152 wc_tracked=True,
153 p1_tracked=True,
153 p1_tracked=True,
154 parentfiledata=(mode, size, mtime),
154 parentfiledata=(mode, size, mtime),
155 )
155 )
156
156
157 @classmethod
157 @classmethod
158 def from_v1_data(cls, state, mode, size, mtime):
158 def from_v1_data(cls, state, mode, size, mtime):
159 """Build a new DirstateItem object from V1 data
159 """Build a new DirstateItem object from V1 data
160
160
161 Since the dirstate-v1 format is frozen, the signature of this function
161 Since the dirstate-v1 format is frozen, the signature of this function
162 is not expected to change, unlike the __init__ one.
162 is not expected to change, unlike the __init__ one.
163 """
163 """
164 if state == b'm':
164 if state == b'm':
165 return cls.new_merged()
165 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
166 elif state == b'a':
166 elif state == b'a':
167 return cls.new_added()
167 return cls.new_added()
168 elif state == b'r':
168 elif state == b'r':
169 if size == NONNORMAL:
169 if size == NONNORMAL:
170 p1_tracked = True
170 p1_tracked = True
171 p2_info = True
171 p2_info = True
172 elif size == FROM_P2:
172 elif size == FROM_P2:
173 p1_tracked = False
173 p1_tracked = False
174 p2_info = True
174 p2_info = True
175 else:
175 else:
176 p1_tracked = True
176 p1_tracked = True
177 p2_info = False
177 p2_info = False
178 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
178 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
179 elif state == b'n':
179 elif state == b'n':
180 if size == FROM_P2:
180 if size == FROM_P2:
181 return cls.new_from_p2()
181 return cls.new_from_p2()
182 elif size == NONNORMAL:
182 elif size == NONNORMAL:
183 return cls.new_possibly_dirty()
183 return cls.new_possibly_dirty()
184 elif mtime == AMBIGUOUS_TIME:
184 elif mtime == AMBIGUOUS_TIME:
185 instance = cls.new_normal(mode, size, 42)
185 instance = cls.new_normal(mode, size, 42)
186 instance._mtime = None
186 instance._mtime = None
187 return instance
187 return instance
188 else:
188 else:
189 return cls.new_normal(mode, size, mtime)
189 return cls.new_normal(mode, size, mtime)
190 else:
190 else:
191 raise RuntimeError(b'unknown state: %s' % state)
191 raise RuntimeError(b'unknown state: %s' % state)
192
192
193 def set_possibly_dirty(self):
193 def set_possibly_dirty(self):
194 """Mark a file as "possibly dirty"
194 """Mark a file as "possibly dirty"
195
195
196 This means the next status call will have to actually check its content
196 This means the next status call will have to actually check its content
197 to make sure it is correct.
197 to make sure it is correct.
198 """
198 """
199 self._mtime = None
199 self._mtime = None
200
200
201 def set_clean(self, mode, size, mtime):
201 def set_clean(self, mode, size, mtime):
202 """mark a file as "clean" cancelling potential "possibly dirty call"
202 """mark a file as "clean" cancelling potential "possibly dirty call"
203
203
204 Note: this function is a descendant of `dirstate.normal` and is
204 Note: this function is a descendant of `dirstate.normal` and is
205 currently expected to be called on "normal" entries only. There is no
205 currently expected to be called on "normal" entries only. There is no
206 reason for this not to change in the future as long as the code is
206 reason for this not to change in the future as long as the code is
207 updated to preserve the proper state of the non-normal files.
207 updated to preserve the proper state of the non-normal files.
208 """
208 """
209 self._wc_tracked = True
209 self._wc_tracked = True
210 self._p1_tracked = True
210 self._p1_tracked = True
211 self._mode = mode
211 self._mode = mode
212 self._size = size
212 self._size = size
213 self._mtime = mtime
213 self._mtime = mtime
214
214
215 def set_tracked(self):
215 def set_tracked(self):
216 """mark a file as tracked in the working copy
216 """mark a file as tracked in the working copy
217
217
218 This will ultimately be called by commands like `hg add`.
218 This will ultimately be called by commands like `hg add`.
219 """
219 """
220 self._wc_tracked = True
220 self._wc_tracked = True
221 # `set_tracked` is replacing various `normallookup` calls. So we mark
221 # `set_tracked` is replacing various `normallookup` calls. So we mark
222 # the files as needing lookup
222 # the files as needing lookup
223 #
223 #
224 # Consider dropping this in the future in favor of something less broad.
224 # Consider dropping this in the future in favor of something less broad.
225 self._mtime = None
225 self._mtime = None
226
226
227 def set_untracked(self):
227 def set_untracked(self):
228 """mark a file as untracked in the working copy
228 """mark a file as untracked in the working copy
229
229
230 This will ultimately be called by commands like `hg remove`.
230 This will ultimately be called by commands like `hg remove`.
231 """
231 """
232 self._wc_tracked = False
232 self._wc_tracked = False
233 self._mode = None
233 self._mode = None
234 self._size = None
234 self._size = None
235 self._mtime = None
235 self._mtime = None
236
236
237 def drop_merge_data(self):
237 def drop_merge_data(self):
238 """remove all "merge-only" from a DirstateItem
238 """remove all "merge-only" from a DirstateItem
239
239
240 This is to be called by the dirstatemap code when the second parent is dropped
240 This is to be called by the dirstatemap code when the second parent is dropped
241 """
241 """
242 if self._p2_info:
242 if self._p2_info:
243 self._p2_info = False
243 self._p2_info = False
244 self._mode = None
244 self._mode = None
245 self._size = None
245 self._size = None
246 self._mtime = None
246 self._mtime = None
247
247
248 @property
248 @property
249 def mode(self):
249 def mode(self):
250 return self.v1_mode()
250 return self.v1_mode()
251
251
252 @property
252 @property
253 def size(self):
253 def size(self):
254 return self.v1_size()
254 return self.v1_size()
255
255
256 @property
256 @property
257 def mtime(self):
257 def mtime(self):
258 return self.v1_mtime()
258 return self.v1_mtime()
259
259
260 @property
260 @property
261 def state(self):
261 def state(self):
262 """
262 """
263 States are:
263 States are:
264 n normal
264 n normal
265 m needs merging
265 m needs merging
266 r marked for removal
266 r marked for removal
267 a marked for addition
267 a marked for addition
268
268
269 XXX This "state" is a bit obscure and mostly a direct expression of the
269 XXX This "state" is a bit obscure and mostly a direct expression of the
270 dirstatev1 format. It would make sense to ultimately deprecate it in
270 dirstatev1 format. It would make sense to ultimately deprecate it in
271 favor of the more "semantic" attributes.
271 favor of the more "semantic" attributes.
272 """
272 """
273 if not self.any_tracked:
273 if not self.any_tracked:
274 return b'?'
274 return b'?'
275 return self.v1_state()
275 return self.v1_state()
276
276
277 @property
277 @property
278 def tracked(self):
278 def tracked(self):
279 """True is the file is tracked in the working copy"""
279 """True is the file is tracked in the working copy"""
280 return self._wc_tracked
280 return self._wc_tracked
281
281
282 @property
282 @property
283 def any_tracked(self):
283 def any_tracked(self):
284 """True is the file is tracked anywhere (wc or parents)"""
284 """True is the file is tracked anywhere (wc or parents)"""
285 return self._wc_tracked or self._p1_tracked or self._p2_info
285 return self._wc_tracked or self._p1_tracked or self._p2_info
286
286
287 @property
287 @property
288 def added(self):
288 def added(self):
289 """True if the file has been added"""
289 """True if the file has been added"""
290 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
290 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
291
291
292 @property
292 @property
293 def maybe_clean(self):
293 def maybe_clean(self):
294 """True if the file has a chance to be in the "clean" state"""
294 """True if the file has a chance to be in the "clean" state"""
295 if not self._wc_tracked:
295 if not self._wc_tracked:
296 return False
296 return False
297 elif not self._p1_tracked:
297 elif not self._p1_tracked:
298 return False
298 return False
299 elif self._p2_info:
299 elif self._p2_info:
300 return False
300 return False
301 return True
301 return True
302
302
303 @property
303 @property
304 def p1_tracked(self):
304 def p1_tracked(self):
305 """True if the file is tracked in the first parent manifest"""
305 """True if the file is tracked in the first parent manifest"""
306 return self._p1_tracked
306 return self._p1_tracked
307
307
308 @property
308 @property
309 def p2_info(self):
309 def p2_info(self):
310 """True if the file needed to merge or apply any input from p2
310 """True if the file needed to merge or apply any input from p2
311
311
312 See the class documentation for details.
312 See the class documentation for details.
313 """
313 """
314 return self._wc_tracked and self._p2_info
314 return self._wc_tracked and self._p2_info
315
315
316 @property
316 @property
317 def removed(self):
317 def removed(self):
318 """True if the file has been removed"""
318 """True if the file has been removed"""
319 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
319 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
320
320
321 def v1_state(self):
321 def v1_state(self):
322 """return a "state" suitable for v1 serialization"""
322 """return a "state" suitable for v1 serialization"""
323 if not self.any_tracked:
323 if not self.any_tracked:
324 # the object has no state to record, this is -currently-
324 # the object has no state to record, this is -currently-
325 # unsupported
325 # unsupported
326 raise RuntimeError('untracked item')
326 raise RuntimeError('untracked item')
327 elif self.removed:
327 elif self.removed:
328 return b'r'
328 return b'r'
329 elif self._p1_tracked and self._p2_info:
329 elif self._p1_tracked and self._p2_info:
330 return b'm'
330 return b'm'
331 elif self.added:
331 elif self.added:
332 return b'a'
332 return b'a'
333 else:
333 else:
334 return b'n'
334 return b'n'
335
335
336 def v1_mode(self):
336 def v1_mode(self):
337 """return a "mode" suitable for v1 serialization"""
337 """return a "mode" suitable for v1 serialization"""
338 return self._mode if self._mode is not None else 0
338 return self._mode if self._mode is not None else 0
339
339
340 def v1_size(self):
340 def v1_size(self):
341 """return a "size" suitable for v1 serialization"""
341 """return a "size" suitable for v1 serialization"""
342 if not self.any_tracked:
342 if not self.any_tracked:
343 # the object has no state to record, this is -currently-
343 # the object has no state to record, this is -currently-
344 # unsupported
344 # unsupported
345 raise RuntimeError('untracked item')
345 raise RuntimeError('untracked item')
346 elif self.removed and self._p1_tracked and self._p2_info:
346 elif self.removed and self._p1_tracked and self._p2_info:
347 return NONNORMAL
347 return NONNORMAL
348 elif self._p2_info:
348 elif self._p2_info:
349 return FROM_P2
349 return FROM_P2
350 elif self.removed:
350 elif self.removed:
351 return 0
351 return 0
352 elif self.added:
352 elif self.added:
353 return NONNORMAL
353 return NONNORMAL
354 elif self._size is None:
354 elif self._size is None:
355 return NONNORMAL
355 return NONNORMAL
356 else:
356 else:
357 return self._size
357 return self._size
358
358
359 def v1_mtime(self):
359 def v1_mtime(self):
360 """return a "mtime" suitable for v1 serialization"""
360 """return a "mtime" suitable for v1 serialization"""
361 if not self.any_tracked:
361 if not self.any_tracked:
362 # the object has no state to record, this is -currently-
362 # the object has no state to record, this is -currently-
363 # unsupported
363 # unsupported
364 raise RuntimeError('untracked item')
364 raise RuntimeError('untracked item')
365 elif self.removed:
365 elif self.removed:
366 return 0
366 return 0
367 elif self._mtime is None:
367 elif self._mtime is None:
368 return AMBIGUOUS_TIME
368 return AMBIGUOUS_TIME
369 elif self._p2_info:
369 elif self._p2_info:
370 return AMBIGUOUS_TIME
370 return AMBIGUOUS_TIME
371 elif not self._p1_tracked:
371 elif not self._p1_tracked:
372 return AMBIGUOUS_TIME
372 return AMBIGUOUS_TIME
373 else:
373 else:
374 return self._mtime
374 return self._mtime
375
375
376 def need_delay(self, now):
376 def need_delay(self, now):
377 """True if the stored mtime would be ambiguous with the current time"""
377 """True if the stored mtime would be ambiguous with the current time"""
378 return self.v1_state() == b'n' and self.v1_mtime() == now
378 return self.v1_state() == b'n' and self.v1_mtime() == now
379
379
380
380
381 def gettype(q):
381 def gettype(q):
382 return int(q & 0xFFFF)
382 return int(q & 0xFFFF)
383
383
384
384
385 class BaseIndexObject(object):
385 class BaseIndexObject(object):
386 # Can I be passed to an algorithm implemented in Rust?
386 # Can I be passed to an algorithm implemented in Rust?
387 rust_ext_compat = 0
387 rust_ext_compat = 0
388 # Format of an index entry according to Python's `struct` language
388 # Format of an index entry according to Python's `struct` language
389 index_format = revlog_constants.INDEX_ENTRY_V1
389 index_format = revlog_constants.INDEX_ENTRY_V1
390 # Size of a C unsigned long long int, platform independent
390 # Size of a C unsigned long long int, platform independent
391 big_int_size = struct.calcsize(b'>Q')
391 big_int_size = struct.calcsize(b'>Q')
392 # Size of a C long int, platform independent
392 # Size of a C long int, platform independent
393 int_size = struct.calcsize(b'>i')
393 int_size = struct.calcsize(b'>i')
394 # An empty index entry, used as a default value to be overridden, or nullrev
394 # An empty index entry, used as a default value to be overridden, or nullrev
395 null_item = (
395 null_item = (
396 0,
396 0,
397 0,
397 0,
398 0,
398 0,
399 -1,
399 -1,
400 -1,
400 -1,
401 -1,
401 -1,
402 -1,
402 -1,
403 sha1nodeconstants.nullid,
403 sha1nodeconstants.nullid,
404 0,
404 0,
405 0,
405 0,
406 revlog_constants.COMP_MODE_INLINE,
406 revlog_constants.COMP_MODE_INLINE,
407 revlog_constants.COMP_MODE_INLINE,
407 revlog_constants.COMP_MODE_INLINE,
408 )
408 )
409
409
410 @util.propertycache
410 @util.propertycache
411 def entry_size(self):
411 def entry_size(self):
412 return self.index_format.size
412 return self.index_format.size
413
413
414 @property
414 @property
415 def nodemap(self):
415 def nodemap(self):
416 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
416 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
417 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
417 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
418 return self._nodemap
418 return self._nodemap
419
419
420 @util.propertycache
420 @util.propertycache
421 def _nodemap(self):
421 def _nodemap(self):
422 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
422 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
423 for r in range(0, len(self)):
423 for r in range(0, len(self)):
424 n = self[r][7]
424 n = self[r][7]
425 nodemap[n] = r
425 nodemap[n] = r
426 return nodemap
426 return nodemap
427
427
428 def has_node(self, node):
428 def has_node(self, node):
429 """return True if the node exist in the index"""
429 """return True if the node exist in the index"""
430 return node in self._nodemap
430 return node in self._nodemap
431
431
432 def rev(self, node):
432 def rev(self, node):
433 """return a revision for a node
433 """return a revision for a node
434
434
435 If the node is unknown, raise a RevlogError"""
435 If the node is unknown, raise a RevlogError"""
436 return self._nodemap[node]
436 return self._nodemap[node]
437
437
438 def get_rev(self, node):
438 def get_rev(self, node):
439 """return a revision for a node
439 """return a revision for a node
440
440
441 If the node is unknown, return None"""
441 If the node is unknown, return None"""
442 return self._nodemap.get(node)
442 return self._nodemap.get(node)
443
443
444 def _stripnodes(self, start):
444 def _stripnodes(self, start):
445 if '_nodemap' in vars(self):
445 if '_nodemap' in vars(self):
446 for r in range(start, len(self)):
446 for r in range(start, len(self)):
447 n = self[r][7]
447 n = self[r][7]
448 del self._nodemap[n]
448 del self._nodemap[n]
449
449
450 def clearcaches(self):
450 def clearcaches(self):
451 self.__dict__.pop('_nodemap', None)
451 self.__dict__.pop('_nodemap', None)
452
452
453 def __len__(self):
453 def __len__(self):
454 return self._lgt + len(self._extra)
454 return self._lgt + len(self._extra)
455
455
456 def append(self, tup):
456 def append(self, tup):
457 if '_nodemap' in vars(self):
457 if '_nodemap' in vars(self):
458 self._nodemap[tup[7]] = len(self)
458 self._nodemap[tup[7]] = len(self)
459 data = self._pack_entry(len(self), tup)
459 data = self._pack_entry(len(self), tup)
460 self._extra.append(data)
460 self._extra.append(data)
461
461
462 def _pack_entry(self, rev, entry):
462 def _pack_entry(self, rev, entry):
463 assert entry[8] == 0
463 assert entry[8] == 0
464 assert entry[9] == 0
464 assert entry[9] == 0
465 return self.index_format.pack(*entry[:8])
465 return self.index_format.pack(*entry[:8])
466
466
467 def _check_index(self, i):
467 def _check_index(self, i):
468 if not isinstance(i, int):
468 if not isinstance(i, int):
469 raise TypeError(b"expecting int indexes")
469 raise TypeError(b"expecting int indexes")
470 if i < 0 or i >= len(self):
470 if i < 0 or i >= len(self):
471 raise IndexError
471 raise IndexError
472
472
473 def __getitem__(self, i):
473 def __getitem__(self, i):
474 if i == -1:
474 if i == -1:
475 return self.null_item
475 return self.null_item
476 self._check_index(i)
476 self._check_index(i)
477 if i >= self._lgt:
477 if i >= self._lgt:
478 data = self._extra[i - self._lgt]
478 data = self._extra[i - self._lgt]
479 else:
479 else:
480 index = self._calculate_index(i)
480 index = self._calculate_index(i)
481 data = self._data[index : index + self.entry_size]
481 data = self._data[index : index + self.entry_size]
482 r = self._unpack_entry(i, data)
482 r = self._unpack_entry(i, data)
483 if self._lgt and i == 0:
483 if self._lgt and i == 0:
484 offset = revlogutils.offset_type(0, gettype(r[0]))
484 offset = revlogutils.offset_type(0, gettype(r[0]))
485 r = (offset,) + r[1:]
485 r = (offset,) + r[1:]
486 return r
486 return r
487
487
488 def _unpack_entry(self, rev, data):
488 def _unpack_entry(self, rev, data):
489 r = self.index_format.unpack(data)
489 r = self.index_format.unpack(data)
490 r = r + (
490 r = r + (
491 0,
491 0,
492 0,
492 0,
493 revlog_constants.COMP_MODE_INLINE,
493 revlog_constants.COMP_MODE_INLINE,
494 revlog_constants.COMP_MODE_INLINE,
494 revlog_constants.COMP_MODE_INLINE,
495 )
495 )
496 return r
496 return r
497
497
498 def pack_header(self, header):
498 def pack_header(self, header):
499 """pack header information as binary"""
499 """pack header information as binary"""
500 v_fmt = revlog_constants.INDEX_HEADER
500 v_fmt = revlog_constants.INDEX_HEADER
501 return v_fmt.pack(header)
501 return v_fmt.pack(header)
502
502
503 def entry_binary(self, rev):
503 def entry_binary(self, rev):
504 """return the raw binary string representing a revision"""
504 """return the raw binary string representing a revision"""
505 entry = self[rev]
505 entry = self[rev]
506 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
506 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
507 if rev == 0:
507 if rev == 0:
508 p = p[revlog_constants.INDEX_HEADER.size :]
508 p = p[revlog_constants.INDEX_HEADER.size :]
509 return p
509 return p
510
510
511
511
512 class IndexObject(BaseIndexObject):
512 class IndexObject(BaseIndexObject):
513 def __init__(self, data):
513 def __init__(self, data):
514 assert len(data) % self.entry_size == 0, (
514 assert len(data) % self.entry_size == 0, (
515 len(data),
515 len(data),
516 self.entry_size,
516 self.entry_size,
517 len(data) % self.entry_size,
517 len(data) % self.entry_size,
518 )
518 )
519 self._data = data
519 self._data = data
520 self._lgt = len(data) // self.entry_size
520 self._lgt = len(data) // self.entry_size
521 self._extra = []
521 self._extra = []
522
522
523 def _calculate_index(self, i):
523 def _calculate_index(self, i):
524 return i * self.entry_size
524 return i * self.entry_size
525
525
526 def __delitem__(self, i):
526 def __delitem__(self, i):
527 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
527 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
528 raise ValueError(b"deleting slices only supports a:-1 with step 1")
528 raise ValueError(b"deleting slices only supports a:-1 with step 1")
529 i = i.start
529 i = i.start
530 self._check_index(i)
530 self._check_index(i)
531 self._stripnodes(i)
531 self._stripnodes(i)
532 if i < self._lgt:
532 if i < self._lgt:
533 self._data = self._data[: i * self.entry_size]
533 self._data = self._data[: i * self.entry_size]
534 self._lgt = i
534 self._lgt = i
535 self._extra = []
535 self._extra = []
536 else:
536 else:
537 self._extra = self._extra[: i - self._lgt]
537 self._extra = self._extra[: i - self._lgt]
538
538
539
539
540 class PersistentNodeMapIndexObject(IndexObject):
540 class PersistentNodeMapIndexObject(IndexObject):
541 """a Debug oriented class to test persistent nodemap
541 """a Debug oriented class to test persistent nodemap
542
542
543 We need a simple Python object to test the API and higher-level behavior. See
543 We need a simple Python object to test the API and higher-level behavior. See
544 the Rust implementation for more serious usage. This should be used only
544 the Rust implementation for more serious usage. This should be used only
545 through the dedicated `devel.persistent-nodemap` config.
545 through the dedicated `devel.persistent-nodemap` config.
546 """
546 """
547
547
548 def nodemap_data_all(self):
548 def nodemap_data_all(self):
549 """Return bytes containing a full serialization of a nodemap
549 """Return bytes containing a full serialization of a nodemap
550
550
551 The nodemap should be valid for the full set of revisions in the
551 The nodemap should be valid for the full set of revisions in the
552 index."""
552 index."""
553 return nodemaputil.persistent_data(self)
553 return nodemaputil.persistent_data(self)
554
554
555 def nodemap_data_incremental(self):
555 def nodemap_data_incremental(self):
556 """Return bytes containing a incremental update to persistent nodemap
556 """Return bytes containing a incremental update to persistent nodemap
557
557
558 This contains the data for an append-only update of the data provided
558 This contains the data for an append-only update of the data provided
559 in the last call to `update_nodemap_data`.
559 in the last call to `update_nodemap_data`.
560 """
560 """
561 if self._nm_root is None:
561 if self._nm_root is None:
562 return None
562 return None
563 docket = self._nm_docket
563 docket = self._nm_docket
564 changed, data = nodemaputil.update_persistent_data(
564 changed, data = nodemaputil.update_persistent_data(
565 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
565 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
566 )
566 )
567
567
568 self._nm_root = self._nm_max_idx = self._nm_docket = None
568 self._nm_root = self._nm_max_idx = self._nm_docket = None
569 return docket, changed, data
569 return docket, changed, data
570
570
571 def update_nodemap_data(self, docket, nm_data):
571 def update_nodemap_data(self, docket, nm_data):
572 """provide full block of persisted binary data for a nodemap
572 """provide full block of persisted binary data for a nodemap
573
573
574 The data are expected to come from disk. See `nodemap_data_all` for a
574 The data are expected to come from disk. See `nodemap_data_all` for a
575 producer of such data."""
575 producer of such data."""
576 if nm_data is not None:
576 if nm_data is not None:
577 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
577 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
578 if self._nm_root:
578 if self._nm_root:
579 self._nm_docket = docket
579 self._nm_docket = docket
580 else:
580 else:
581 self._nm_root = self._nm_max_idx = self._nm_docket = None
581 self._nm_root = self._nm_max_idx = self._nm_docket = None
582
582
583
583
584 class InlinedIndexObject(BaseIndexObject):
584 class InlinedIndexObject(BaseIndexObject):
585 def __init__(self, data, inline=0):
585 def __init__(self, data, inline=0):
586 self._data = data
586 self._data = data
587 self._lgt = self._inline_scan(None)
587 self._lgt = self._inline_scan(None)
588 self._inline_scan(self._lgt)
588 self._inline_scan(self._lgt)
589 self._extra = []
589 self._extra = []
590
590
591 def _inline_scan(self, lgt):
591 def _inline_scan(self, lgt):
592 off = 0
592 off = 0
593 if lgt is not None:
593 if lgt is not None:
594 self._offsets = [0] * lgt
594 self._offsets = [0] * lgt
595 count = 0
595 count = 0
596 while off <= len(self._data) - self.entry_size:
596 while off <= len(self._data) - self.entry_size:
597 start = off + self.big_int_size
597 start = off + self.big_int_size
598 (s,) = struct.unpack(
598 (s,) = struct.unpack(
599 b'>i',
599 b'>i',
600 self._data[start : start + self.int_size],
600 self._data[start : start + self.int_size],
601 )
601 )
602 if lgt is not None:
602 if lgt is not None:
603 self._offsets[count] = off
603 self._offsets[count] = off
604 count += 1
604 count += 1
605 off += self.entry_size + s
605 off += self.entry_size + s
606 if off != len(self._data):
606 if off != len(self._data):
607 raise ValueError(b"corrupted data")
607 raise ValueError(b"corrupted data")
608 return count
608 return count
609
609
610 def __delitem__(self, i):
610 def __delitem__(self, i):
611 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
611 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
612 raise ValueError(b"deleting slices only supports a:-1 with step 1")
612 raise ValueError(b"deleting slices only supports a:-1 with step 1")
613 i = i.start
613 i = i.start
614 self._check_index(i)
614 self._check_index(i)
615 self._stripnodes(i)
615 self._stripnodes(i)
616 if i < self._lgt:
616 if i < self._lgt:
617 self._offsets = self._offsets[:i]
617 self._offsets = self._offsets[:i]
618 self._lgt = i
618 self._lgt = i
619 self._extra = []
619 self._extra = []
620 else:
620 else:
621 self._extra = self._extra[: i - self._lgt]
621 self._extra = self._extra[: i - self._lgt]
622
622
623 def _calculate_index(self, i):
623 def _calculate_index(self, i):
624 return self._offsets[i]
624 return self._offsets[i]
625
625
626
626
627 def parse_index2(data, inline, revlogv2=False):
627 def parse_index2(data, inline, revlogv2=False):
628 if not inline:
628 if not inline:
629 cls = IndexObject2 if revlogv2 else IndexObject
629 cls = IndexObject2 if revlogv2 else IndexObject
630 return cls(data), None
630 return cls(data), None
631 cls = InlinedIndexObject
631 cls = InlinedIndexObject
632 return cls(data, inline), (0, data)
632 return cls(data, inline), (0, data)
633
633
634
634
635 def parse_index_cl_v2(data):
635 def parse_index_cl_v2(data):
636 return IndexChangelogV2(data), None
636 return IndexChangelogV2(data), None
637
637
638
638
639 class IndexObject2(IndexObject):
639 class IndexObject2(IndexObject):
640 index_format = revlog_constants.INDEX_ENTRY_V2
640 index_format = revlog_constants.INDEX_ENTRY_V2
641
641
642 def replace_sidedata_info(
642 def replace_sidedata_info(
643 self,
643 self,
644 rev,
644 rev,
645 sidedata_offset,
645 sidedata_offset,
646 sidedata_length,
646 sidedata_length,
647 offset_flags,
647 offset_flags,
648 compression_mode,
648 compression_mode,
649 ):
649 ):
650 """
650 """
651 Replace an existing index entry's sidedata offset and length with new
651 Replace an existing index entry's sidedata offset and length with new
652 ones.
652 ones.
653 This cannot be used outside of the context of sidedata rewriting,
653 This cannot be used outside of the context of sidedata rewriting,
654 inside the transaction that creates the revision `rev`.
654 inside the transaction that creates the revision `rev`.
655 """
655 """
656 if rev < 0:
656 if rev < 0:
657 raise KeyError
657 raise KeyError
658 self._check_index(rev)
658 self._check_index(rev)
659 if rev < self._lgt:
659 if rev < self._lgt:
660 msg = b"cannot rewrite entries outside of this transaction"
660 msg = b"cannot rewrite entries outside of this transaction"
661 raise KeyError(msg)
661 raise KeyError(msg)
662 else:
662 else:
663 entry = list(self[rev])
663 entry = list(self[rev])
664 entry[0] = offset_flags
664 entry[0] = offset_flags
665 entry[8] = sidedata_offset
665 entry[8] = sidedata_offset
666 entry[9] = sidedata_length
666 entry[9] = sidedata_length
667 entry[11] = compression_mode
667 entry[11] = compression_mode
668 entry = tuple(entry)
668 entry = tuple(entry)
669 new = self._pack_entry(rev, entry)
669 new = self._pack_entry(rev, entry)
670 self._extra[rev - self._lgt] = new
670 self._extra[rev - self._lgt] = new
671
671
672 def _unpack_entry(self, rev, data):
672 def _unpack_entry(self, rev, data):
673 data = self.index_format.unpack(data)
673 data = self.index_format.unpack(data)
674 entry = data[:10]
674 entry = data[:10]
675 data_comp = data[10] & 3
675 data_comp = data[10] & 3
676 sidedata_comp = (data[10] & (3 << 2)) >> 2
676 sidedata_comp = (data[10] & (3 << 2)) >> 2
677 return entry + (data_comp, sidedata_comp)
677 return entry + (data_comp, sidedata_comp)
678
678
679 def _pack_entry(self, rev, entry):
679 def _pack_entry(self, rev, entry):
680 data = entry[:10]
680 data = entry[:10]
681 data_comp = entry[10] & 3
681 data_comp = entry[10] & 3
682 sidedata_comp = (entry[11] & 3) << 2
682 sidedata_comp = (entry[11] & 3) << 2
683 data += (data_comp | sidedata_comp,)
683 data += (data_comp | sidedata_comp,)
684
684
685 return self.index_format.pack(*data)
685 return self.index_format.pack(*data)
686
686
687 def entry_binary(self, rev):
687 def entry_binary(self, rev):
688 """return the raw binary string representing a revision"""
688 """return the raw binary string representing a revision"""
689 entry = self[rev]
689 entry = self[rev]
690 return self._pack_entry(rev, entry)
690 return self._pack_entry(rev, entry)
691
691
692 def pack_header(self, header):
692 def pack_header(self, header):
693 """pack header information as binary"""
693 """pack header information as binary"""
694 msg = 'version header should go in the docket, not the index: %d'
694 msg = 'version header should go in the docket, not the index: %d'
695 msg %= header
695 msg %= header
696 raise error.ProgrammingError(msg)
696 raise error.ProgrammingError(msg)
697
697
698
698
699 class IndexChangelogV2(IndexObject2):
699 class IndexChangelogV2(IndexObject2):
700 index_format = revlog_constants.INDEX_ENTRY_CL_V2
700 index_format = revlog_constants.INDEX_ENTRY_CL_V2
701
701
702 def _unpack_entry(self, rev, data, r=True):
702 def _unpack_entry(self, rev, data, r=True):
703 items = self.index_format.unpack(data)
703 items = self.index_format.unpack(data)
704 entry = items[:3] + (rev, rev) + items[3:8]
704 entry = items[:3] + (rev, rev) + items[3:8]
705 data_comp = items[8] & 3
705 data_comp = items[8] & 3
706 sidedata_comp = (items[8] >> 2) & 3
706 sidedata_comp = (items[8] >> 2) & 3
707 return entry + (data_comp, sidedata_comp)
707 return entry + (data_comp, sidedata_comp)
708
708
709 def _pack_entry(self, rev, entry):
709 def _pack_entry(self, rev, entry):
710 assert entry[3] == rev, entry[3]
710 assert entry[3] == rev, entry[3]
711 assert entry[4] == rev, entry[4]
711 assert entry[4] == rev, entry[4]
712 data = entry[:3] + entry[5:10]
712 data = entry[:3] + entry[5:10]
713 data_comp = entry[10] & 3
713 data_comp = entry[10] & 3
714 sidedata_comp = (entry[11] & 3) << 2
714 sidedata_comp = (entry[11] & 3) << 2
715 data += (data_comp | sidedata_comp,)
715 data += (data_comp | sidedata_comp,)
716 return self.index_format.pack(*data)
716 return self.index_format.pack(*data)
717
717
718
718
719 def parse_index_devel_nodemap(data, inline):
719 def parse_index_devel_nodemap(data, inline):
720 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
720 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
721 return PersistentNodeMapIndexObject(data), None
721 return PersistentNodeMapIndexObject(data), None
722
722
723
723
724 def parse_dirstate(dmap, copymap, st):
724 def parse_dirstate(dmap, copymap, st):
725 parents = [st[:20], st[20:40]]
725 parents = [st[:20], st[20:40]]
726 # dereference fields so they will be local in loop
726 # dereference fields so they will be local in loop
727 format = b">cllll"
727 format = b">cllll"
728 e_size = struct.calcsize(format)
728 e_size = struct.calcsize(format)
729 pos1 = 40
729 pos1 = 40
730 l = len(st)
730 l = len(st)
731
731
732 # the inner loop
732 # the inner loop
733 while pos1 < l:
733 while pos1 < l:
734 pos2 = pos1 + e_size
734 pos2 = pos1 + e_size
735 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
735 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
736 pos1 = pos2 + e[4]
736 pos1 = pos2 + e[4]
737 f = st[pos2:pos1]
737 f = st[pos2:pos1]
738 if b'\0' in f:
738 if b'\0' in f:
739 f, c = f.split(b'\0')
739 f, c = f.split(b'\0')
740 copymap[f] = c
740 copymap[f] = c
741 dmap[f] = DirstateItem.from_v1_data(*e[:4])
741 dmap[f] = DirstateItem.from_v1_data(*e[:4])
742 return parents
742 return parents
743
743
744
744
745 def pack_dirstate(dmap, copymap, pl, now):
745 def pack_dirstate(dmap, copymap, pl, now):
746 now = int(now)
746 now = int(now)
747 cs = stringio()
747 cs = stringio()
748 write = cs.write
748 write = cs.write
749 write(b"".join(pl))
749 write(b"".join(pl))
750 for f, e in pycompat.iteritems(dmap):
750 for f, e in pycompat.iteritems(dmap):
751 if e.need_delay(now):
751 if e.need_delay(now):
752 # The file was last modified "simultaneously" with the current
752 # The file was last modified "simultaneously" with the current
753 # write to dirstate (i.e. within the same second for file-
753 # write to dirstate (i.e. within the same second for file-
754 # systems with a granularity of 1 sec). This commonly happens
754 # systems with a granularity of 1 sec). This commonly happens
755 # for at least a couple of files on 'update'.
755 # for at least a couple of files on 'update'.
756 # The user could change the file without changing its size
756 # The user could change the file without changing its size
757 # within the same second. Invalidate the file's mtime in
757 # within the same second. Invalidate the file's mtime in
758 # dirstate, forcing future 'status' calls to compare the
758 # dirstate, forcing future 'status' calls to compare the
759 # contents of the file if the size is the same. This prevents
759 # contents of the file if the size is the same. This prevents
760 # mistakenly treating such files as clean.
760 # mistakenly treating such files as clean.
761 e.set_possibly_dirty()
761 e.set_possibly_dirty()
762
762
763 if f in copymap:
763 if f in copymap:
764 f = b"%s\0%s" % (f, copymap[f])
764 f = b"%s\0%s" % (f, copymap[f])
765 e = _pack(
765 e = _pack(
766 b">cllll",
766 b">cllll",
767 e.v1_state(),
767 e.v1_state(),
768 e.v1_mode(),
768 e.v1_mode(),
769 e.v1_size(),
769 e.v1_size(),
770 e.v1_mtime(),
770 e.v1_mtime(),
771 len(f),
771 len(f),
772 )
772 )
773 write(e)
773 write(e)
774 write(f)
774 write(f)
775 return cs.getvalue()
775 return cs.getvalue()
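The `need_delay` check and the comment in `pack_dirstate` above describe the one-second mtime ambiguity dirstate-v1 has to work around: a "normal" file whose recorded mtime equals the second in which the dirstate itself is written could still change within that second without altering its size, so its cached state cannot be trusted. A small standalone sketch of that timeline (plain integers rather than real DirstateItem objects; illustrative only):

    # File last written during second T, dirstate packed during the same second T.
    file_mtime = 1_700_000_000
    dirstate_now = 1_700_000_000

    # Mirrors the mtime comparison in DirstateItem.need_delay(now).
    ambiguous = file_mtime == dirstate_now
    if ambiguous:
        # pack_dirstate() reacts by calling set_possibly_dirty(), dropping the
        # cached mtime so the next `hg status` re-checks the file's contents.
        pass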
@@ -1,426 +1,432
1 use crate::errors::HgError;
1 use crate::errors::HgError;
2 use bitflags::bitflags;
2 use bitflags::bitflags;
3 use std::convert::TryFrom;
3 use std::convert::TryFrom;
4
4
5 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
5 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
6 pub enum EntryState {
6 pub enum EntryState {
7 Normal,
7 Normal,
8 Added,
8 Added,
9 Removed,
9 Removed,
10 Merged,
10 Merged,
11 }
11 }
12
12
13 /// The C implementation uses all signed types. This will be an issue
13 /// The C implementation uses all signed types. This will be an issue
14 /// either when 4GB+ source files are commonplace or in 2038, whichever
14 /// either when 4GB+ source files are commonplace or in 2038, whichever
15 /// comes first.
15 /// comes first.
16 #[derive(Debug, PartialEq, Copy, Clone)]
16 #[derive(Debug, PartialEq, Copy, Clone)]
17 pub struct DirstateEntry {
17 pub struct DirstateEntry {
18 pub(crate) flags: Flags,
18 pub(crate) flags: Flags,
19 mode_size: Option<(i32, i32)>,
19 mode_size: Option<(i32, i32)>,
20 mtime: Option<i32>,
20 mtime: Option<i32>,
21 }
21 }
22
22
23 bitflags! {
23 bitflags! {
24 pub(crate) struct Flags: u8 {
24 pub(crate) struct Flags: u8 {
25 const WDIR_TRACKED = 1 << 0;
25 const WDIR_TRACKED = 1 << 0;
26 const P1_TRACKED = 1 << 1;
26 const P1_TRACKED = 1 << 1;
27 const P2_INFO = 1 << 2;
27 const P2_INFO = 1 << 2;
28 }
28 }
29 }
29 }
30
30
31 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
31 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
32
32
33 pub const MTIME_UNSET: i32 = -1;
33 pub const MTIME_UNSET: i32 = -1;
34
34
35 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
35 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
36 /// other parent. This allows revert to pick the right status back during a
36 /// other parent. This allows revert to pick the right status back during a
37 /// merge.
37 /// merge.
38 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
38 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
39 /// A special value used for internal representation of special case in
39 /// A special value used for internal representation of a special case in
39 /// A special value used for internal representation of a special case in
40 /// dirstate v1 format.
41 pub const SIZE_NON_NORMAL: i32 = -1;
41 pub const SIZE_NON_NORMAL: i32 = -1;
42
42
43 impl DirstateEntry {
43 impl DirstateEntry {
44 pub fn from_v2_data(
44 pub fn from_v2_data(
45 wdir_tracked: bool,
45 wdir_tracked: bool,
46 p1_tracked: bool,
46 p1_tracked: bool,
47 p2_info: bool,
47 p2_info: bool,
48 mode_size: Option<(i32, i32)>,
48 mode_size: Option<(i32, i32)>,
49 mtime: Option<i32>,
49 mtime: Option<i32>,
50 ) -> Self {
50 ) -> Self {
51 let mut flags = Flags::empty();
51 let mut flags = Flags::empty();
52 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
52 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
53 flags.set(Flags::P1_TRACKED, p1_tracked);
53 flags.set(Flags::P1_TRACKED, p1_tracked);
54 flags.set(Flags::P2_INFO, p2_info);
54 flags.set(Flags::P2_INFO, p2_info);
55 Self {
55 Self {
56 flags,
56 flags,
57 mode_size,
57 mode_size,
58 mtime,
58 mtime,
59 }
59 }
60 }
60 }
61
61
62 pub fn from_v1_data(
62 pub fn from_v1_data(
63 state: EntryState,
63 state: EntryState,
64 mode: i32,
64 mode: i32,
65 size: i32,
65 size: i32,
66 mtime: i32,
66 mtime: i32,
67 ) -> Self {
67 ) -> Self {
68 match state {
68 match state {
69 EntryState::Normal => {
69 EntryState::Normal => {
70 if size == SIZE_FROM_OTHER_PARENT {
70 if size == SIZE_FROM_OTHER_PARENT {
71 Self::new_from_p2()
71 Self::new_from_p2()
72 } else if size == SIZE_NON_NORMAL {
72 } else if size == SIZE_NON_NORMAL {
73 Self::new_possibly_dirty()
73 Self::new_possibly_dirty()
74 } else if mtime == MTIME_UNSET {
74 } else if mtime == MTIME_UNSET {
75 Self {
75 Self {
76 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
76 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
77 mode_size: Some((mode, size)),
77 mode_size: Some((mode, size)),
78 mtime: None,
78 mtime: None,
79 }
79 }
80 } else {
80 } else {
81 Self::new_normal(mode, size, mtime)
81 Self::new_normal(mode, size, mtime)
82 }
82 }
83 }
83 }
84 EntryState::Added => Self::new_added(),
84 EntryState::Added => Self::new_added(),
85 EntryState::Removed => Self {
85 EntryState::Removed => Self {
86 flags: if size == SIZE_NON_NORMAL {
86 flags: if size == SIZE_NON_NORMAL {
87 Flags::P1_TRACKED | Flags::P2_INFO
87 Flags::P1_TRACKED | Flags::P2_INFO
88 } else if size == SIZE_FROM_OTHER_PARENT {
88 } else if size == SIZE_FROM_OTHER_PARENT {
89 // We don’t know if P1_TRACKED should be set (file history)
89 // We don’t know if P1_TRACKED should be set (file history)
90 Flags::P2_INFO
90 Flags::P2_INFO
91 } else {
91 } else {
92 Flags::P1_TRACKED
92 Flags::P1_TRACKED
93 },
93 },
94 mode_size: None,
94 mode_size: None,
95 mtime: None,
95 mtime: None,
96 },
96 },
97 EntryState::Merged => Self::new_merged(),
97 EntryState::Merged => Self {
98 flags: Flags::WDIR_TRACKED
99 | Flags::P1_TRACKED // might not be true because of rename ?
100 | Flags::P2_INFO, // might not be true because of rename ?
101 mode_size: None,
102 mtime: None,
103 },
98 }
104 }
99 }
105 }
100
106
101 pub fn new_from_p2() -> Self {
107 pub fn new_from_p2() -> Self {
102 Self {
108 Self {
103 // might be missing P1_TRACKED
109 // might be missing P1_TRACKED
104 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
110 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
105 mode_size: None,
111 mode_size: None,
106 mtime: None,
112 mtime: None,
107 }
113 }
108 }
114 }
109
115
110 pub fn new_possibly_dirty() -> Self {
116 pub fn new_possibly_dirty() -> Self {
111 Self {
117 Self {
112 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
118 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
113 mode_size: None,
119 mode_size: None,
114 mtime: None,
120 mtime: None,
115 }
121 }
116 }
122 }
117
123
118 pub fn new_added() -> Self {
124 pub fn new_added() -> Self {
119 Self {
125 Self {
120 flags: Flags::WDIR_TRACKED,
126 flags: Flags::WDIR_TRACKED,
121 mode_size: None,
127 mode_size: None,
122 mtime: None,
128 mtime: None,
123 }
129 }
124 }
130 }
125
131
126 pub fn new_merged() -> Self {
132 pub fn new_merged() -> Self {
127 Self {
133 Self {
128 flags: Flags::WDIR_TRACKED
134 flags: Flags::WDIR_TRACKED
129 | Flags::P1_TRACKED // might not be true because of rename ?
135 | Flags::P1_TRACKED // might not be true because of rename ?
130 | Flags::P2_INFO, // might not be true because of rename ?
136 | Flags::P2_INFO, // might not be true because of rename ?
131 mode_size: None,
137 mode_size: None,
132 mtime: None,
138 mtime: None,
133 }
139 }
134 }
140 }
135
141
136 pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self {
142 pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self {
137 Self {
143 Self {
138 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
144 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
139 mode_size: Some((mode, size)),
145 mode_size: Some((mode, size)),
140 mtime: Some(mtime),
146 mtime: Some(mtime),
141 }
147 }
142 }
148 }
143
149
144 /// Creates a new entry in "removed" state.
150 /// Creates a new entry in "removed" state.
145 ///
151 ///
146 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
152 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
147 /// `SIZE_FROM_OTHER_PARENT`
153 /// `SIZE_FROM_OTHER_PARENT`
148 pub fn new_removed(size: i32) -> Self {
154 pub fn new_removed(size: i32) -> Self {
149 Self::from_v1_data(EntryState::Removed, 0, size, 0)
155 Self::from_v1_data(EntryState::Removed, 0, size, 0)
150 }
156 }
151
157
152 pub fn tracked(&self) -> bool {
158 pub fn tracked(&self) -> bool {
153 self.flags.contains(Flags::WDIR_TRACKED)
159 self.flags.contains(Flags::WDIR_TRACKED)
154 }
160 }
155
161
156 pub fn p1_tracked(&self) -> bool {
162 pub fn p1_tracked(&self) -> bool {
157 self.flags.contains(Flags::P1_TRACKED)
163 self.flags.contains(Flags::P1_TRACKED)
158 }
164 }
159
165
160 fn in_either_parent(&self) -> bool {
166 fn in_either_parent(&self) -> bool {
161 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
167 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
162 }
168 }
163
169
164 pub fn removed(&self) -> bool {
170 pub fn removed(&self) -> bool {
165 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
171 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
166 }
172 }
167
173
168 pub fn p2_info(&self) -> bool {
174 pub fn p2_info(&self) -> bool {
169 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
175 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
170 }
176 }
171
177
172 pub fn added(&self) -> bool {
178 pub fn added(&self) -> bool {
173 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
179 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
174 }
180 }
175
181
176 pub fn maybe_clean(&self) -> bool {
182 pub fn maybe_clean(&self) -> bool {
177 if !self.flags.contains(Flags::WDIR_TRACKED) {
183 if !self.flags.contains(Flags::WDIR_TRACKED) {
178 false
184 false
179 } else if !self.flags.contains(Flags::P1_TRACKED) {
185 } else if !self.flags.contains(Flags::P1_TRACKED) {
180 false
186 false
        } else if self.flags.contains(Flags::P2_INFO) {
            false
        } else {
            true
        }
    }

    pub fn any_tracked(&self) -> bool {
        self.flags.intersects(
            Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
        )
    }

    /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)`
    pub(crate) fn v2_data(
        &self,
    ) -> (bool, bool, bool, Option<(i32, i32)>, Option<i32>) {
        if !self.any_tracked() {
            // TODO: return an Option instead?
            panic!("Accessing v2_data of an untracked DirstateEntry")
        }
        let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
        let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
        let p2_info = self.flags.contains(Flags::P2_INFO);
        let mode_size = self.mode_size;
        let mtime = self.mtime;
        (wdir_tracked, p1_tracked, p2_info, mode_size, mtime)
    }

    fn v1_state(&self) -> EntryState {
        if !self.any_tracked() {
            // TODO: return an Option instead?
            panic!("Accessing v1_state of an untracked DirstateEntry")
        }
        if self.removed() {
            EntryState::Removed
        } else if self
            .flags
            .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
        {
            EntryState::Merged
        } else if self.added() {
            EntryState::Added
        } else {
            EntryState::Normal
        }
    }

    fn v1_mode(&self) -> i32 {
        if let Some((mode, _size)) = self.mode_size {
            mode
        } else {
            0
        }
    }

    fn v1_size(&self) -> i32 {
        if !self.any_tracked() {
            // TODO: return an Option instead?
            panic!("Accessing v1_size of an untracked DirstateEntry")
        }
        if self.removed()
            && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO)
        {
            SIZE_NON_NORMAL
        } else if self.flags.contains(Flags::P2_INFO) {
            SIZE_FROM_OTHER_PARENT
        } else if self.removed() {
            0
        } else if self.added() {
            SIZE_NON_NORMAL
        } else if let Some((_mode, size)) = self.mode_size {
            size
        } else {
            SIZE_NON_NORMAL
        }
    }

    fn v1_mtime(&self) -> i32 {
        if !self.any_tracked() {
            // TODO: return an Option instead?
            panic!("Accessing v1_mtime of an untracked DirstateEntry")
        }
        if self.removed() {
            0
        } else if self.flags.contains(Flags::P2_INFO) {
            MTIME_UNSET
        } else if !self.flags.contains(Flags::P1_TRACKED) {
            MTIME_UNSET
        } else {
            self.mtime.unwrap_or(MTIME_UNSET)
        }
    }

    // TODO: return `Option<EntryState>`? None when `!self.any_tracked`
    pub fn state(&self) -> EntryState {
        self.v1_state()
    }

    // TODO: return Option?
    pub fn mode(&self) -> i32 {
        self.v1_mode()
    }

    // TODO: return Option?
    pub fn size(&self) -> i32 {
        self.v1_size()
    }

    // TODO: return Option?
    pub fn mtime(&self) -> i32 {
        self.v1_mtime()
    }

    pub fn drop_merge_data(&mut self) {
        if self.flags.contains(Flags::P2_INFO) {
            self.flags.remove(Flags::P2_INFO);
            self.mode_size = None;
            self.mtime = None;
        }
    }

    pub fn set_possibly_dirty(&mut self) {
        self.mtime = None
    }

    pub fn set_clean(&mut self, mode: i32, size: i32, mtime: i32) {
        self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED);
        self.mode_size = Some((mode, size));
        self.mtime = Some(mtime);
    }

    pub fn set_tracked(&mut self) {
        self.flags.insert(Flags::WDIR_TRACKED);
        // `set_tracked` replaces various `normallookup` calls, so we mark
        // the file as needing a lookup.
        //
        // Consider dropping this in the future in favor of something less
        // broad.
        self.mtime = None;
    }

    pub fn set_untracked(&mut self) {
        self.flags.remove(Flags::WDIR_TRACKED);
        self.mode_size = None;
        self.mtime = None;
    }

    /// Returns `(state, mode, size, mtime)` for the purpose of serialization
    /// in the dirstate-v1 format.
    ///
    /// This includes marker values such as `mtime == -1`. In the future we may
    /// want to not represent these cases that way in memory, but serialization
    /// will need to keep the same format.
    pub fn v1_data(&self) -> (u8, i32, i32, i32) {
        (
            self.v1_state().into(),
            self.v1_mode(),
            self.v1_size(),
            self.v1_mtime(),
        )
    }

    pub(crate) fn is_from_other_parent(&self) -> bool {
        self.state() == EntryState::Normal
            && self.size() == SIZE_FROM_OTHER_PARENT
    }

    // TODO: other platforms
    #[cfg(unix)]
    pub fn mode_changed(
        &self,
        filesystem_metadata: &std::fs::Metadata,
    ) -> bool {
        use std::os::unix::fs::MetadataExt;
        const EXEC_BIT_MASK: u32 = 0o100;
        let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK;
        let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
        dirstate_exec_bit != fs_exec_bit
    }

    /// Returns a `(state, mode, size, mtime)` tuple as for
    /// `DirstateMapMethods::debug_iter`.
    pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
        (self.state().into(), self.mode(), self.size(), self.mtime())
    }

    pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
        self.state() == EntryState::Normal && self.mtime() == now
    }

    pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
        let ambiguous = self.mtime_is_ambiguous(now);
        if ambiguous {
            // The file was last modified "simultaneously" with the current
            // write to dirstate (i.e. within the same second for file-
            // systems with a granularity of 1 sec). This commonly happens
            // for at least a couple of files on 'update'.
            // The user could change the file without changing its size
            // within the same second. Invalidate the file's mtime in
            // dirstate, forcing future 'status' calls to compare the
            // contents of the file if the size is the same. This prevents
            // mistakenly treating such files as clean.
            self.set_possibly_dirty()
        }
        ambiguous
    }
}
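
// A minimal caller-side sketch (added here, not part of the original file) of
// the ambiguous-mtime handling explained in `clear_ambiguous_mtime` above.
// `now` stands for the truncated timestamp of the dirstate write being
// performed; the mutable-entry iterator is a hypothetical stand-in for
// however the dirstate map hands entries out.
fn clear_ambiguous_mtimes_sketch<'a>(
    entries: impl Iterator<Item = &'a mut DirstateEntry>,
    now: i32,
) -> usize {
    let mut cleared = 0;
    for entry in entries {
        // Returns `true` when the entry looked clean but shares its mtime
        // with `now`; its mtime has then been invalidated so that a later
        // `hg status` compares file contents instead of trusting the cache.
        if entry.clear_ambiguous_mtime(now) {
            cleared += 1;
        }
    }
    cleared
}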

impl EntryState {
    pub fn is_tracked(self) -> bool {
        use EntryState::*;
        match self {
            Normal | Added | Merged => true,
            Removed => false,
        }
    }
}
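
// A tiny sketch (added here, not in the original file) pinning down the
// `is_tracked` mapping above: of the four dirstate-v1 states, only `Removed`
// describes a file that is no longer tracked in the working copy.
#[cfg(test)]
mod entry_state_is_tracked_sketch {
    use super::*;

    #[test]
    fn only_removed_is_untracked() {
        assert!(EntryState::Normal.is_tracked());
        assert!(EntryState::Added.is_tracked());
        assert!(EntryState::Merged.is_tracked());
        assert!(!EntryState::Removed.is_tracked());
    }
}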

impl TryFrom<u8> for EntryState {
    type Error = HgError;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            b'n' => Ok(EntryState::Normal),
            b'a' => Ok(EntryState::Added),
            b'r' => Ok(EntryState::Removed),
            b'm' => Ok(EntryState::Merged),
            _ => Err(HgError::CorruptedRepository(format!(
                "Incorrect dirstate entry state {}",
                value
            ))),
        }
    }
}

impl Into<u8> for EntryState {
    fn into(self) -> u8 {
        match self {
            EntryState::Normal => b'n',
            EntryState::Added => b'a',
            EntryState::Removed => b'r',
            EntryState::Merged => b'm',
        }
    }
}
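
// An illustrative sketch (not part of the original file) of how a serializer
// could combine `v1_data` with the `EntryState` <-> byte conversions above to
// emit one dirstate-v1 record. The exact on-disk layout assumed here ("state
// byte, three big-endian i32 fields, filename length, filename bytes") is an
// assumption made for this example; the authoritative encoding lives in the
// dirstate-v1 packing code, not in this file.
fn pack_v1_entry_sketch(entry: &DirstateEntry, filename: &[u8]) -> Vec<u8> {
    // `v1_data` already substitutes the v1 marker values (mirroring the
    // constants in the Python implementation: size -2 for "from other
    // parent", size -1 for "non-normal", mtime -1 for "unset"/ambiguous).
    let (state, mode, size, mtime) = entry.v1_data();
    let mut packed = Vec::with_capacity(1 + 4 * 4 + filename.len());
    packed.push(state);
    packed.extend_from_slice(&mode.to_be_bytes());
    packed.extend_from_slice(&size.to_be_bytes());
    packed.extend_from_slice(&mtime.to_be_bytes());
    packed.extend_from_slice(&(filename.len() as i32).to_be_bytes());
    packed.extend_from_slice(filename);
    packed
}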