dirstate-item: replace call to new_normal...
marmoute
r48975:d0081dbc default
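Before the diff itself: this changeset replaces the remaining `from_v1_data` calls to the legacy `new_normal` helper (in both the Python and Rust dirstate code) with direct calls to the generic constructor carrying the same data. The snippet below is only a minimal sketch of that substitution; `DirstateItem` here is a stripped-down, hypothetical stand-in with the same constructor signature, not the real Mercurial class.

# Sketch only: a simplified stand-in mirroring the constructor signature seen
# in the diff below, used to show that both spellings build the same entry.
class DirstateItem:
    def __init__(self, wc_tracked=False, p1_tracked=False, p2_info=False,
                 parentfiledata=None):
        self.wc_tracked = wc_tracked
        self.p1_tracked = p1_tracked
        self.p2_info = p2_info
        self.parentfiledata = parentfiledata  # (mode, size, mtime) or None

    @classmethod
    def new_normal(cls, mode, size, mtime):
        # legacy helper slated for removal
        return cls(wc_tracked=True, p1_tracked=True,
                   parentfiledata=(mode, size, mtime))


mode, size, mtime = 0o644, 12, 1600000000

# before this changeset: go through the legacy helper
old_style = DirstateItem.new_normal(mode, size, mtime)

# after this changeset: call the generic constructor directly
new_style = DirstateItem(
    wc_tracked=True,
    p1_tracked=True,
    parentfiledata=(mode, size, mtime),
)

assert vars(old_style) == vars(new_style)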
@@ -1,746 +1,750 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from ..thirdparty import attr
17 from ..thirdparty import attr
18 from .. import (
18 from .. import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 revlogutils,
21 revlogutils,
22 util,
22 util,
23 )
23 )
24
24
25 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import constants as revlog_constants
26 from ..revlogutils import constants as revlog_constants
27
27
28 stringio = pycompat.bytesio
28 stringio = pycompat.bytesio
29
29
30
30
31 _pack = struct.pack
31 _pack = struct.pack
32 _unpack = struct.unpack
32 _unpack = struct.unpack
33 _compress = zlib.compress
33 _compress = zlib.compress
34 _decompress = zlib.decompress
34 _decompress = zlib.decompress
35
35
36
36
37 # a special value used internally for `size` if the file comes from the other parent
37 # a special value used internally for `size` if the file comes from the other parent
38 FROM_P2 = -2
38 FROM_P2 = -2
39
39
40 # a special value used internally for `size` if the file is modified/merged/added
40 # a special value used internally for `size` if the file is modified/merged/added
41 NONNORMAL = -1
41 NONNORMAL = -1
42
42
43 # a special value used internally for `time` if the time is ambiguous
43 # a special value used internally for `time` if the time is ambiguous
44 AMBIGUOUS_TIME = -1
44 AMBIGUOUS_TIME = -1
45
45
46
46
47 @attr.s(slots=True, init=False)
47 @attr.s(slots=True, init=False)
48 class DirstateItem(object):
48 class DirstateItem(object):
49 """represent a dirstate entry
49 """represent a dirstate entry
50
50
51 It holds multiple attributes
51 It holds multiple attributes
52
52
53 # about file tracking
53 # about file tracking
54 - wc_tracked: is the file tracked by the working copy
54 - wc_tracked: is the file tracked by the working copy
55 - p1_tracked: is the file tracked in the working copy's first parent
55 - p1_tracked: is the file tracked in the working copy's first parent
56 - p2_info: the file has been involved in some merge operation. Either
56 - p2_info: the file has been involved in some merge operation. Either
57 because it was actually merged, or because the p2 version was
57 because it was actually merged, or because the p2 version was
58 ahead, or because some rename moved it there. In either case
58 ahead, or because some rename moved it there. In either case
59 `hg status` will want it displayed as modified.
59 `hg status` will want it displayed as modified.
60
60
61 # about the file state expected from p1 manifest:
61 # about the file state expected from p1 manifest:
62 - mode: the file mode in p1
62 - mode: the file mode in p1
63 - size: the file size in p1
63 - size: the file size in p1
64
64
65 These values can be set to None, which means we don't have a meaningful value
65 These values can be set to None, which means we don't have a meaningful value
66 to compare with. Either because we don't really care about them as their
66 to compare with. Either because we don't really care about them as their
67 `status` is known without having to look at the disk, or because we don't
67 `status` is known without having to look at the disk, or because we don't
68 know these right now and a full comparison will be needed to find out if
68 know these right now and a full comparison will be needed to find out if
69 the file is clean.
69 the file is clean.
70
70
71 # about the file state on disk last time we saw it:
71 # about the file state on disk last time we saw it:
72 - mtime: the last known clean mtime for the file.
72 - mtime: the last known clean mtime for the file.
73
73
74 This value can be set to None if no cacheable state exists. Either because we
74 This value can be set to None if no cacheable state exists. Either because we
75 do not care (see previous section) or because we could not cache something
75 do not care (see previous section) or because we could not cache something
76 yet.
76 yet.
77 """
77 """
78
78
79 _wc_tracked = attr.ib()
79 _wc_tracked = attr.ib()
80 _p1_tracked = attr.ib()
80 _p1_tracked = attr.ib()
81 _p2_info = attr.ib()
81 _p2_info = attr.ib()
82 _mode = attr.ib()
82 _mode = attr.ib()
83 _size = attr.ib()
83 _size = attr.ib()
84 _mtime = attr.ib()
84 _mtime = attr.ib()
85
85
86 def __init__(
86 def __init__(
87 self,
87 self,
88 wc_tracked=False,
88 wc_tracked=False,
89 p1_tracked=False,
89 p1_tracked=False,
90 p2_info=False,
90 p2_info=False,
91 has_meaningful_data=True,
91 has_meaningful_data=True,
92 has_meaningful_mtime=True,
92 has_meaningful_mtime=True,
93 parentfiledata=None,
93 parentfiledata=None,
94 ):
94 ):
95 self._wc_tracked = wc_tracked
95 self._wc_tracked = wc_tracked
96 self._p1_tracked = p1_tracked
96 self._p1_tracked = p1_tracked
97 self._p2_info = p2_info
97 self._p2_info = p2_info
98
98
99 self._mode = None
99 self._mode = None
100 self._size = None
100 self._size = None
101 self._mtime = None
101 self._mtime = None
102 if parentfiledata is None:
102 if parentfiledata is None:
103 has_meaningful_mtime = False
103 has_meaningful_mtime = False
104 has_meaningful_data = False
104 has_meaningful_data = False
105 if has_meaningful_data:
105 if has_meaningful_data:
106 self._mode = parentfiledata[0]
106 self._mode = parentfiledata[0]
107 self._size = parentfiledata[1]
107 self._size = parentfiledata[1]
108 if has_meaningful_mtime:
108 if has_meaningful_mtime:
109 self._mtime = parentfiledata[2]
109 self._mtime = parentfiledata[2]
110
110
111 @classmethod
111 @classmethod
112 def new_normal(cls, mode, size, mtime):
112 def new_normal(cls, mode, size, mtime):
113 """constructor to help legacy API to build a new "normal" item
113 """constructor to help legacy API to build a new "normal" item
114
114
115 Should eventually be removed
115 Should eventually be removed
116 """
116 """
117 assert size != FROM_P2
117 assert size != FROM_P2
118 assert size != NONNORMAL
118 assert size != NONNORMAL
119 return cls(
119 return cls(
120 wc_tracked=True,
120 wc_tracked=True,
121 p1_tracked=True,
121 p1_tracked=True,
122 parentfiledata=(mode, size, mtime),
122 parentfiledata=(mode, size, mtime),
123 )
123 )
124
124
125 @classmethod
125 @classmethod
126 def from_v1_data(cls, state, mode, size, mtime):
126 def from_v1_data(cls, state, mode, size, mtime):
127 """Build a new DirstateItem object from V1 data
127 """Build a new DirstateItem object from V1 data
128
128
129 Since the dirstate-v1 format is frozen, the signature of this function
129 Since the dirstate-v1 format is frozen, the signature of this function
130 is not expected to change, unlike the __init__ one.
130 is not expected to change, unlike the __init__ one.
131 """
131 """
132 if state == b'm':
132 if state == b'm':
133 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
133 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
134 elif state == b'a':
134 elif state == b'a':
135 return cls(wc_tracked=True)
135 return cls(wc_tracked=True)
136 elif state == b'r':
136 elif state == b'r':
137 if size == NONNORMAL:
137 if size == NONNORMAL:
138 p1_tracked = True
138 p1_tracked = True
139 p2_info = True
139 p2_info = True
140 elif size == FROM_P2:
140 elif size == FROM_P2:
141 p1_tracked = False
141 p1_tracked = False
142 p2_info = True
142 p2_info = True
143 else:
143 else:
144 p1_tracked = True
144 p1_tracked = True
145 p2_info = False
145 p2_info = False
146 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
146 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
147 elif state == b'n':
147 elif state == b'n':
148 if size == FROM_P2:
148 if size == FROM_P2:
149 return cls(wc_tracked=True, p2_info=True)
149 return cls(wc_tracked=True, p2_info=True)
150 elif size == NONNORMAL:
150 elif size == NONNORMAL:
151 return cls(wc_tracked=True, p1_tracked=True)
151 return cls(wc_tracked=True, p1_tracked=True)
152 elif mtime == AMBIGUOUS_TIME:
152 elif mtime == AMBIGUOUS_TIME:
153 return cls(
153 return cls(
154 wc_tracked=True,
154 wc_tracked=True,
155 p1_tracked=True,
155 p1_tracked=True,
156 has_meaningful_mtime=False,
156 has_meaningful_mtime=False,
157 parentfiledata=(mode, size, 42),
157 parentfiledata=(mode, size, 42),
158 )
158 )
159 else:
159 else:
160 return cls.new_normal(mode, size, mtime)
160 return cls(
161 wc_tracked=True,
162 p1_tracked=True,
163 parentfiledata=(mode, size, mtime),
164 )
161 else:
165 else:
162 raise RuntimeError(b'unknown state: %s' % state)
166 raise RuntimeError(b'unknown state: %s' % state)
163
167
164 def set_possibly_dirty(self):
168 def set_possibly_dirty(self):
165 """Mark a file as "possibly dirty"
169 """Mark a file as "possibly dirty"
166
170
167 This means the next status call will have to actually check its content
171 This means the next status call will have to actually check its content
168 to make sure it is correct.
172 to make sure it is correct.
169 """
173 """
170 self._mtime = None
174 self._mtime = None
171
175
172 def set_clean(self, mode, size, mtime):
176 def set_clean(self, mode, size, mtime):
173 """mark a file as "clean" cancelling potential "possibly dirty call"
177 """mark a file as "clean" cancelling potential "possibly dirty call"
174
178
175 Note: this function is a descendant of `dirstate.normal` and is
179 Note: this function is a descendant of `dirstate.normal` and is
176 currently expected to be called on "normal" entries only. There is no
180 currently expected to be called on "normal" entries only. There is no
177 reason for this not to change in the future as long as the code is
181 reason for this not to change in the future as long as the code is
178 updated to preserve the proper state of non-normal files.
182 updated to preserve the proper state of non-normal files.
179 """
183 """
180 self._wc_tracked = True
184 self._wc_tracked = True
181 self._p1_tracked = True
185 self._p1_tracked = True
182 self._mode = mode
186 self._mode = mode
183 self._size = size
187 self._size = size
184 self._mtime = mtime
188 self._mtime = mtime
185
189
186 def set_tracked(self):
190 def set_tracked(self):
187 """mark a file as tracked in the working copy
191 """mark a file as tracked in the working copy
188
192
189 This will ultimately be called by commands like `hg add`.
193 This will ultimately be called by commands like `hg add`.
190 """
194 """
191 self._wc_tracked = True
195 self._wc_tracked = True
192 # `set_tracked` is replacing various `normallookup` calls. So we mark
196 # `set_tracked` is replacing various `normallookup` calls. So we mark
193 # the files as needing lookup
197 # the files as needing lookup
194 #
198 #
195 # Consider dropping this in the future in favor of something less broad.
199 # Consider dropping this in the future in favor of something less broad.
196 self._mtime = None
200 self._mtime = None
197
201
198 def set_untracked(self):
202 def set_untracked(self):
199 """mark a file as untracked in the working copy
203 """mark a file as untracked in the working copy
200
204
201 This will ultimately be called by commands like `hg remove`.
205 This will ultimately be called by commands like `hg remove`.
202 """
206 """
203 self._wc_tracked = False
207 self._wc_tracked = False
204 self._mode = None
208 self._mode = None
205 self._size = None
209 self._size = None
206 self._mtime = None
210 self._mtime = None
207
211
208 def drop_merge_data(self):
212 def drop_merge_data(self):
209 """remove all "merge-only" from a DirstateItem
213 """remove all "merge-only" from a DirstateItem
210
214
211 This is to be called by the dirstatemap code when the second parent is dropped
215 This is to be called by the dirstatemap code when the second parent is dropped
212 """
216 """
213 if self._p2_info:
217 if self._p2_info:
214 self._p2_info = False
218 self._p2_info = False
215 self._mode = None
219 self._mode = None
216 self._size = None
220 self._size = None
217 self._mtime = None
221 self._mtime = None
218
222
219 @property
223 @property
220 def mode(self):
224 def mode(self):
221 return self.v1_mode()
225 return self.v1_mode()
222
226
223 @property
227 @property
224 def size(self):
228 def size(self):
225 return self.v1_size()
229 return self.v1_size()
226
230
227 @property
231 @property
228 def mtime(self):
232 def mtime(self):
229 return self.v1_mtime()
233 return self.v1_mtime()
230
234
231 @property
235 @property
232 def state(self):
236 def state(self):
233 """
237 """
234 States are:
238 States are:
235 n normal
239 n normal
236 m needs merging
240 m needs merging
237 r marked for removal
241 r marked for removal
238 a marked for addition
242 a marked for addition
239
243
240 XXX This "state" is a bit obscure and mostly a direct expression of the
244 XXX This "state" is a bit obscure and mostly a direct expression of the
241 dirstatev1 format. It would make sense to ultimately deprecate it in
245 dirstatev1 format. It would make sense to ultimately deprecate it in
242 favor of the more "semantic" attributes.
246 favor of the more "semantic" attributes.
243 """
247 """
244 if not self.any_tracked:
248 if not self.any_tracked:
245 return b'?'
249 return b'?'
246 return self.v1_state()
250 return self.v1_state()
247
251
248 @property
252 @property
249 def tracked(self):
253 def tracked(self):
250 """True is the file is tracked in the working copy"""
254 """True is the file is tracked in the working copy"""
251 return self._wc_tracked
255 return self._wc_tracked
252
256
253 @property
257 @property
254 def any_tracked(self):
258 def any_tracked(self):
255 """True is the file is tracked anywhere (wc or parents)"""
259 """True is the file is tracked anywhere (wc or parents)"""
256 return self._wc_tracked or self._p1_tracked or self._p2_info
260 return self._wc_tracked or self._p1_tracked or self._p2_info
257
261
258 @property
262 @property
259 def added(self):
263 def added(self):
260 """True if the file has been added"""
264 """True if the file has been added"""
261 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
265 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
262
266
263 @property
267 @property
264 def maybe_clean(self):
268 def maybe_clean(self):
265 """True if the file has a chance to be in the "clean" state"""
269 """True if the file has a chance to be in the "clean" state"""
266 if not self._wc_tracked:
270 if not self._wc_tracked:
267 return False
271 return False
268 elif not self._p1_tracked:
272 elif not self._p1_tracked:
269 return False
273 return False
270 elif self._p2_info:
274 elif self._p2_info:
271 return False
275 return False
272 return True
276 return True
273
277
274 @property
278 @property
275 def p1_tracked(self):
279 def p1_tracked(self):
276 """True if the file is tracked in the first parent manifest"""
280 """True if the file is tracked in the first parent manifest"""
277 return self._p1_tracked
281 return self._p1_tracked
278
282
279 @property
283 @property
280 def p2_info(self):
284 def p2_info(self):
281 """True if the file needed to merge or apply any input from p2
285 """True if the file needed to merge or apply any input from p2
282
286
283 See the class documentation for details.
287 See the class documentation for details.
284 """
288 """
285 return self._wc_tracked and self._p2_info
289 return self._wc_tracked and self._p2_info
286
290
287 @property
291 @property
288 def removed(self):
292 def removed(self):
289 """True if the file has been removed"""
293 """True if the file has been removed"""
290 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
294 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
291
295
292 def v1_state(self):
296 def v1_state(self):
293 """return a "state" suitable for v1 serialization"""
297 """return a "state" suitable for v1 serialization"""
294 if not self.any_tracked:
298 if not self.any_tracked:
295 # the object has no state to record, this is -currently-
299 # the object has no state to record, this is -currently-
296 # unsupported
300 # unsupported
297 raise RuntimeError('untracked item')
301 raise RuntimeError('untracked item')
298 elif self.removed:
302 elif self.removed:
299 return b'r'
303 return b'r'
300 elif self._p1_tracked and self._p2_info:
304 elif self._p1_tracked and self._p2_info:
301 return b'm'
305 return b'm'
302 elif self.added:
306 elif self.added:
303 return b'a'
307 return b'a'
304 else:
308 else:
305 return b'n'
309 return b'n'
306
310
307 def v1_mode(self):
311 def v1_mode(self):
308 """return a "mode" suitable for v1 serialization"""
312 """return a "mode" suitable for v1 serialization"""
309 return self._mode if self._mode is not None else 0
313 return self._mode if self._mode is not None else 0
310
314
311 def v1_size(self):
315 def v1_size(self):
312 """return a "size" suitable for v1 serialization"""
316 """return a "size" suitable for v1 serialization"""
313 if not self.any_tracked:
317 if not self.any_tracked:
314 # the object has no state to record, this is -currently-
318 # the object has no state to record, this is -currently-
315 # unsupported
319 # unsupported
316 raise RuntimeError('untracked item')
320 raise RuntimeError('untracked item')
317 elif self.removed and self._p1_tracked and self._p2_info:
321 elif self.removed and self._p1_tracked and self._p2_info:
318 return NONNORMAL
322 return NONNORMAL
319 elif self._p2_info:
323 elif self._p2_info:
320 return FROM_P2
324 return FROM_P2
321 elif self.removed:
325 elif self.removed:
322 return 0
326 return 0
323 elif self.added:
327 elif self.added:
324 return NONNORMAL
328 return NONNORMAL
325 elif self._size is None:
329 elif self._size is None:
326 return NONNORMAL
330 return NONNORMAL
327 else:
331 else:
328 return self._size
332 return self._size
329
333
330 def v1_mtime(self):
334 def v1_mtime(self):
331 """return a "mtime" suitable for v1 serialization"""
335 """return a "mtime" suitable for v1 serialization"""
332 if not self.any_tracked:
336 if not self.any_tracked:
333 # the object has no state to record, this is -currently-
337 # the object has no state to record, this is -currently-
334 # unsupported
338 # unsupported
335 raise RuntimeError('untracked item')
339 raise RuntimeError('untracked item')
336 elif self.removed:
340 elif self.removed:
337 return 0
341 return 0
338 elif self._mtime is None:
342 elif self._mtime is None:
339 return AMBIGUOUS_TIME
343 return AMBIGUOUS_TIME
340 elif self._p2_info:
344 elif self._p2_info:
341 return AMBIGUOUS_TIME
345 return AMBIGUOUS_TIME
342 elif not self._p1_tracked:
346 elif not self._p1_tracked:
343 return AMBIGUOUS_TIME
347 return AMBIGUOUS_TIME
344 else:
348 else:
345 return self._mtime
349 return self._mtime
346
350
347 def need_delay(self, now):
351 def need_delay(self, now):
348 """True if the stored mtime would be ambiguous with the current time"""
352 """True if the stored mtime would be ambiguous with the current time"""
349 return self.v1_state() == b'n' and self.v1_mtime() == now
353 return self.v1_state() == b'n' and self.v1_mtime() == now
350
354
351
355
352 def gettype(q):
356 def gettype(q):
353 return int(q & 0xFFFF)
357 return int(q & 0xFFFF)
354
358
355
359
356 class BaseIndexObject(object):
360 class BaseIndexObject(object):
357 # Can I be passed to an algorithm implemented in Rust?
361 # Can I be passed to an algorithm implemented in Rust?
358 rust_ext_compat = 0
362 rust_ext_compat = 0
359 # Format of an index entry according to Python's `struct` language
363 # Format of an index entry according to Python's `struct` language
360 index_format = revlog_constants.INDEX_ENTRY_V1
364 index_format = revlog_constants.INDEX_ENTRY_V1
361 # Size of a C unsigned long long int, platform independent
365 # Size of a C unsigned long long int, platform independent
362 big_int_size = struct.calcsize(b'>Q')
366 big_int_size = struct.calcsize(b'>Q')
363 # Size of a C long int, platform independent
367 # Size of a C long int, platform independent
364 int_size = struct.calcsize(b'>i')
368 int_size = struct.calcsize(b'>i')
365 # An empty index entry, used as a default value to be overridden, or nullrev
369 # An empty index entry, used as a default value to be overridden, or nullrev
366 null_item = (
370 null_item = (
367 0,
371 0,
368 0,
372 0,
369 0,
373 0,
370 -1,
374 -1,
371 -1,
375 -1,
372 -1,
376 -1,
373 -1,
377 -1,
374 sha1nodeconstants.nullid,
378 sha1nodeconstants.nullid,
375 0,
379 0,
376 0,
380 0,
377 revlog_constants.COMP_MODE_INLINE,
381 revlog_constants.COMP_MODE_INLINE,
378 revlog_constants.COMP_MODE_INLINE,
382 revlog_constants.COMP_MODE_INLINE,
379 )
383 )
380
384
381 @util.propertycache
385 @util.propertycache
382 def entry_size(self):
386 def entry_size(self):
383 return self.index_format.size
387 return self.index_format.size
384
388
385 @property
389 @property
386 def nodemap(self):
390 def nodemap(self):
387 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
391 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
388 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
392 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
389 return self._nodemap
393 return self._nodemap
390
394
391 @util.propertycache
395 @util.propertycache
392 def _nodemap(self):
396 def _nodemap(self):
393 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
397 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
394 for r in range(0, len(self)):
398 for r in range(0, len(self)):
395 n = self[r][7]
399 n = self[r][7]
396 nodemap[n] = r
400 nodemap[n] = r
397 return nodemap
401 return nodemap
398
402
399 def has_node(self, node):
403 def has_node(self, node):
400 """return True if the node exist in the index"""
404 """return True if the node exist in the index"""
401 return node in self._nodemap
405 return node in self._nodemap
402
406
403 def rev(self, node):
407 def rev(self, node):
404 """return a revision for a node
408 """return a revision for a node
405
409
406 If the node is unknown, raise a RevlogError"""
410 If the node is unknown, raise a RevlogError"""
407 return self._nodemap[node]
411 return self._nodemap[node]
408
412
409 def get_rev(self, node):
413 def get_rev(self, node):
410 """return a revision for a node
414 """return a revision for a node
411
415
412 If the node is unknown, return None"""
416 If the node is unknown, return None"""
413 return self._nodemap.get(node)
417 return self._nodemap.get(node)
414
418
415 def _stripnodes(self, start):
419 def _stripnodes(self, start):
416 if '_nodemap' in vars(self):
420 if '_nodemap' in vars(self):
417 for r in range(start, len(self)):
421 for r in range(start, len(self)):
418 n = self[r][7]
422 n = self[r][7]
419 del self._nodemap[n]
423 del self._nodemap[n]
420
424
421 def clearcaches(self):
425 def clearcaches(self):
422 self.__dict__.pop('_nodemap', None)
426 self.__dict__.pop('_nodemap', None)
423
427
424 def __len__(self):
428 def __len__(self):
425 return self._lgt + len(self._extra)
429 return self._lgt + len(self._extra)
426
430
427 def append(self, tup):
431 def append(self, tup):
428 if '_nodemap' in vars(self):
432 if '_nodemap' in vars(self):
429 self._nodemap[tup[7]] = len(self)
433 self._nodemap[tup[7]] = len(self)
430 data = self._pack_entry(len(self), tup)
434 data = self._pack_entry(len(self), tup)
431 self._extra.append(data)
435 self._extra.append(data)
432
436
433 def _pack_entry(self, rev, entry):
437 def _pack_entry(self, rev, entry):
434 assert entry[8] == 0
438 assert entry[8] == 0
435 assert entry[9] == 0
439 assert entry[9] == 0
436 return self.index_format.pack(*entry[:8])
440 return self.index_format.pack(*entry[:8])
437
441
438 def _check_index(self, i):
442 def _check_index(self, i):
439 if not isinstance(i, int):
443 if not isinstance(i, int):
440 raise TypeError(b"expecting int indexes")
444 raise TypeError(b"expecting int indexes")
441 if i < 0 or i >= len(self):
445 if i < 0 or i >= len(self):
442 raise IndexError
446 raise IndexError
443
447
444 def __getitem__(self, i):
448 def __getitem__(self, i):
445 if i == -1:
449 if i == -1:
446 return self.null_item
450 return self.null_item
447 self._check_index(i)
451 self._check_index(i)
448 if i >= self._lgt:
452 if i >= self._lgt:
449 data = self._extra[i - self._lgt]
453 data = self._extra[i - self._lgt]
450 else:
454 else:
451 index = self._calculate_index(i)
455 index = self._calculate_index(i)
452 data = self._data[index : index + self.entry_size]
456 data = self._data[index : index + self.entry_size]
453 r = self._unpack_entry(i, data)
457 r = self._unpack_entry(i, data)
454 if self._lgt and i == 0:
458 if self._lgt and i == 0:
455 offset = revlogutils.offset_type(0, gettype(r[0]))
459 offset = revlogutils.offset_type(0, gettype(r[0]))
456 r = (offset,) + r[1:]
460 r = (offset,) + r[1:]
457 return r
461 return r
458
462
459 def _unpack_entry(self, rev, data):
463 def _unpack_entry(self, rev, data):
460 r = self.index_format.unpack(data)
464 r = self.index_format.unpack(data)
461 r = r + (
465 r = r + (
462 0,
466 0,
463 0,
467 0,
464 revlog_constants.COMP_MODE_INLINE,
468 revlog_constants.COMP_MODE_INLINE,
465 revlog_constants.COMP_MODE_INLINE,
469 revlog_constants.COMP_MODE_INLINE,
466 )
470 )
467 return r
471 return r
468
472
469 def pack_header(self, header):
473 def pack_header(self, header):
470 """pack header information as binary"""
474 """pack header information as binary"""
471 v_fmt = revlog_constants.INDEX_HEADER
475 v_fmt = revlog_constants.INDEX_HEADER
472 return v_fmt.pack(header)
476 return v_fmt.pack(header)
473
477
474 def entry_binary(self, rev):
478 def entry_binary(self, rev):
475 """return the raw binary string representing a revision"""
479 """return the raw binary string representing a revision"""
476 entry = self[rev]
480 entry = self[rev]
477 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
481 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
478 if rev == 0:
482 if rev == 0:
479 p = p[revlog_constants.INDEX_HEADER.size :]
483 p = p[revlog_constants.INDEX_HEADER.size :]
480 return p
484 return p
481
485
482
486
483 class IndexObject(BaseIndexObject):
487 class IndexObject(BaseIndexObject):
484 def __init__(self, data):
488 def __init__(self, data):
485 assert len(data) % self.entry_size == 0, (
489 assert len(data) % self.entry_size == 0, (
486 len(data),
490 len(data),
487 self.entry_size,
491 self.entry_size,
488 len(data) % self.entry_size,
492 len(data) % self.entry_size,
489 )
493 )
490 self._data = data
494 self._data = data
491 self._lgt = len(data) // self.entry_size
495 self._lgt = len(data) // self.entry_size
492 self._extra = []
496 self._extra = []
493
497
494 def _calculate_index(self, i):
498 def _calculate_index(self, i):
495 return i * self.entry_size
499 return i * self.entry_size
496
500
497 def __delitem__(self, i):
501 def __delitem__(self, i):
498 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
502 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
499 raise ValueError(b"deleting slices only supports a:-1 with step 1")
503 raise ValueError(b"deleting slices only supports a:-1 with step 1")
500 i = i.start
504 i = i.start
501 self._check_index(i)
505 self._check_index(i)
502 self._stripnodes(i)
506 self._stripnodes(i)
503 if i < self._lgt:
507 if i < self._lgt:
504 self._data = self._data[: i * self.entry_size]
508 self._data = self._data[: i * self.entry_size]
505 self._lgt = i
509 self._lgt = i
506 self._extra = []
510 self._extra = []
507 else:
511 else:
508 self._extra = self._extra[: i - self._lgt]
512 self._extra = self._extra[: i - self._lgt]
509
513
510
514
511 class PersistentNodeMapIndexObject(IndexObject):
515 class PersistentNodeMapIndexObject(IndexObject):
512 """a Debug oriented class to test persistent nodemap
516 """a Debug oriented class to test persistent nodemap
513
517
514 We need a simple python object to test API and higher level behavior. See
518 We need a simple python object to test API and higher level behavior. See
515 the Rust implementation for more serious usage. This should be used only
519 the Rust implementation for more serious usage. This should be used only
516 through the dedicated `devel.persistent-nodemap` config.
520 through the dedicated `devel.persistent-nodemap` config.
517 """
521 """
518
522
519 def nodemap_data_all(self):
523 def nodemap_data_all(self):
520 """Return bytes containing a full serialization of a nodemap
524 """Return bytes containing a full serialization of a nodemap
521
525
522 The nodemap should be valid for the full set of revisions in the
526 The nodemap should be valid for the full set of revisions in the
523 index."""
527 index."""
524 return nodemaputil.persistent_data(self)
528 return nodemaputil.persistent_data(self)
525
529
526 def nodemap_data_incremental(self):
530 def nodemap_data_incremental(self):
527 """Return bytes containing a incremental update to persistent nodemap
531 """Return bytes containing a incremental update to persistent nodemap
528
532
529 This contains the data for an append-only update of the data provided
533 This contains the data for an append-only update of the data provided
530 in the last call to `update_nodemap_data`.
534 in the last call to `update_nodemap_data`.
531 """
535 """
532 if self._nm_root is None:
536 if self._nm_root is None:
533 return None
537 return None
534 docket = self._nm_docket
538 docket = self._nm_docket
535 changed, data = nodemaputil.update_persistent_data(
539 changed, data = nodemaputil.update_persistent_data(
536 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
540 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
537 )
541 )
538
542
539 self._nm_root = self._nm_max_idx = self._nm_docket = None
543 self._nm_root = self._nm_max_idx = self._nm_docket = None
540 return docket, changed, data
544 return docket, changed, data
541
545
542 def update_nodemap_data(self, docket, nm_data):
546 def update_nodemap_data(self, docket, nm_data):
543 """provide full block of persisted binary data for a nodemap
547 """provide full block of persisted binary data for a nodemap
544
548
545 The data are expected to come from disk. See `nodemap_data_all` for a
549 The data are expected to come from disk. See `nodemap_data_all` for a
546 producer of such data."""
550 producer of such data."""
547 if nm_data is not None:
551 if nm_data is not None:
548 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
552 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
549 if self._nm_root:
553 if self._nm_root:
550 self._nm_docket = docket
554 self._nm_docket = docket
551 else:
555 else:
552 self._nm_root = self._nm_max_idx = self._nm_docket = None
556 self._nm_root = self._nm_max_idx = self._nm_docket = None
553
557
554
558
555 class InlinedIndexObject(BaseIndexObject):
559 class InlinedIndexObject(BaseIndexObject):
556 def __init__(self, data, inline=0):
560 def __init__(self, data, inline=0):
557 self._data = data
561 self._data = data
558 self._lgt = self._inline_scan(None)
562 self._lgt = self._inline_scan(None)
559 self._inline_scan(self._lgt)
563 self._inline_scan(self._lgt)
560 self._extra = []
564 self._extra = []
561
565
562 def _inline_scan(self, lgt):
566 def _inline_scan(self, lgt):
563 off = 0
567 off = 0
564 if lgt is not None:
568 if lgt is not None:
565 self._offsets = [0] * lgt
569 self._offsets = [0] * lgt
566 count = 0
570 count = 0
567 while off <= len(self._data) - self.entry_size:
571 while off <= len(self._data) - self.entry_size:
568 start = off + self.big_int_size
572 start = off + self.big_int_size
569 (s,) = struct.unpack(
573 (s,) = struct.unpack(
570 b'>i',
574 b'>i',
571 self._data[start : start + self.int_size],
575 self._data[start : start + self.int_size],
572 )
576 )
573 if lgt is not None:
577 if lgt is not None:
574 self._offsets[count] = off
578 self._offsets[count] = off
575 count += 1
579 count += 1
576 off += self.entry_size + s
580 off += self.entry_size + s
577 if off != len(self._data):
581 if off != len(self._data):
578 raise ValueError(b"corrupted data")
582 raise ValueError(b"corrupted data")
579 return count
583 return count
580
584
581 def __delitem__(self, i):
585 def __delitem__(self, i):
582 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
586 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
583 raise ValueError(b"deleting slices only supports a:-1 with step 1")
587 raise ValueError(b"deleting slices only supports a:-1 with step 1")
584 i = i.start
588 i = i.start
585 self._check_index(i)
589 self._check_index(i)
586 self._stripnodes(i)
590 self._stripnodes(i)
587 if i < self._lgt:
591 if i < self._lgt:
588 self._offsets = self._offsets[:i]
592 self._offsets = self._offsets[:i]
589 self._lgt = i
593 self._lgt = i
590 self._extra = []
594 self._extra = []
591 else:
595 else:
592 self._extra = self._extra[: i - self._lgt]
596 self._extra = self._extra[: i - self._lgt]
593
597
594 def _calculate_index(self, i):
598 def _calculate_index(self, i):
595 return self._offsets[i]
599 return self._offsets[i]
596
600
597
601
598 def parse_index2(data, inline, revlogv2=False):
602 def parse_index2(data, inline, revlogv2=False):
599 if not inline:
603 if not inline:
600 cls = IndexObject2 if revlogv2 else IndexObject
604 cls = IndexObject2 if revlogv2 else IndexObject
601 return cls(data), None
605 return cls(data), None
602 cls = InlinedIndexObject
606 cls = InlinedIndexObject
603 return cls(data, inline), (0, data)
607 return cls(data, inline), (0, data)
604
608
605
609
606 def parse_index_cl_v2(data):
610 def parse_index_cl_v2(data):
607 return IndexChangelogV2(data), None
611 return IndexChangelogV2(data), None
608
612
609
613
610 class IndexObject2(IndexObject):
614 class IndexObject2(IndexObject):
611 index_format = revlog_constants.INDEX_ENTRY_V2
615 index_format = revlog_constants.INDEX_ENTRY_V2
612
616
613 def replace_sidedata_info(
617 def replace_sidedata_info(
614 self,
618 self,
615 rev,
619 rev,
616 sidedata_offset,
620 sidedata_offset,
617 sidedata_length,
621 sidedata_length,
618 offset_flags,
622 offset_flags,
619 compression_mode,
623 compression_mode,
620 ):
624 ):
621 """
625 """
622 Replace an existing index entry's sidedata offset and length with new
626 Replace an existing index entry's sidedata offset and length with new
623 ones.
627 ones.
624 This cannot be used outside of the context of sidedata rewriting,
628 This cannot be used outside of the context of sidedata rewriting,
625 inside the transaction that creates the revision `rev`.
629 inside the transaction that creates the revision `rev`.
626 """
630 """
627 if rev < 0:
631 if rev < 0:
628 raise KeyError
632 raise KeyError
629 self._check_index(rev)
633 self._check_index(rev)
630 if rev < self._lgt:
634 if rev < self._lgt:
631 msg = b"cannot rewrite entries outside of this transaction"
635 msg = b"cannot rewrite entries outside of this transaction"
632 raise KeyError(msg)
636 raise KeyError(msg)
633 else:
637 else:
634 entry = list(self[rev])
638 entry = list(self[rev])
635 entry[0] = offset_flags
639 entry[0] = offset_flags
636 entry[8] = sidedata_offset
640 entry[8] = sidedata_offset
637 entry[9] = sidedata_length
641 entry[9] = sidedata_length
638 entry[11] = compression_mode
642 entry[11] = compression_mode
639 entry = tuple(entry)
643 entry = tuple(entry)
640 new = self._pack_entry(rev, entry)
644 new = self._pack_entry(rev, entry)
641 self._extra[rev - self._lgt] = new
645 self._extra[rev - self._lgt] = new
642
646
643 def _unpack_entry(self, rev, data):
647 def _unpack_entry(self, rev, data):
644 data = self.index_format.unpack(data)
648 data = self.index_format.unpack(data)
645 entry = data[:10]
649 entry = data[:10]
646 data_comp = data[10] & 3
650 data_comp = data[10] & 3
647 sidedata_comp = (data[10] & (3 << 2)) >> 2
651 sidedata_comp = (data[10] & (3 << 2)) >> 2
648 return entry + (data_comp, sidedata_comp)
652 return entry + (data_comp, sidedata_comp)
649
653
650 def _pack_entry(self, rev, entry):
654 def _pack_entry(self, rev, entry):
651 data = entry[:10]
655 data = entry[:10]
652 data_comp = entry[10] & 3
656 data_comp = entry[10] & 3
653 sidedata_comp = (entry[11] & 3) << 2
657 sidedata_comp = (entry[11] & 3) << 2
654 data += (data_comp | sidedata_comp,)
658 data += (data_comp | sidedata_comp,)
655
659
656 return self.index_format.pack(*data)
660 return self.index_format.pack(*data)
657
661
658 def entry_binary(self, rev):
662 def entry_binary(self, rev):
659 """return the raw binary string representing a revision"""
663 """return the raw binary string representing a revision"""
660 entry = self[rev]
664 entry = self[rev]
661 return self._pack_entry(rev, entry)
665 return self._pack_entry(rev, entry)
662
666
663 def pack_header(self, header):
667 def pack_header(self, header):
664 """pack header information as binary"""
668 """pack header information as binary"""
665 msg = 'version header should go in the docket, not the index: %d'
669 msg = 'version header should go in the docket, not the index: %d'
666 msg %= header
670 msg %= header
667 raise error.ProgrammingError(msg)
671 raise error.ProgrammingError(msg)
668
672
669
673
670 class IndexChangelogV2(IndexObject2):
674 class IndexChangelogV2(IndexObject2):
671 index_format = revlog_constants.INDEX_ENTRY_CL_V2
675 index_format = revlog_constants.INDEX_ENTRY_CL_V2
672
676
673 def _unpack_entry(self, rev, data, r=True):
677 def _unpack_entry(self, rev, data, r=True):
674 items = self.index_format.unpack(data)
678 items = self.index_format.unpack(data)
675 entry = items[:3] + (rev, rev) + items[3:8]
679 entry = items[:3] + (rev, rev) + items[3:8]
676 data_comp = items[8] & 3
680 data_comp = items[8] & 3
677 sidedata_comp = (items[8] >> 2) & 3
681 sidedata_comp = (items[8] >> 2) & 3
678 return entry + (data_comp, sidedata_comp)
682 return entry + (data_comp, sidedata_comp)
679
683
680 def _pack_entry(self, rev, entry):
684 def _pack_entry(self, rev, entry):
681 assert entry[3] == rev, entry[3]
685 assert entry[3] == rev, entry[3]
682 assert entry[4] == rev, entry[4]
686 assert entry[4] == rev, entry[4]
683 data = entry[:3] + entry[5:10]
687 data = entry[:3] + entry[5:10]
684 data_comp = entry[10] & 3
688 data_comp = entry[10] & 3
685 sidedata_comp = (entry[11] & 3) << 2
689 sidedata_comp = (entry[11] & 3) << 2
686 data += (data_comp | sidedata_comp,)
690 data += (data_comp | sidedata_comp,)
687 return self.index_format.pack(*data)
691 return self.index_format.pack(*data)
688
692
689
693
690 def parse_index_devel_nodemap(data, inline):
694 def parse_index_devel_nodemap(data, inline):
691 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
695 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
692 return PersistentNodeMapIndexObject(data), None
696 return PersistentNodeMapIndexObject(data), None
693
697
694
698
695 def parse_dirstate(dmap, copymap, st):
699 def parse_dirstate(dmap, copymap, st):
696 parents = [st[:20], st[20:40]]
700 parents = [st[:20], st[20:40]]
697 # dereference fields so they will be local in loop
701 # dereference fields so they will be local in loop
698 format = b">cllll"
702 format = b">cllll"
699 e_size = struct.calcsize(format)
703 e_size = struct.calcsize(format)
700 pos1 = 40
704 pos1 = 40
701 l = len(st)
705 l = len(st)
702
706
703 # the inner loop
707 # the inner loop
704 while pos1 < l:
708 while pos1 < l:
705 pos2 = pos1 + e_size
709 pos2 = pos1 + e_size
706 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
710 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
707 pos1 = pos2 + e[4]
711 pos1 = pos2 + e[4]
708 f = st[pos2:pos1]
712 f = st[pos2:pos1]
709 if b'\0' in f:
713 if b'\0' in f:
710 f, c = f.split(b'\0')
714 f, c = f.split(b'\0')
711 copymap[f] = c
715 copymap[f] = c
712 dmap[f] = DirstateItem.from_v1_data(*e[:4])
716 dmap[f] = DirstateItem.from_v1_data(*e[:4])
713 return parents
717 return parents
714
718
715
719
716 def pack_dirstate(dmap, copymap, pl, now):
720 def pack_dirstate(dmap, copymap, pl, now):
717 now = int(now)
721 now = int(now)
718 cs = stringio()
722 cs = stringio()
719 write = cs.write
723 write = cs.write
720 write(b"".join(pl))
724 write(b"".join(pl))
721 for f, e in pycompat.iteritems(dmap):
725 for f, e in pycompat.iteritems(dmap):
722 if e.need_delay(now):
726 if e.need_delay(now):
723 # The file was last modified "simultaneously" with the current
727 # The file was last modified "simultaneously" with the current
724 # write to dirstate (i.e. within the same second for file-
728 # write to dirstate (i.e. within the same second for file-
725 # systems with a granularity of 1 sec). This commonly happens
729 # systems with a granularity of 1 sec). This commonly happens
726 # for at least a couple of files on 'update'.
730 # for at least a couple of files on 'update'.
727 # The user could change the file without changing its size
731 # The user could change the file without changing its size
728 # within the same second. Invalidate the file's mtime in
732 # within the same second. Invalidate the file's mtime in
729 # dirstate, forcing future 'status' calls to compare the
733 # dirstate, forcing future 'status' calls to compare the
730 # contents of the file if the size is the same. This prevents
734 # contents of the file if the size is the same. This prevents
731 # mistakenly treating such files as clean.
735 # mistakenly treating such files as clean.
732 e.set_possibly_dirty()
736 e.set_possibly_dirty()
733
737
734 if f in copymap:
738 if f in copymap:
735 f = b"%s\0%s" % (f, copymap[f])
739 f = b"%s\0%s" % (f, copymap[f])
736 e = _pack(
740 e = _pack(
737 b">cllll",
741 b">cllll",
738 e.v1_state(),
742 e.v1_state(),
739 e.v1_mode(),
743 e.v1_mode(),
740 e.v1_size(),
744 e.v1_size(),
741 e.v1_mtime(),
745 e.v1_mtime(),
742 len(f),
746 len(f),
743 )
747 )
744 write(e)
748 write(e)
745 write(f)
749 write(f)
746 return cs.getvalue()
750 return cs.getvalue()
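Between the two files in this changeset, a quick aside on the on-disk layout handled by `parse_dirstate()` and `pack_dirstate()` above: each dirstate-v1 record is a big-endian ">cllll" header (state byte, mode, size, mtime, length of the name field) followed by the file name, with an optional NUL-separated copy source appended to the name. The sketch below round-trips one such record with made-up values; it illustrates the layout only and is not code from the changeset (it also skips the 40-byte parent header that precedes the records).

import struct

V1_ENTRY = b">cllll"  # state, mode, size, mtime, length of the name field

# made-up example entry: a "normal" file recorded as copied from another path
name = b"src/a.txt\0src/old.txt"  # file name + NUL + copy source
header = struct.pack(V1_ENTRY, b"n", 0o644, 12, 1600000000, len(name))
record = header + name

# decoding mirrors the inner loop of parse_dirstate()
e_size = struct.calcsize(V1_ENTRY)
state, mode, size, mtime, name_len = struct.unpack(V1_ENTRY, record[:e_size])
f = record[e_size:e_size + name_len]
copy_source = None
if b"\0" in f:
    f, copy_source = f.split(b"\0")
print(state, oct(mode), size, mtime, f, copy_source)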
@@ -1,410 +1,414 b''
1 use crate::errors::HgError;
1 use crate::errors::HgError;
2 use bitflags::bitflags;
2 use bitflags::bitflags;
3 use std::convert::TryFrom;
3 use std::convert::TryFrom;
4
4
5 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
5 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
6 pub enum EntryState {
6 pub enum EntryState {
7 Normal,
7 Normal,
8 Added,
8 Added,
9 Removed,
9 Removed,
10 Merged,
10 Merged,
11 }
11 }
12
12
13 /// The C implementation uses all signed types. This will be an issue
13 /// The C implementation uses all signed types. This will be an issue
14 /// either when 4GB+ source files are commonplace or in 2038, whichever
14 /// either when 4GB+ source files are commonplace or in 2038, whichever
15 /// comes first.
15 /// comes first.
16 #[derive(Debug, PartialEq, Copy, Clone)]
16 #[derive(Debug, PartialEq, Copy, Clone)]
17 pub struct DirstateEntry {
17 pub struct DirstateEntry {
18 pub(crate) flags: Flags,
18 pub(crate) flags: Flags,
19 mode_size: Option<(i32, i32)>,
19 mode_size: Option<(i32, i32)>,
20 mtime: Option<i32>,
20 mtime: Option<i32>,
21 }
21 }
22
22
23 bitflags! {
23 bitflags! {
24 pub(crate) struct Flags: u8 {
24 pub(crate) struct Flags: u8 {
25 const WDIR_TRACKED = 1 << 0;
25 const WDIR_TRACKED = 1 << 0;
26 const P1_TRACKED = 1 << 1;
26 const P1_TRACKED = 1 << 1;
27 const P2_INFO = 1 << 2;
27 const P2_INFO = 1 << 2;
28 }
28 }
29 }
29 }
30
30
31 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
31 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
32
32
33 pub const MTIME_UNSET: i32 = -1;
33 pub const MTIME_UNSET: i32 = -1;
34
34
35 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
35 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
36 /// other parent. This allows revert to pick the right status back during a
36 /// other parent. This allows revert to pick the right status back during a
37 /// merge.
37 /// merge.
38 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
38 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
39 /// A special value used for internal representation of special case in
39 /// A special value used for internal representation of special case in
40 /// dirstate v1 format.
40 /// dirstate v1 format.
41 pub const SIZE_NON_NORMAL: i32 = -1;
41 pub const SIZE_NON_NORMAL: i32 = -1;
42
42
43 impl DirstateEntry {
43 impl DirstateEntry {
44 pub fn from_v2_data(
44 pub fn from_v2_data(
45 wdir_tracked: bool,
45 wdir_tracked: bool,
46 p1_tracked: bool,
46 p1_tracked: bool,
47 p2_info: bool,
47 p2_info: bool,
48 mode_size: Option<(i32, i32)>,
48 mode_size: Option<(i32, i32)>,
49 mtime: Option<i32>,
49 mtime: Option<i32>,
50 ) -> Self {
50 ) -> Self {
51 let mut flags = Flags::empty();
51 let mut flags = Flags::empty();
52 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
52 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
53 flags.set(Flags::P1_TRACKED, p1_tracked);
53 flags.set(Flags::P1_TRACKED, p1_tracked);
54 flags.set(Flags::P2_INFO, p2_info);
54 flags.set(Flags::P2_INFO, p2_info);
55 Self {
55 Self {
56 flags,
56 flags,
57 mode_size,
57 mode_size,
58 mtime,
58 mtime,
59 }
59 }
60 }
60 }
61
61
62 pub fn from_v1_data(
62 pub fn from_v1_data(
63 state: EntryState,
63 state: EntryState,
64 mode: i32,
64 mode: i32,
65 size: i32,
65 size: i32,
66 mtime: i32,
66 mtime: i32,
67 ) -> Self {
67 ) -> Self {
68 match state {
68 match state {
69 EntryState::Normal => {
69 EntryState::Normal => {
70 if size == SIZE_FROM_OTHER_PARENT {
70 if size == SIZE_FROM_OTHER_PARENT {
71 Self {
71 Self {
72 // might be missing P1_TRACKED
72 // might be missing P1_TRACKED
73 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
73 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
74 mode_size: None,
74 mode_size: None,
75 mtime: None,
75 mtime: None,
76 }
76 }
77 } else if size == SIZE_NON_NORMAL {
77 } else if size == SIZE_NON_NORMAL {
78 Self {
78 Self {
79 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
79 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
80 mode_size: None,
80 mode_size: None,
81 mtime: None,
81 mtime: None,
82 }
82 }
83 } else if mtime == MTIME_UNSET {
83 } else if mtime == MTIME_UNSET {
84 Self {
84 Self {
85 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
85 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
86 mode_size: Some((mode, size)),
86 mode_size: Some((mode, size)),
87 mtime: None,
87 mtime: None,
88 }
88 }
89 } else {
89 } else {
90 Self::new_normal(mode, size, mtime)
90 Self {
91 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
92 mode_size: Some((mode, size)),
93 mtime: Some(mtime),
94 }
91 }
95 }
92 }
96 }
93 EntryState::Added => Self {
97 EntryState::Added => Self {
94 flags: Flags::WDIR_TRACKED,
98 flags: Flags::WDIR_TRACKED,
95 mode_size: None,
99 mode_size: None,
96 mtime: None,
100 mtime: None,
97 },
101 },
98 EntryState::Removed => Self {
102 EntryState::Removed => Self {
99 flags: if size == SIZE_NON_NORMAL {
103 flags: if size == SIZE_NON_NORMAL {
100 Flags::P1_TRACKED | Flags::P2_INFO
104 Flags::P1_TRACKED | Flags::P2_INFO
101 } else if size == SIZE_FROM_OTHER_PARENT {
105 } else if size == SIZE_FROM_OTHER_PARENT {
102 // We don’t know if P1_TRACKED should be set (file history)
106 // We don’t know if P1_TRACKED should be set (file history)
103 Flags::P2_INFO
107 Flags::P2_INFO
104 } else {
108 } else {
105 Flags::P1_TRACKED
109 Flags::P1_TRACKED
106 },
110 },
107 mode_size: None,
111 mode_size: None,
108 mtime: None,
112 mtime: None,
109 },
113 },
110 EntryState::Merged => Self {
114 EntryState::Merged => Self {
111 flags: Flags::WDIR_TRACKED
115 flags: Flags::WDIR_TRACKED
112 | Flags::P1_TRACKED // might not be true because of rename ?
116 | Flags::P1_TRACKED // might not be true because of rename ?
113 | Flags::P2_INFO, // might not be true because of rename ?
117 | Flags::P2_INFO, // might not be true because of rename ?
114 mode_size: None,
118 mode_size: None,
115 mtime: None,
119 mtime: None,
116 },
120 },
117 }
121 }
118 }
122 }
119
123
120 pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self {
124 pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self {
121 Self {
125 Self {
122 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
126 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
123 mode_size: Some((mode, size)),
127 mode_size: Some((mode, size)),
124 mtime: Some(mtime),
128 mtime: Some(mtime),
125 }
129 }
126 }
130 }
127
131
128 /// Creates a new entry in "removed" state.
132 /// Creates a new entry in "removed" state.
129 ///
133 ///
130 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
134 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
131 /// `SIZE_FROM_OTHER_PARENT`
135 /// `SIZE_FROM_OTHER_PARENT`
132 pub fn new_removed(size: i32) -> Self {
136 pub fn new_removed(size: i32) -> Self {
133 Self::from_v1_data(EntryState::Removed, 0, size, 0)
137 Self::from_v1_data(EntryState::Removed, 0, size, 0)
134 }
138 }
135
139
136 pub fn tracked(&self) -> bool {
140 pub fn tracked(&self) -> bool {
137 self.flags.contains(Flags::WDIR_TRACKED)
141 self.flags.contains(Flags::WDIR_TRACKED)
138 }
142 }
139
143
140 pub fn p1_tracked(&self) -> bool {
144 pub fn p1_tracked(&self) -> bool {
141 self.flags.contains(Flags::P1_TRACKED)
145 self.flags.contains(Flags::P1_TRACKED)
142 }
146 }
143
147
144 fn in_either_parent(&self) -> bool {
148 fn in_either_parent(&self) -> bool {
145 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
149 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
146 }
150 }
147
151
148 pub fn removed(&self) -> bool {
152 pub fn removed(&self) -> bool {
149 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
153 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
150 }
154 }
151
155
152 pub fn p2_info(&self) -> bool {
156 pub fn p2_info(&self) -> bool {
153 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
157 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
154 }
158 }
155
159
156 pub fn added(&self) -> bool {
160 pub fn added(&self) -> bool {
157 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
161 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
158 }
162 }
159
163
160 pub fn maybe_clean(&self) -> bool {
164 pub fn maybe_clean(&self) -> bool {
161 if !self.flags.contains(Flags::WDIR_TRACKED) {
165 if !self.flags.contains(Flags::WDIR_TRACKED) {
162 false
166 false
163 } else if !self.flags.contains(Flags::P1_TRACKED) {
167 } else if !self.flags.contains(Flags::P1_TRACKED) {
164 false
168 false
165 } else if self.flags.contains(Flags::P2_INFO) {
169 } else if self.flags.contains(Flags::P2_INFO) {
166 false
170 false
167 } else {
171 } else {
168 true
172 true
169 }
173 }
170 }
174 }
171
175
172 pub fn any_tracked(&self) -> bool {
176 pub fn any_tracked(&self) -> bool {
173 self.flags.intersects(
177 self.flags.intersects(
174 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
178 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
175 )
179 )
176 }
180 }
177
181
178 /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)`
182 /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)`
179 pub(crate) fn v2_data(
183 pub(crate) fn v2_data(
180 &self,
184 &self,
181 ) -> (bool, bool, bool, Option<(i32, i32)>, Option<i32>) {
185 ) -> (bool, bool, bool, Option<(i32, i32)>, Option<i32>) {
182 if !self.any_tracked() {
186 if !self.any_tracked() {
183 // TODO: return an Option instead?
187 // TODO: return an Option instead?
184 panic!("Accessing v1_state of an untracked DirstateEntry")
188 panic!("Accessing v1_state of an untracked DirstateEntry")
185 }
189 }
186 let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
190 let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
187 let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
191 let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
188 let p2_info = self.flags.contains(Flags::P2_INFO);
192 let p2_info = self.flags.contains(Flags::P2_INFO);
189 let mode_size = self.mode_size;
193 let mode_size = self.mode_size;
190 let mtime = self.mtime;
194 let mtime = self.mtime;
191 (wdir_tracked, p1_tracked, p2_info, mode_size, mtime)
195 (wdir_tracked, p1_tracked, p2_info, mode_size, mtime)
192 }
196 }
193
197
194 fn v1_state(&self) -> EntryState {
198 fn v1_state(&self) -> EntryState {
195 if !self.any_tracked() {
199 if !self.any_tracked() {
196 // TODO: return an Option instead?
200 // TODO: return an Option instead?
197 panic!("Accessing v1_state of an untracked DirstateEntry")
201 panic!("Accessing v1_state of an untracked DirstateEntry")
198 }
202 }
199 if self.removed() {
203 if self.removed() {
200 EntryState::Removed
204 EntryState::Removed
201 } else if self
205 } else if self
202 .flags
206 .flags
203 .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
207 .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
204 {
208 {
205 EntryState::Merged
209 EntryState::Merged
206 } else if self.added() {
210 } else if self.added() {
207 EntryState::Added
211 EntryState::Added
208 } else {
212 } else {
209 EntryState::Normal
213 EntryState::Normal
210 }
214 }
211 }
215 }
212
216
213 fn v1_mode(&self) -> i32 {
217 fn v1_mode(&self) -> i32 {
214 if let Some((mode, _size)) = self.mode_size {
218 if let Some((mode, _size)) = self.mode_size {
215 mode
219 mode
216 } else {
220 } else {
217 0
221 0
218 }
222 }
219 }
223 }
220
224
221 fn v1_size(&self) -> i32 {
225 fn v1_size(&self) -> i32 {
222 if !self.any_tracked() {
226 if !self.any_tracked() {
223 // TODO: return an Option instead?
227 // TODO: return an Option instead?
224 panic!("Accessing v1_size of an untracked DirstateEntry")
228 panic!("Accessing v1_size of an untracked DirstateEntry")
225 }
229 }
226 if self.removed()
230 if self.removed()
227 && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO)
231 && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO)
228 {
232 {
229 SIZE_NON_NORMAL
233 SIZE_NON_NORMAL
230 } else if self.flags.contains(Flags::P2_INFO) {
234 } else if self.flags.contains(Flags::P2_INFO) {
231 SIZE_FROM_OTHER_PARENT
235 SIZE_FROM_OTHER_PARENT
232 } else if self.removed() {
236 } else if self.removed() {
233 0
237 0
234 } else if self.added() {
238 } else if self.added() {
235 SIZE_NON_NORMAL
239 SIZE_NON_NORMAL
236 } else if let Some((_mode, size)) = self.mode_size {
240 } else if let Some((_mode, size)) = self.mode_size {
237 size
241 size
238 } else {
242 } else {
239 SIZE_NON_NORMAL
243 SIZE_NON_NORMAL
240 }
244 }
241 }
245 }
242
246
243 fn v1_mtime(&self) -> i32 {
247 fn v1_mtime(&self) -> i32 {
244 if !self.any_tracked() {
248 if !self.any_tracked() {
245 // TODO: return an Option instead?
249 // TODO: return an Option instead?
246 panic!("Accessing v1_mtime of an untracked DirstateEntry")
250 panic!("Accessing v1_mtime of an untracked DirstateEntry")
247 }
251 }
248 if self.removed() {
252 if self.removed() {
249 0
253 0
250 } else if self.flags.contains(Flags::P2_INFO) {
254 } else if self.flags.contains(Flags::P2_INFO) {
251 MTIME_UNSET
255 MTIME_UNSET
252 } else if !self.flags.contains(Flags::P1_TRACKED) {
256 } else if !self.flags.contains(Flags::P1_TRACKED) {
253 MTIME_UNSET
257 MTIME_UNSET
254 } else {
258 } else {
255 self.mtime.unwrap_or(MTIME_UNSET)
259 self.mtime.unwrap_or(MTIME_UNSET)
256 }
260 }
257 }
261 }

    // TODO: return `Option<EntryState>`? None when `!self.any_tracked`
    pub fn state(&self) -> EntryState {
        self.v1_state()
    }

    // TODO: return Option?
    pub fn mode(&self) -> i32 {
        self.v1_mode()
    }

    // TODO: return Option?
    pub fn size(&self) -> i32 {
        self.v1_size()
    }

    // TODO: return Option?
    pub fn mtime(&self) -> i32 {
        self.v1_mtime()
    }

    pub fn drop_merge_data(&mut self) {
        if self.flags.contains(Flags::P2_INFO) {
            self.flags.remove(Flags::P2_INFO);
            self.mode_size = None;
            self.mtime = None;
        }
    }

    pub fn set_possibly_dirty(&mut self) {
        self.mtime = None
    }

    pub fn set_clean(&mut self, mode: i32, size: i32, mtime: i32) {
        self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED);
        self.mode_size = Some((mode, size));
        self.mtime = Some(mtime);
    }

    pub fn set_tracked(&mut self) {
        self.flags.insert(Flags::WDIR_TRACKED);
        // `set_tracked` replaces various `normallookup` calls, so we mark
        // the file as needing a lookup.
        //
        // Consider dropping this in the future in favor of something less
        // broad.
        self.mtime = None;
    }

    pub fn set_untracked(&mut self) {
        self.flags.remove(Flags::WDIR_TRACKED);
        self.mode_size = None;
        self.mtime = None;
    }
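
    // A minimal usage sketch (not hg-core API): the typical effect of the
    // mutators above on a single entry. The caller, the mode/size/mtime
    // values, and the name `demo_mutators` are illustrative assumptions.
    #[allow(dead_code)]
    fn demo_mutators(entry: &mut DirstateEntry) {
        entry.set_tracked(); // start tracking; mtime dropped so status re-checks
        entry.set_clean(0o644, 1024, 1_650_000_000); // record fresh mode/size/mtime
        entry.set_possibly_dirty(); // invalidate only the cached mtime
        entry.drop_merge_data(); // forget any p2/merge information, if present
        entry.set_untracked(); // stop tracking and clear cached stat data
    }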

    /// Returns `(state, mode, size, mtime)` for the purpose of serialization
    /// in the dirstate-v1 format.
    ///
    /// This includes marker values such as `mtime == -1`. In the future we may
    /// want to not represent these cases that way in memory, but serialization
    /// will need to keep the same format.
    pub fn v1_data(&self) -> (u8, i32, i32, i32) {
        (
            self.v1_state().into(),
            self.v1_mode(),
            self.v1_size(),
            self.v1_mtime(),
        )
    }
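
    // A minimal sketch, not part of hg-core: packing the tuple returned by
    // `v1_data` into 13 bytes (one state byte followed by three big-endian
    // i32 fields), on the assumption that this mirrors the fixed leading
    // fields of a dirstate-v1 record. `pack_v1_header` is a hypothetical
    // helper name.
    #[allow(dead_code)]
    fn pack_v1_header(data: (u8, i32, i32, i32)) -> [u8; 13] {
        let (state, mode, size, mtime) = data;
        let mut out = [0u8; 13];
        out[0] = state;
        out[1..5].copy_from_slice(&mode.to_be_bytes());
        out[5..9].copy_from_slice(&size.to_be_bytes());
        out[9..13].copy_from_slice(&mtime.to_be_bytes());
        out
    }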

    pub(crate) fn is_from_other_parent(&self) -> bool {
        self.state() == EntryState::Normal
            && self.size() == SIZE_FROM_OTHER_PARENT
    }

    // TODO: other platforms
    #[cfg(unix)]
    pub fn mode_changed(
        &self,
        filesystem_metadata: &std::fs::Metadata,
    ) -> bool {
        use std::os::unix::fs::MetadataExt;
        const EXEC_BIT_MASK: u32 = 0o100;
        let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK;
        let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
        dirstate_exec_bit != fs_exec_bit
    }
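
    // A minimal usage sketch (names are illustrative, not hg-core API):
    // feeding fresh filesystem metadata to `mode_changed` during a status
    // check on a Unix platform.
    #[cfg(unix)]
    #[allow(dead_code)]
    fn exec_bit_flipped(
        entry: &DirstateEntry,
        path: &std::path::Path,
    ) -> std::io::Result<bool> {
        // `std::fs::metadata` follows symlinks, like a plain stat().
        let metadata = std::fs::metadata(path)?;
        Ok(entry.mode_changed(&metadata))
    }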

    /// Returns a `(state, mode, size, mtime)` tuple as for
    /// `DirstateMapMethods::debug_iter`.
    pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
        (self.state().into(), self.mode(), self.size(), self.mtime())
    }

    pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
        self.state() == EntryState::Normal && self.mtime() == now
    }

    pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
        let ambiguous = self.mtime_is_ambiguous(now);
        if ambiguous {
            // The file was last modified "simultaneously" with the current
            // write to dirstate (i.e. within the same second for file-
            // systems with a granularity of 1 sec). This commonly happens
            // for at least a couple of files on 'update'.
            // The user could change the file without changing its size
            // within the same second. Invalidate the file's mtime in
            // dirstate, forcing future 'status' calls to compare the
            // contents of the file if the size is the same. This prevents
            // mistakenly treating such files as clean.
            self.set_possibly_dirty()
        }
        ambiguous
    }
}
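
// A minimal sketch (not hg-core API): applying `clear_ambiguous_mtime` to a
// batch of entries at dirstate-write time and counting how many mtimes had
// to be invalidated. `entries` and `now` are assumed to come from the caller.
#[allow(dead_code)]
fn clear_ambiguous_mtimes(entries: &mut [DirstateEntry], now: i32) -> usize {
    let mut cleared = 0;
    for entry in entries.iter_mut() {
        // Returns true when the recorded mtime equals `now` and was dropped.
        if entry.clear_ambiguous_mtime(now) {
            cleared += 1;
        }
    }
    cleared
}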

impl EntryState {
    pub fn is_tracked(self) -> bool {
        use EntryState::*;
        match self {
            Normal | Added | Merged => true,
            Removed => false,
        }
    }
}

impl TryFrom<u8> for EntryState {
    type Error = HgError;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            b'n' => Ok(EntryState::Normal),
            b'a' => Ok(EntryState::Added),
            b'r' => Ok(EntryState::Removed),
            b'm' => Ok(EntryState::Merged),
            _ => Err(HgError::CorruptedRepository(format!(
                "Incorrect dirstate entry state {}",
                value
            ))),
        }
    }
}

impl Into<u8> for EntryState {
    fn into(self) -> u8 {
        match self {
            EntryState::Normal => b'n',
            EntryState::Added => b'a',
            EntryState::Removed => b'r',
            EntryState::Merged => b'm',
        }
    }
}
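
// A minimal sketch (illustrative only): round-tripping the one-byte
// dirstate-v1 state codes through the `TryFrom<u8>` and `Into<u8>`
// conversions above, and checking that unknown codes are rejected.
#[allow(dead_code)]
fn state_roundtrip() -> Result<(), HgError> {
    for byte in [b'n', b'a', b'r', b'm'] {
        let state = EntryState::try_from(byte)?;
        let back: u8 = state.into();
        assert_eq!(back, byte);
    }
    assert!(EntryState::try_from(b'x').is_err());
    Ok(())
}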