dirstate-item: replace call to new_from_p2...
marmoute
r48970:da304f78 default
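This commit inlines the legacy `new_from_p2` constructor at its call site in `DirstateItem.from_v1_data` (Python) and `DirstateEntry::from_v1_data` (Rust), spelling out the semantic flags instead of going through the helper. A minimal sketch of the equivalence, using the pure-Python class from the hunk below (the import path and the assertions are illustrative, not part of the change):

    from mercurial.pure.parsers import DirstateItem, FROM_P2

    # The legacy helper and the inlined constructor build the same entry:
    # tracked in the working copy, carrying information from p2.
    old_style = DirstateItem.new_from_p2()
    new_style = DirstateItem(wc_tracked=True, p2_info=True)

    # Both serialize to the same dirstate-v1 record: state 'n', size FROM_P2.
    assert old_style.v1_state() == new_style.v1_state() == b'n'
    assert old_style.v1_size() == new_style.v1_size() == FROM_P2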
@@ -1,759 +1,759 b''
1 # parsers.py - Python implementation of parsers.c
1 # parsers.py - Python implementation of parsers.c
2 #
2 #
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import struct
10 import struct
11 import zlib
11 import zlib
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 sha1nodeconstants,
15 sha1nodeconstants,
16 )
16 )
17 from ..thirdparty import attr
17 from ..thirdparty import attr
18 from .. import (
18 from .. import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 revlogutils,
21 revlogutils,
22 util,
22 util,
23 )
23 )
24
24
25 from ..revlogutils import nodemap as nodemaputil
25 from ..revlogutils import nodemap as nodemaputil
26 from ..revlogutils import constants as revlog_constants
26 from ..revlogutils import constants as revlog_constants
27
27
28 stringio = pycompat.bytesio
28 stringio = pycompat.bytesio
29
29
30
30
31 _pack = struct.pack
31 _pack = struct.pack
32 _unpack = struct.unpack
32 _unpack = struct.unpack
33 _compress = zlib.compress
33 _compress = zlib.compress
34 _decompress = zlib.decompress
34 _decompress = zlib.decompress
35
35
36
36
37 # a special value used internally for `size` if the file comes from the other parent
37 # a special value used internally for `size` if the file comes from the other parent
38 FROM_P2 = -2
38 FROM_P2 = -2
39
39
40 # a special value used internally for `size` if the file is modified/merged/added
40 # a special value used internally for `size` if the file is modified/merged/added
41 NONNORMAL = -1
41 NONNORMAL = -1
42
42
43 # a special value used internally for `time` if the time is ambiguous
43 # a special value used internally for `time` if the time is ambiguous
44 AMBIGUOUS_TIME = -1
44 AMBIGUOUS_TIME = -1
45
45
46
46
47 @attr.s(slots=True, init=False)
47 @attr.s(slots=True, init=False)
48 class DirstateItem(object):
48 class DirstateItem(object):
49 """represent a dirstate entry
49 """represent a dirstate entry
50
50
51 It holds multiple attributes
51 It holds multiple attributes
52
52
53 # about file tracking
53 # about file tracking
54 - wc_tracked: is the file tracked by the working copy
54 - wc_tracked: is the file tracked by the working copy
55 - p1_tracked: is the file tracked in working copy first parent
55 - p1_tracked: is the file tracked in working copy first parent
56 - p2_info: the file has been involved in some merge operation. Either
56 - p2_info: the file has been involved in some merge operation. Either
57 because it was actually merged, or because the p2 version was
57 because it was actually merged, or because the p2 version was
58 ahead, or because some rename moved it there. In either case
58 ahead, or because some rename moved it there. In either case
59 `hg status` will want it displayed as modified.
59 `hg status` will want it displayed as modified.
60
60
61 # about the file state expected from p1 manifest:
61 # about the file state expected from p1 manifest:
62 - mode: the file mode in p1
62 - mode: the file mode in p1
63 - size: the file size in p1
63 - size: the file size in p1
64
64
65 These values can be set to None, which means we don't have a meaningful value
65 These values can be set to None, which means we don't have a meaningful value
66 to compare with. Either because we don't really care about them as their
66 to compare with. Either because we don't really care about them as their
67 `status` is known without having to look at the disk or because we don't
67 `status` is known without having to look at the disk or because we don't
68 know these right now and a full comparison will be needed to find out if
68 know these right now and a full comparison will be needed to find out if
69 the file is clean.
69 the file is clean.
70
70
71 # about the file state on disk last time we saw it:
71 # about the file state on disk last time we saw it:
72 - mtime: the last known clean mtime for the file.
72 - mtime: the last known clean mtime for the file.
73
73
74 This value can be set to None if no cacheable state exists. Either because we
74 This value can be set to None if no cacheable state exists. Either because we
75 do not care (see previous section) or because we could not cache something
75 do not care (see previous section) or because we could not cache something
76 yet.
76 yet.
77 """
77 """
78
78
79 _wc_tracked = attr.ib()
79 _wc_tracked = attr.ib()
80 _p1_tracked = attr.ib()
80 _p1_tracked = attr.ib()
81 _p2_info = attr.ib()
81 _p2_info = attr.ib()
82 _mode = attr.ib()
82 _mode = attr.ib()
83 _size = attr.ib()
83 _size = attr.ib()
84 _mtime = attr.ib()
84 _mtime = attr.ib()
85
85
86 def __init__(
86 def __init__(
87 self,
87 self,
88 wc_tracked=False,
88 wc_tracked=False,
89 p1_tracked=False,
89 p1_tracked=False,
90 p2_info=False,
90 p2_info=False,
91 has_meaningful_data=True,
91 has_meaningful_data=True,
92 has_meaningful_mtime=True,
92 has_meaningful_mtime=True,
93 parentfiledata=None,
93 parentfiledata=None,
94 ):
94 ):
95 self._wc_tracked = wc_tracked
95 self._wc_tracked = wc_tracked
96 self._p1_tracked = p1_tracked
96 self._p1_tracked = p1_tracked
97 self._p2_info = p2_info
97 self._p2_info = p2_info
98
98
99 self._mode = None
99 self._mode = None
100 self._size = None
100 self._size = None
101 self._mtime = None
101 self._mtime = None
102 if parentfiledata is None:
102 if parentfiledata is None:
103 has_meaningful_mtime = False
103 has_meaningful_mtime = False
104 has_meaningful_data = False
104 has_meaningful_data = False
105 if has_meaningful_data:
105 if has_meaningful_data:
106 self._mode = parentfiledata[0]
106 self._mode = parentfiledata[0]
107 self._size = parentfiledata[1]
107 self._size = parentfiledata[1]
108 if has_meaningful_mtime:
108 if has_meaningful_mtime:
109 self._mtime = parentfiledata[2]
109 self._mtime = parentfiledata[2]
110
110
111 @classmethod
111 @classmethod
112 def new_from_p2(cls):
112 def new_from_p2(cls):
113 """constructor to help legacy API to build a new "from_p2" item
113 """constructor to help legacy API to build a new "from_p2" item
114
114
115 Should eventually be removed
115 Should eventually be removed
116 """
116 """
117 return cls(wc_tracked=True, p2_info=True)
117 return cls(wc_tracked=True, p2_info=True)
118
118
119 @classmethod
119 @classmethod
120 def new_possibly_dirty(cls):
120 def new_possibly_dirty(cls):
121 """constructor to help legacy API to build a new "possibly_dirty" item
121 """constructor to help legacy API to build a new "possibly_dirty" item
122
122
123 Should eventually be removed
123 Should eventually be removed
124 """
124 """
125 return cls(wc_tracked=True, p1_tracked=True)
125 return cls(wc_tracked=True, p1_tracked=True)
126
126
127 @classmethod
127 @classmethod
128 def new_normal(cls, mode, size, mtime):
128 def new_normal(cls, mode, size, mtime):
129 """constructor to help legacy API to build a new "normal" item
129 """constructor to help legacy API to build a new "normal" item
130
130
131 Should eventually be removed
131 Should eventually be removed
132 """
132 """
133 assert size != FROM_P2
133 assert size != FROM_P2
134 assert size != NONNORMAL
134 assert size != NONNORMAL
135 return cls(
135 return cls(
136 wc_tracked=True,
136 wc_tracked=True,
137 p1_tracked=True,
137 p1_tracked=True,
138 parentfiledata=(mode, size, mtime),
138 parentfiledata=(mode, size, mtime),
139 )
139 )
140
140
141 @classmethod
141 @classmethod
142 def from_v1_data(cls, state, mode, size, mtime):
142 def from_v1_data(cls, state, mode, size, mtime):
143 """Build a new DirstateItem object from V1 data
143 """Build a new DirstateItem object from V1 data
144
144
145 Since the dirstate-v1 format is frozen, the signature of this function
145 Since the dirstate-v1 format is frozen, the signature of this function
146 is not expected to change, unlike the __init__ one.
146 is not expected to change, unlike the __init__ one.
147 """
147 """
148 if state == b'm':
148 if state == b'm':
149 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
149 return cls(wc_tracked=True, p1_tracked=True, p2_info=True)
150 elif state == b'a':
150 elif state == b'a':
151 return cls(wc_tracked=True)
151 return cls(wc_tracked=True)
152 elif state == b'r':
152 elif state == b'r':
153 if size == NONNORMAL:
153 if size == NONNORMAL:
154 p1_tracked = True
154 p1_tracked = True
155 p2_info = True
155 p2_info = True
156 elif size == FROM_P2:
156 elif size == FROM_P2:
157 p1_tracked = False
157 p1_tracked = False
158 p2_info = True
158 p2_info = True
159 else:
159 else:
160 p1_tracked = True
160 p1_tracked = True
161 p2_info = False
161 p2_info = False
162 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
162 return cls(p1_tracked=p1_tracked, p2_info=p2_info)
163 elif state == b'n':
163 elif state == b'n':
164 if size == FROM_P2:
164 if size == FROM_P2:
165 return cls.new_from_p2()
165 return cls(wc_tracked=True, p2_info=True)
166 elif size == NONNORMAL:
166 elif size == NONNORMAL:
167 return cls.new_possibly_dirty()
167 return cls.new_possibly_dirty()
168 elif mtime == AMBIGUOUS_TIME:
168 elif mtime == AMBIGUOUS_TIME:
169 instance = cls.new_normal(mode, size, 42)
169 instance = cls.new_normal(mode, size, 42)
170 instance._mtime = None
170 instance._mtime = None
171 return instance
171 return instance
172 else:
172 else:
173 return cls.new_normal(mode, size, mtime)
173 return cls.new_normal(mode, size, mtime)
174 else:
174 else:
175 raise RuntimeError(b'unknown state: %s' % state)
175 raise RuntimeError(b'unknown state: %s' % state)
176
176
177 def set_possibly_dirty(self):
177 def set_possibly_dirty(self):
178 """Mark a file as "possibly dirty"
178 """Mark a file as "possibly dirty"
179
179
180 This means the next status call will have to actually check its content
180 This means the next status call will have to actually check its content
181 to make sure it is correct.
181 to make sure it is correct.
182 """
182 """
183 self._mtime = None
183 self._mtime = None
184
184
185 def set_clean(self, mode, size, mtime):
185 def set_clean(self, mode, size, mtime):
186 """mark a file as "clean" cancelling potential "possibly dirty call"
186 """mark a file as "clean" cancelling potential "possibly dirty call"
187
187
188 Note: this function is a descendant of `dirstate.normal` and is
188 Note: this function is a descendant of `dirstate.normal` and is
189 currently expected to be called on "normal" entries only. There is no
189 currently expected to be called on "normal" entries only. There is no
190 reason for this not to change in the future as long as the code is
190 reason for this not to change in the future as long as the code is
191 updated to preserve the proper state of the non-normal files.
191 updated to preserve the proper state of the non-normal files.
192 """
192 """
193 self._wc_tracked = True
193 self._wc_tracked = True
194 self._p1_tracked = True
194 self._p1_tracked = True
195 self._mode = mode
195 self._mode = mode
196 self._size = size
196 self._size = size
197 self._mtime = mtime
197 self._mtime = mtime
198
198
199 def set_tracked(self):
199 def set_tracked(self):
200 """mark a file as tracked in the working copy
200 """mark a file as tracked in the working copy
201
201
202 This will ultimately be called by commands like `hg add`.
202 This will ultimately be called by commands like `hg add`.
203 """
203 """
204 self._wc_tracked = True
204 self._wc_tracked = True
205 # `set_tracked` is replacing various `normallookup` call. So we mark
205 # `set_tracked` is replacing various `normallookup` call. So we mark
206 # the files as needing lookup
206 # the files as needing lookup
207 #
207 #
208 # Consider dropping this in the future in favor of something less broad.
208 # Consider dropping this in the future in favor of something less broad.
209 self._mtime = None
209 self._mtime = None
210
210
211 def set_untracked(self):
211 def set_untracked(self):
212 """mark a file as untracked in the working copy
212 """mark a file as untracked in the working copy
213
213
214 This will ultimately be called by commands like `hg remove`.
214 This will ultimately be called by commands like `hg remove`.
215 """
215 """
216 self._wc_tracked = False
216 self._wc_tracked = False
217 self._mode = None
217 self._mode = None
218 self._size = None
218 self._size = None
219 self._mtime = None
219 self._mtime = None
220
220
221 def drop_merge_data(self):
221 def drop_merge_data(self):
222 """remove all "merge-only" from a DirstateItem
222 """remove all "merge-only" from a DirstateItem
223
223
224 This is to be call by the dirstatemap code when the second parent is dropped
224 This is to be call by the dirstatemap code when the second parent is dropped
225 """
225 """
226 if self._p2_info:
226 if self._p2_info:
227 self._p2_info = False
227 self._p2_info = False
228 self._mode = None
228 self._mode = None
229 self._size = None
229 self._size = None
230 self._mtime = None
230 self._mtime = None
231
231
232 @property
232 @property
233 def mode(self):
233 def mode(self):
234 return self.v1_mode()
234 return self.v1_mode()
235
235
236 @property
236 @property
237 def size(self):
237 def size(self):
238 return self.v1_size()
238 return self.v1_size()
239
239
240 @property
240 @property
241 def mtime(self):
241 def mtime(self):
242 return self.v1_mtime()
242 return self.v1_mtime()
243
243
244 @property
244 @property
245 def state(self):
245 def state(self):
246 """
246 """
247 States are:
247 States are:
248 n normal
248 n normal
249 m needs merging
249 m needs merging
250 r marked for removal
250 r marked for removal
251 a marked for addition
251 a marked for addition
252
252
253 XXX This "state" is a bit obscure and mostly a direct expression of the
253 XXX This "state" is a bit obscure and mostly a direct expression of the
254 dirstatev1 format. It would make sense to ultimately deprecate it in
254 dirstatev1 format. It would make sense to ultimately deprecate it in
255 favor of the more "semantic" attributes.
255 favor of the more "semantic" attributes.
256 """
256 """
257 if not self.any_tracked:
257 if not self.any_tracked:
258 return b'?'
258 return b'?'
259 return self.v1_state()
259 return self.v1_state()
260
260
261 @property
261 @property
262 def tracked(self):
262 def tracked(self):
263 """True is the file is tracked in the working copy"""
263 """True is the file is tracked in the working copy"""
264 return self._wc_tracked
264 return self._wc_tracked
265
265
266 @property
266 @property
267 def any_tracked(self):
267 def any_tracked(self):
268 """True is the file is tracked anywhere (wc or parents)"""
268 """True is the file is tracked anywhere (wc or parents)"""
269 return self._wc_tracked or self._p1_tracked or self._p2_info
269 return self._wc_tracked or self._p1_tracked or self._p2_info
270
270
271 @property
271 @property
272 def added(self):
272 def added(self):
273 """True if the file has been added"""
273 """True if the file has been added"""
274 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
274 return self._wc_tracked and not (self._p1_tracked or self._p2_info)
275
275
276 @property
276 @property
277 def maybe_clean(self):
277 def maybe_clean(self):
278 """True if the file has a chance to be in the "clean" state"""
278 """True if the file has a chance to be in the "clean" state"""
279 if not self._wc_tracked:
279 if not self._wc_tracked:
280 return False
280 return False
281 elif not self._p1_tracked:
281 elif not self._p1_tracked:
282 return False
282 return False
283 elif self._p2_info:
283 elif self._p2_info:
284 return False
284 return False
285 return True
285 return True
286
286
287 @property
287 @property
288 def p1_tracked(self):
288 def p1_tracked(self):
289 """True if the file is tracked in the first parent manifest"""
289 """True if the file is tracked in the first parent manifest"""
290 return self._p1_tracked
290 return self._p1_tracked
291
291
292 @property
292 @property
293 def p2_info(self):
293 def p2_info(self):
294 """True if the file needed to merge or apply any input from p2
294 """True if the file needed to merge or apply any input from p2
295
295
296 See the class documentation for details.
296 See the class documentation for details.
297 """
297 """
298 return self._wc_tracked and self._p2_info
298 return self._wc_tracked and self._p2_info
299
299
300 @property
300 @property
301 def removed(self):
301 def removed(self):
302 """True if the file has been removed"""
302 """True if the file has been removed"""
303 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
303 return not self._wc_tracked and (self._p1_tracked or self._p2_info)
304
304
305 def v1_state(self):
305 def v1_state(self):
306 """return a "state" suitable for v1 serialization"""
306 """return a "state" suitable for v1 serialization"""
307 if not self.any_tracked:
307 if not self.any_tracked:
308 # the object has no state to record, this is -currently-
308 # the object has no state to record, this is -currently-
309 # unsupported
309 # unsupported
310 raise RuntimeError('untracked item')
310 raise RuntimeError('untracked item')
311 elif self.removed:
311 elif self.removed:
312 return b'r'
312 return b'r'
313 elif self._p1_tracked and self._p2_info:
313 elif self._p1_tracked and self._p2_info:
314 return b'm'
314 return b'm'
315 elif self.added:
315 elif self.added:
316 return b'a'
316 return b'a'
317 else:
317 else:
318 return b'n'
318 return b'n'
319
319
320 def v1_mode(self):
320 def v1_mode(self):
321 """return a "mode" suitable for v1 serialization"""
321 """return a "mode" suitable for v1 serialization"""
322 return self._mode if self._mode is not None else 0
322 return self._mode if self._mode is not None else 0
323
323
324 def v1_size(self):
324 def v1_size(self):
325 """return a "size" suitable for v1 serialization"""
325 """return a "size" suitable for v1 serialization"""
326 if not self.any_tracked:
326 if not self.any_tracked:
327 # the object has no state to record, this is -currently-
327 # the object has no state to record, this is -currently-
328 # unsupported
328 # unsupported
329 raise RuntimeError('untracked item')
329 raise RuntimeError('untracked item')
330 elif self.removed and self._p1_tracked and self._p2_info:
330 elif self.removed and self._p1_tracked and self._p2_info:
331 return NONNORMAL
331 return NONNORMAL
332 elif self._p2_info:
332 elif self._p2_info:
333 return FROM_P2
333 return FROM_P2
334 elif self.removed:
334 elif self.removed:
335 return 0
335 return 0
336 elif self.added:
336 elif self.added:
337 return NONNORMAL
337 return NONNORMAL
338 elif self._size is None:
338 elif self._size is None:
339 return NONNORMAL
339 return NONNORMAL
340 else:
340 else:
341 return self._size
341 return self._size
342
342
343 def v1_mtime(self):
343 def v1_mtime(self):
344 """return a "mtime" suitable for v1 serialization"""
344 """return a "mtime" suitable for v1 serialization"""
345 if not self.any_tracked:
345 if not self.any_tracked:
346 # the object has no state to record, this is -currently-
346 # the object has no state to record, this is -currently-
347 # unsupported
347 # unsupported
348 raise RuntimeError('untracked item')
348 raise RuntimeError('untracked item')
349 elif self.removed:
349 elif self.removed:
350 return 0
350 return 0
351 elif self._mtime is None:
351 elif self._mtime is None:
352 return AMBIGUOUS_TIME
352 return AMBIGUOUS_TIME
353 elif self._p2_info:
353 elif self._p2_info:
354 return AMBIGUOUS_TIME
354 return AMBIGUOUS_TIME
355 elif not self._p1_tracked:
355 elif not self._p1_tracked:
356 return AMBIGUOUS_TIME
356 return AMBIGUOUS_TIME
357 else:
357 else:
358 return self._mtime
358 return self._mtime
359
359
360 def need_delay(self, now):
360 def need_delay(self, now):
361 """True if the stored mtime would be ambiguous with the current time"""
361 """True if the stored mtime would be ambiguous with the current time"""
362 return self.v1_state() == b'n' and self.v1_mtime() == now
362 return self.v1_state() == b'n' and self.v1_mtime() == now
363
363
364
364
365 def gettype(q):
365 def gettype(q):
366 return int(q & 0xFFFF)
366 return int(q & 0xFFFF)
367
367
368
368
369 class BaseIndexObject(object):
369 class BaseIndexObject(object):
370 # Can I be passed to an algorithm implemented in Rust?
370 # Can I be passed to an algorithm implemented in Rust?
371 rust_ext_compat = 0
371 rust_ext_compat = 0
372 # Format of an index entry according to Python's `struct` language
372 # Format of an index entry according to Python's `struct` language
373 index_format = revlog_constants.INDEX_ENTRY_V1
373 index_format = revlog_constants.INDEX_ENTRY_V1
374 # Size of a C unsigned long long int, platform independent
374 # Size of a C unsigned long long int, platform independent
375 big_int_size = struct.calcsize(b'>Q')
375 big_int_size = struct.calcsize(b'>Q')
376 # Size of a C long int, platform independent
376 # Size of a C long int, platform independent
377 int_size = struct.calcsize(b'>i')
377 int_size = struct.calcsize(b'>i')
378 # An empty index entry, used as a default value to be overridden, or nullrev
378 # An empty index entry, used as a default value to be overridden, or nullrev
379 null_item = (
379 null_item = (
380 0,
380 0,
381 0,
381 0,
382 0,
382 0,
383 -1,
383 -1,
384 -1,
384 -1,
385 -1,
385 -1,
386 -1,
386 -1,
387 sha1nodeconstants.nullid,
387 sha1nodeconstants.nullid,
388 0,
388 0,
389 0,
389 0,
390 revlog_constants.COMP_MODE_INLINE,
390 revlog_constants.COMP_MODE_INLINE,
391 revlog_constants.COMP_MODE_INLINE,
391 revlog_constants.COMP_MODE_INLINE,
392 )
392 )
393
393
394 @util.propertycache
394 @util.propertycache
395 def entry_size(self):
395 def entry_size(self):
396 return self.index_format.size
396 return self.index_format.size
397
397
398 @property
398 @property
399 def nodemap(self):
399 def nodemap(self):
400 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
400 msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
401 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
401 util.nouideprecwarn(msg, b'5.3', stacklevel=2)
402 return self._nodemap
402 return self._nodemap
403
403
404 @util.propertycache
404 @util.propertycache
405 def _nodemap(self):
405 def _nodemap(self):
406 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
406 nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
407 for r in range(0, len(self)):
407 for r in range(0, len(self)):
408 n = self[r][7]
408 n = self[r][7]
409 nodemap[n] = r
409 nodemap[n] = r
410 return nodemap
410 return nodemap
411
411
412 def has_node(self, node):
412 def has_node(self, node):
413 """return True if the node exist in the index"""
413 """return True if the node exist in the index"""
414 return node in self._nodemap
414 return node in self._nodemap
415
415
416 def rev(self, node):
416 def rev(self, node):
417 """return a revision for a node
417 """return a revision for a node
418
418
419 If the node is unknown, raise a RevlogError"""
419 If the node is unknown, raise a RevlogError"""
420 return self._nodemap[node]
420 return self._nodemap[node]
421
421
422 def get_rev(self, node):
422 def get_rev(self, node):
423 """return a revision for a node
423 """return a revision for a node
424
424
425 If the node is unknown, return None"""
425 If the node is unknown, return None"""
426 return self._nodemap.get(node)
426 return self._nodemap.get(node)
427
427
428 def _stripnodes(self, start):
428 def _stripnodes(self, start):
429 if '_nodemap' in vars(self):
429 if '_nodemap' in vars(self):
430 for r in range(start, len(self)):
430 for r in range(start, len(self)):
431 n = self[r][7]
431 n = self[r][7]
432 del self._nodemap[n]
432 del self._nodemap[n]
433
433
434 def clearcaches(self):
434 def clearcaches(self):
435 self.__dict__.pop('_nodemap', None)
435 self.__dict__.pop('_nodemap', None)
436
436
437 def __len__(self):
437 def __len__(self):
438 return self._lgt + len(self._extra)
438 return self._lgt + len(self._extra)
439
439
440 def append(self, tup):
440 def append(self, tup):
441 if '_nodemap' in vars(self):
441 if '_nodemap' in vars(self):
442 self._nodemap[tup[7]] = len(self)
442 self._nodemap[tup[7]] = len(self)
443 data = self._pack_entry(len(self), tup)
443 data = self._pack_entry(len(self), tup)
444 self._extra.append(data)
444 self._extra.append(data)
445
445
446 def _pack_entry(self, rev, entry):
446 def _pack_entry(self, rev, entry):
447 assert entry[8] == 0
447 assert entry[8] == 0
448 assert entry[9] == 0
448 assert entry[9] == 0
449 return self.index_format.pack(*entry[:8])
449 return self.index_format.pack(*entry[:8])
450
450
451 def _check_index(self, i):
451 def _check_index(self, i):
452 if not isinstance(i, int):
452 if not isinstance(i, int):
453 raise TypeError(b"expecting int indexes")
453 raise TypeError(b"expecting int indexes")
454 if i < 0 or i >= len(self):
454 if i < 0 or i >= len(self):
455 raise IndexError
455 raise IndexError
456
456
457 def __getitem__(self, i):
457 def __getitem__(self, i):
458 if i == -1:
458 if i == -1:
459 return self.null_item
459 return self.null_item
460 self._check_index(i)
460 self._check_index(i)
461 if i >= self._lgt:
461 if i >= self._lgt:
462 data = self._extra[i - self._lgt]
462 data = self._extra[i - self._lgt]
463 else:
463 else:
464 index = self._calculate_index(i)
464 index = self._calculate_index(i)
465 data = self._data[index : index + self.entry_size]
465 data = self._data[index : index + self.entry_size]
466 r = self._unpack_entry(i, data)
466 r = self._unpack_entry(i, data)
467 if self._lgt and i == 0:
467 if self._lgt and i == 0:
468 offset = revlogutils.offset_type(0, gettype(r[0]))
468 offset = revlogutils.offset_type(0, gettype(r[0]))
469 r = (offset,) + r[1:]
469 r = (offset,) + r[1:]
470 return r
470 return r
471
471
472 def _unpack_entry(self, rev, data):
472 def _unpack_entry(self, rev, data):
473 r = self.index_format.unpack(data)
473 r = self.index_format.unpack(data)
474 r = r + (
474 r = r + (
475 0,
475 0,
476 0,
476 0,
477 revlog_constants.COMP_MODE_INLINE,
477 revlog_constants.COMP_MODE_INLINE,
478 revlog_constants.COMP_MODE_INLINE,
478 revlog_constants.COMP_MODE_INLINE,
479 )
479 )
480 return r
480 return r
481
481
482 def pack_header(self, header):
482 def pack_header(self, header):
483 """pack header information as binary"""
483 """pack header information as binary"""
484 v_fmt = revlog_constants.INDEX_HEADER
484 v_fmt = revlog_constants.INDEX_HEADER
485 return v_fmt.pack(header)
485 return v_fmt.pack(header)
486
486
487 def entry_binary(self, rev):
487 def entry_binary(self, rev):
488 """return the raw binary string representing a revision"""
488 """return the raw binary string representing a revision"""
489 entry = self[rev]
489 entry = self[rev]
490 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
490 p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
491 if rev == 0:
491 if rev == 0:
492 p = p[revlog_constants.INDEX_HEADER.size :]
492 p = p[revlog_constants.INDEX_HEADER.size :]
493 return p
493 return p
494
494
495
495
496 class IndexObject(BaseIndexObject):
496 class IndexObject(BaseIndexObject):
497 def __init__(self, data):
497 def __init__(self, data):
498 assert len(data) % self.entry_size == 0, (
498 assert len(data) % self.entry_size == 0, (
499 len(data),
499 len(data),
500 self.entry_size,
500 self.entry_size,
501 len(data) % self.entry_size,
501 len(data) % self.entry_size,
502 )
502 )
503 self._data = data
503 self._data = data
504 self._lgt = len(data) // self.entry_size
504 self._lgt = len(data) // self.entry_size
505 self._extra = []
505 self._extra = []
506
506
507 def _calculate_index(self, i):
507 def _calculate_index(self, i):
508 return i * self.entry_size
508 return i * self.entry_size
509
509
510 def __delitem__(self, i):
510 def __delitem__(self, i):
511 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
511 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
512 raise ValueError(b"deleting slices only supports a:-1 with step 1")
512 raise ValueError(b"deleting slices only supports a:-1 with step 1")
513 i = i.start
513 i = i.start
514 self._check_index(i)
514 self._check_index(i)
515 self._stripnodes(i)
515 self._stripnodes(i)
516 if i < self._lgt:
516 if i < self._lgt:
517 self._data = self._data[: i * self.entry_size]
517 self._data = self._data[: i * self.entry_size]
518 self._lgt = i
518 self._lgt = i
519 self._extra = []
519 self._extra = []
520 else:
520 else:
521 self._extra = self._extra[: i - self._lgt]
521 self._extra = self._extra[: i - self._lgt]
522
522
523
523
524 class PersistentNodeMapIndexObject(IndexObject):
524 class PersistentNodeMapIndexObject(IndexObject):
525 """a Debug oriented class to test persistent nodemap
525 """a Debug oriented class to test persistent nodemap
526
526
527 We need a simple python object to test API and higher level behavior. See
527 We need a simple python object to test API and higher level behavior. See
528 the Rust implementation for more serious usage. This should be used only
528 the Rust implementation for more serious usage. This should be used only
529 through the dedicated `devel.persistent-nodemap` config.
529 through the dedicated `devel.persistent-nodemap` config.
530 """
530 """
531
531
532 def nodemap_data_all(self):
532 def nodemap_data_all(self):
533 """Return bytes containing a full serialization of a nodemap
533 """Return bytes containing a full serialization of a nodemap
534
534
535 The nodemap should be valid for the full set of revisions in the
535 The nodemap should be valid for the full set of revisions in the
536 index."""
536 index."""
537 return nodemaputil.persistent_data(self)
537 return nodemaputil.persistent_data(self)
538
538
539 def nodemap_data_incremental(self):
539 def nodemap_data_incremental(self):
540 """Return bytes containing a incremental update to persistent nodemap
540 """Return bytes containing a incremental update to persistent nodemap
541
541
542 This containst the data for an append-only update of the data provided
542 This containst the data for an append-only update of the data provided
543 in the last call to `update_nodemap_data`.
543 in the last call to `update_nodemap_data`.
544 """
544 """
545 if self._nm_root is None:
545 if self._nm_root is None:
546 return None
546 return None
547 docket = self._nm_docket
547 docket = self._nm_docket
548 changed, data = nodemaputil.update_persistent_data(
548 changed, data = nodemaputil.update_persistent_data(
549 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
549 self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
550 )
550 )
551
551
552 self._nm_root = self._nm_max_idx = self._nm_docket = None
552 self._nm_root = self._nm_max_idx = self._nm_docket = None
553 return docket, changed, data
553 return docket, changed, data
554
554
555 def update_nodemap_data(self, docket, nm_data):
555 def update_nodemap_data(self, docket, nm_data):
556 """provide full block of persisted binary data for a nodemap
556 """provide full block of persisted binary data for a nodemap
557
557
558 The data are expected to come from disk. See `nodemap_data_all` for a
558 The data are expected to come from disk. See `nodemap_data_all` for a
559 producer of such data."""
559 producer of such data."""
560 if nm_data is not None:
560 if nm_data is not None:
561 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
561 self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
562 if self._nm_root:
562 if self._nm_root:
563 self._nm_docket = docket
563 self._nm_docket = docket
564 else:
564 else:
565 self._nm_root = self._nm_max_idx = self._nm_docket = None
565 self._nm_root = self._nm_max_idx = self._nm_docket = None
566
566
567
567
568 class InlinedIndexObject(BaseIndexObject):
568 class InlinedIndexObject(BaseIndexObject):
569 def __init__(self, data, inline=0):
569 def __init__(self, data, inline=0):
570 self._data = data
570 self._data = data
571 self._lgt = self._inline_scan(None)
571 self._lgt = self._inline_scan(None)
572 self._inline_scan(self._lgt)
572 self._inline_scan(self._lgt)
573 self._extra = []
573 self._extra = []
574
574
575 def _inline_scan(self, lgt):
575 def _inline_scan(self, lgt):
576 off = 0
576 off = 0
577 if lgt is not None:
577 if lgt is not None:
578 self._offsets = [0] * lgt
578 self._offsets = [0] * lgt
579 count = 0
579 count = 0
580 while off <= len(self._data) - self.entry_size:
580 while off <= len(self._data) - self.entry_size:
581 start = off + self.big_int_size
581 start = off + self.big_int_size
582 (s,) = struct.unpack(
582 (s,) = struct.unpack(
583 b'>i',
583 b'>i',
584 self._data[start : start + self.int_size],
584 self._data[start : start + self.int_size],
585 )
585 )
586 if lgt is not None:
586 if lgt is not None:
587 self._offsets[count] = off
587 self._offsets[count] = off
588 count += 1
588 count += 1
589 off += self.entry_size + s
589 off += self.entry_size + s
590 if off != len(self._data):
590 if off != len(self._data):
591 raise ValueError(b"corrupted data")
591 raise ValueError(b"corrupted data")
592 return count
592 return count
593
593
594 def __delitem__(self, i):
594 def __delitem__(self, i):
595 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
595 if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
596 raise ValueError(b"deleting slices only supports a:-1 with step 1")
596 raise ValueError(b"deleting slices only supports a:-1 with step 1")
597 i = i.start
597 i = i.start
598 self._check_index(i)
598 self._check_index(i)
599 self._stripnodes(i)
599 self._stripnodes(i)
600 if i < self._lgt:
600 if i < self._lgt:
601 self._offsets = self._offsets[:i]
601 self._offsets = self._offsets[:i]
602 self._lgt = i
602 self._lgt = i
603 self._extra = []
603 self._extra = []
604 else:
604 else:
605 self._extra = self._extra[: i - self._lgt]
605 self._extra = self._extra[: i - self._lgt]
606
606
607 def _calculate_index(self, i):
607 def _calculate_index(self, i):
608 return self._offsets[i]
608 return self._offsets[i]
609
609
610
610
611 def parse_index2(data, inline, revlogv2=False):
611 def parse_index2(data, inline, revlogv2=False):
612 if not inline:
612 if not inline:
613 cls = IndexObject2 if revlogv2 else IndexObject
613 cls = IndexObject2 if revlogv2 else IndexObject
614 return cls(data), None
614 return cls(data), None
615 cls = InlinedIndexObject
615 cls = InlinedIndexObject
616 return cls(data, inline), (0, data)
616 return cls(data, inline), (0, data)
617
617
618
618
619 def parse_index_cl_v2(data):
619 def parse_index_cl_v2(data):
620 return IndexChangelogV2(data), None
620 return IndexChangelogV2(data), None
621
621
622
622
623 class IndexObject2(IndexObject):
623 class IndexObject2(IndexObject):
624 index_format = revlog_constants.INDEX_ENTRY_V2
624 index_format = revlog_constants.INDEX_ENTRY_V2
625
625
626 def replace_sidedata_info(
626 def replace_sidedata_info(
627 self,
627 self,
628 rev,
628 rev,
629 sidedata_offset,
629 sidedata_offset,
630 sidedata_length,
630 sidedata_length,
631 offset_flags,
631 offset_flags,
632 compression_mode,
632 compression_mode,
633 ):
633 ):
634 """
634 """
635 Replace an existing index entry's sidedata offset and length with new
635 Replace an existing index entry's sidedata offset and length with new
636 ones.
636 ones.
637 This cannot be used outside of the context of sidedata rewriting,
637 This cannot be used outside of the context of sidedata rewriting,
638 inside the transaction that creates the revision `rev`.
638 inside the transaction that creates the revision `rev`.
639 """
639 """
640 if rev < 0:
640 if rev < 0:
641 raise KeyError
641 raise KeyError
642 self._check_index(rev)
642 self._check_index(rev)
643 if rev < self._lgt:
643 if rev < self._lgt:
644 msg = b"cannot rewrite entries outside of this transaction"
644 msg = b"cannot rewrite entries outside of this transaction"
645 raise KeyError(msg)
645 raise KeyError(msg)
646 else:
646 else:
647 entry = list(self[rev])
647 entry = list(self[rev])
648 entry[0] = offset_flags
648 entry[0] = offset_flags
649 entry[8] = sidedata_offset
649 entry[8] = sidedata_offset
650 entry[9] = sidedata_length
650 entry[9] = sidedata_length
651 entry[11] = compression_mode
651 entry[11] = compression_mode
652 entry = tuple(entry)
652 entry = tuple(entry)
653 new = self._pack_entry(rev, entry)
653 new = self._pack_entry(rev, entry)
654 self._extra[rev - self._lgt] = new
654 self._extra[rev - self._lgt] = new
655
655
656 def _unpack_entry(self, rev, data):
656 def _unpack_entry(self, rev, data):
657 data = self.index_format.unpack(data)
657 data = self.index_format.unpack(data)
658 entry = data[:10]
658 entry = data[:10]
659 data_comp = data[10] & 3
659 data_comp = data[10] & 3
660 sidedata_comp = (data[10] & (3 << 2)) >> 2
660 sidedata_comp = (data[10] & (3 << 2)) >> 2
661 return entry + (data_comp, sidedata_comp)
661 return entry + (data_comp, sidedata_comp)
662
662
663 def _pack_entry(self, rev, entry):
663 def _pack_entry(self, rev, entry):
664 data = entry[:10]
664 data = entry[:10]
665 data_comp = entry[10] & 3
665 data_comp = entry[10] & 3
666 sidedata_comp = (entry[11] & 3) << 2
666 sidedata_comp = (entry[11] & 3) << 2
667 data += (data_comp | sidedata_comp,)
667 data += (data_comp | sidedata_comp,)
668
668
669 return self.index_format.pack(*data)
669 return self.index_format.pack(*data)
670
670
671 def entry_binary(self, rev):
671 def entry_binary(self, rev):
672 """return the raw binary string representing a revision"""
672 """return the raw binary string representing a revision"""
673 entry = self[rev]
673 entry = self[rev]
674 return self._pack_entry(rev, entry)
674 return self._pack_entry(rev, entry)
675
675
676 def pack_header(self, header):
676 def pack_header(self, header):
677 """pack header information as binary"""
677 """pack header information as binary"""
678 msg = 'version header should go in the docket, not the index: %d'
678 msg = 'version header should go in the docket, not the index: %d'
679 msg %= header
679 msg %= header
680 raise error.ProgrammingError(msg)
680 raise error.ProgrammingError(msg)
681
681
682
682
683 class IndexChangelogV2(IndexObject2):
683 class IndexChangelogV2(IndexObject2):
684 index_format = revlog_constants.INDEX_ENTRY_CL_V2
684 index_format = revlog_constants.INDEX_ENTRY_CL_V2
685
685
686 def _unpack_entry(self, rev, data, r=True):
686 def _unpack_entry(self, rev, data, r=True):
687 items = self.index_format.unpack(data)
687 items = self.index_format.unpack(data)
688 entry = items[:3] + (rev, rev) + items[3:8]
688 entry = items[:3] + (rev, rev) + items[3:8]
689 data_comp = items[8] & 3
689 data_comp = items[8] & 3
690 sidedata_comp = (items[8] >> 2) & 3
690 sidedata_comp = (items[8] >> 2) & 3
691 return entry + (data_comp, sidedata_comp)
691 return entry + (data_comp, sidedata_comp)
692
692
693 def _pack_entry(self, rev, entry):
693 def _pack_entry(self, rev, entry):
694 assert entry[3] == rev, entry[3]
694 assert entry[3] == rev, entry[3]
695 assert entry[4] == rev, entry[4]
695 assert entry[4] == rev, entry[4]
696 data = entry[:3] + entry[5:10]
696 data = entry[:3] + entry[5:10]
697 data_comp = entry[10] & 3
697 data_comp = entry[10] & 3
698 sidedata_comp = (entry[11] & 3) << 2
698 sidedata_comp = (entry[11] & 3) << 2
699 data += (data_comp | sidedata_comp,)
699 data += (data_comp | sidedata_comp,)
700 return self.index_format.pack(*data)
700 return self.index_format.pack(*data)
701
701
702
702
703 def parse_index_devel_nodemap(data, inline):
703 def parse_index_devel_nodemap(data, inline):
704 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
704 """like parse_index2, but alway return a PersistentNodeMapIndexObject"""
705 return PersistentNodeMapIndexObject(data), None
705 return PersistentNodeMapIndexObject(data), None
706
706
707
707
708 def parse_dirstate(dmap, copymap, st):
708 def parse_dirstate(dmap, copymap, st):
709 parents = [st[:20], st[20:40]]
709 parents = [st[:20], st[20:40]]
710 # dereference fields so they will be local in loop
710 # dereference fields so they will be local in loop
711 format = b">cllll"
711 format = b">cllll"
712 e_size = struct.calcsize(format)
712 e_size = struct.calcsize(format)
713 pos1 = 40
713 pos1 = 40
714 l = len(st)
714 l = len(st)
715
715
716 # the inner loop
716 # the inner loop
717 while pos1 < l:
717 while pos1 < l:
718 pos2 = pos1 + e_size
718 pos2 = pos1 + e_size
719 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
719 e = _unpack(b">cllll", st[pos1:pos2]) # a literal here is faster
720 pos1 = pos2 + e[4]
720 pos1 = pos2 + e[4]
721 f = st[pos2:pos1]
721 f = st[pos2:pos1]
722 if b'\0' in f:
722 if b'\0' in f:
723 f, c = f.split(b'\0')
723 f, c = f.split(b'\0')
724 copymap[f] = c
724 copymap[f] = c
725 dmap[f] = DirstateItem.from_v1_data(*e[:4])
725 dmap[f] = DirstateItem.from_v1_data(*e[:4])
726 return parents
726 return parents
727
727
728
728
729 def pack_dirstate(dmap, copymap, pl, now):
729 def pack_dirstate(dmap, copymap, pl, now):
730 now = int(now)
730 now = int(now)
731 cs = stringio()
731 cs = stringio()
732 write = cs.write
732 write = cs.write
733 write(b"".join(pl))
733 write(b"".join(pl))
734 for f, e in pycompat.iteritems(dmap):
734 for f, e in pycompat.iteritems(dmap):
735 if e.need_delay(now):
735 if e.need_delay(now):
736 # The file was last modified "simultaneously" with the current
736 # The file was last modified "simultaneously" with the current
737 # write to dirstate (i.e. within the same second for file-
737 # write to dirstate (i.e. within the same second for file-
738 # systems with a granularity of 1 sec). This commonly happens
738 # systems with a granularity of 1 sec). This commonly happens
739 # for at least a couple of files on 'update'.
739 # for at least a couple of files on 'update'.
740 # The user could change the file without changing its size
740 # The user could change the file without changing its size
741 # within the same second. Invalidate the file's mtime in
741 # within the same second. Invalidate the file's mtime in
742 # dirstate, forcing future 'status' calls to compare the
742 # dirstate, forcing future 'status' calls to compare the
743 # contents of the file if the size is the same. This prevents
743 # contents of the file if the size is the same. This prevents
744 # mistakenly treating such files as clean.
744 # mistakenly treating such files as clean.
745 e.set_possibly_dirty()
745 e.set_possibly_dirty()
746
746
747 if f in copymap:
747 if f in copymap:
748 f = b"%s\0%s" % (f, copymap[f])
748 f = b"%s\0%s" % (f, copymap[f])
749 e = _pack(
749 e = _pack(
750 b">cllll",
750 b">cllll",
751 e.v1_state(),
751 e.v1_state(),
752 e.v1_mode(),
752 e.v1_mode(),
753 e.v1_size(),
753 e.v1_size(),
754 e.v1_mtime(),
754 e.v1_mtime(),
755 len(f),
755 len(f),
756 )
756 )
757 write(e)
757 write(e)
758 write(f)
758 write(f)
759 return cs.getvalue()
759 return cs.getvalue()
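The Rust hunk below makes the matching change in `DirstateEntry::from_v1_data`, expanding `Self::new_from_p2()` into an explicit struct literal carrying `WDIR_TRACKED | P2_INFO`. The mapping it implements mirrors the Python `from_v1_data` above; a short sketch of how the v1 `(state, size)` special values decode into the semantic flags (the mode, size, and mtime values are illustrative):

    from mercurial.pure.parsers import DirstateItem, FROM_P2, NONNORMAL

    # 'n' with size FROM_P2: tracked in the working copy, carries p2 info.
    e = DirstateItem.from_v1_data(b'n', 0, FROM_P2, 0)
    assert e.tracked and e.p2_info and not e.p1_tracked

    # 'r' with size NONNORMAL: removed while a merge was in progress.
    e = DirstateItem.from_v1_data(b'r', 0, NONNORMAL, 0)
    assert e.removed and e.p1_tracked

    # plain 'n' with real mode/size/mtime: a candidate for the clean fast path.
    e = DirstateItem.from_v1_data(b'n', 0o644, 12, 1000000)
    assert e.maybe_clean and e.v1_mtime() == 1000000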
@@ -1,418 +1,423 b''
1 use crate::errors::HgError;
1 use crate::errors::HgError;
2 use bitflags::bitflags;
2 use bitflags::bitflags;
3 use std::convert::TryFrom;
3 use std::convert::TryFrom;
4
4
5 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
5 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
6 pub enum EntryState {
6 pub enum EntryState {
7 Normal,
7 Normal,
8 Added,
8 Added,
9 Removed,
9 Removed,
10 Merged,
10 Merged,
11 }
11 }
12
12
13 /// The C implementation uses all signed types. This will be an issue
13 /// The C implementation uses all signed types. This will be an issue
14 /// either when 4GB+ source files are commonplace or in 2038, whichever
14 /// either when 4GB+ source files are commonplace or in 2038, whichever
15 /// comes first.
15 /// comes first.
16 #[derive(Debug, PartialEq, Copy, Clone)]
16 #[derive(Debug, PartialEq, Copy, Clone)]
17 pub struct DirstateEntry {
17 pub struct DirstateEntry {
18 pub(crate) flags: Flags,
18 pub(crate) flags: Flags,
19 mode_size: Option<(i32, i32)>,
19 mode_size: Option<(i32, i32)>,
20 mtime: Option<i32>,
20 mtime: Option<i32>,
21 }
21 }
22
22
23 bitflags! {
23 bitflags! {
24 pub(crate) struct Flags: u8 {
24 pub(crate) struct Flags: u8 {
25 const WDIR_TRACKED = 1 << 0;
25 const WDIR_TRACKED = 1 << 0;
26 const P1_TRACKED = 1 << 1;
26 const P1_TRACKED = 1 << 1;
27 const P2_INFO = 1 << 2;
27 const P2_INFO = 1 << 2;
28 }
28 }
29 }
29 }
30
30
31 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
31 pub const V1_RANGEMASK: i32 = 0x7FFFFFFF;
32
32
33 pub const MTIME_UNSET: i32 = -1;
33 pub const MTIME_UNSET: i32 = -1;
34
34
35 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
35 /// A `DirstateEntry` with a size of `-2` means that it was merged from the
36 /// other parent. This allows revert to pick the right status back during a
36 /// other parent. This allows revert to pick the right status back during a
37 /// merge.
37 /// merge.
38 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
38 pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
39 /// A special value used for internal representation of a special case in
39 /// A special value used for internal representation of a special case in
40 /// the dirstate v1 format.
40 /// the dirstate v1 format.
41 pub const SIZE_NON_NORMAL: i32 = -1;
41 pub const SIZE_NON_NORMAL: i32 = -1;
42
42
43 impl DirstateEntry {
43 impl DirstateEntry {
44 pub fn from_v2_data(
44 pub fn from_v2_data(
45 wdir_tracked: bool,
45 wdir_tracked: bool,
46 p1_tracked: bool,
46 p1_tracked: bool,
47 p2_info: bool,
47 p2_info: bool,
48 mode_size: Option<(i32, i32)>,
48 mode_size: Option<(i32, i32)>,
49 mtime: Option<i32>,
49 mtime: Option<i32>,
50 ) -> Self {
50 ) -> Self {
51 let mut flags = Flags::empty();
51 let mut flags = Flags::empty();
52 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
52 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
53 flags.set(Flags::P1_TRACKED, p1_tracked);
53 flags.set(Flags::P1_TRACKED, p1_tracked);
54 flags.set(Flags::P2_INFO, p2_info);
54 flags.set(Flags::P2_INFO, p2_info);
55 Self {
55 Self {
56 flags,
56 flags,
57 mode_size,
57 mode_size,
58 mtime,
58 mtime,
59 }
59 }
60 }
60 }
61
61
62 pub fn from_v1_data(
62 pub fn from_v1_data(
63 state: EntryState,
63 state: EntryState,
64 mode: i32,
64 mode: i32,
65 size: i32,
65 size: i32,
66 mtime: i32,
66 mtime: i32,
67 ) -> Self {
67 ) -> Self {
68 match state {
68 match state {
69 EntryState::Normal => {
69 EntryState::Normal => {
70 if size == SIZE_FROM_OTHER_PARENT {
70 if size == SIZE_FROM_OTHER_PARENT {
71 Self::new_from_p2()
71 Self {
72 // might be missing P1_TRACKED
73 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
74 mode_size: None,
75 mtime: None,
76 }
72 } else if size == SIZE_NON_NORMAL {
77 } else if size == SIZE_NON_NORMAL {
73 Self::new_possibly_dirty()
78 Self::new_possibly_dirty()
74 } else if mtime == MTIME_UNSET {
79 } else if mtime == MTIME_UNSET {
75 Self {
80 Self {
76 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
81 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
77 mode_size: Some((mode, size)),
82 mode_size: Some((mode, size)),
78 mtime: None,
83 mtime: None,
79 }
84 }
80 } else {
85 } else {
81 Self::new_normal(mode, size, mtime)
86 Self::new_normal(mode, size, mtime)
82 }
87 }
83 }
88 }
84 EntryState::Added => Self {
89 EntryState::Added => Self {
85 flags: Flags::WDIR_TRACKED,
90 flags: Flags::WDIR_TRACKED,
86 mode_size: None,
91 mode_size: None,
87 mtime: None,
92 mtime: None,
88 },
93 },
89 EntryState::Removed => Self {
94 EntryState::Removed => Self {
90 flags: if size == SIZE_NON_NORMAL {
95 flags: if size == SIZE_NON_NORMAL {
91 Flags::P1_TRACKED | Flags::P2_INFO
96 Flags::P1_TRACKED | Flags::P2_INFO
92 } else if size == SIZE_FROM_OTHER_PARENT {
97 } else if size == SIZE_FROM_OTHER_PARENT {
93 // We don’t know if P1_TRACKED should be set (file history)
98 // We don’t know if P1_TRACKED should be set (file history)
94 Flags::P2_INFO
99 Flags::P2_INFO
95 } else {
100 } else {
96 Flags::P1_TRACKED
101 Flags::P1_TRACKED
97 },
102 },
98 mode_size: None,
103 mode_size: None,
99 mtime: None,
104 mtime: None,
100 },
105 },
101 EntryState::Merged => Self {
106 EntryState::Merged => Self {
102 flags: Flags::WDIR_TRACKED
107 flags: Flags::WDIR_TRACKED
103 | Flags::P1_TRACKED // might not be true because of rename ?
108 | Flags::P1_TRACKED // might not be true because of rename ?
104 | Flags::P2_INFO, // might not be true because of rename ?
109 | Flags::P2_INFO, // might not be true because of rename ?
105 mode_size: None,
110 mode_size: None,
106 mtime: None,
111 mtime: None,
107 },
112 },
108 }
113 }
109 }
114 }
110
115
111 pub fn new_from_p2() -> Self {
116 pub fn new_from_p2() -> Self {
112 Self {
117 Self {
113 // might be missing P1_TRACKED
118 // might be missing P1_TRACKED
114 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
119 flags: Flags::WDIR_TRACKED | Flags::P2_INFO,
115 mode_size: None,
120 mode_size: None,
116 mtime: None,
121 mtime: None,
117 }
122 }
118 }
123 }
119
124
120 pub fn new_possibly_dirty() -> Self {
125 pub fn new_possibly_dirty() -> Self {
121 Self {
126 Self {
122 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
127 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
123 mode_size: None,
128 mode_size: None,
124 mtime: None,
129 mtime: None,
125 }
130 }
126 }
131 }
127
132
128 pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self {
133 pub fn new_normal(mode: i32, size: i32, mtime: i32) -> Self {
129 Self {
134 Self {
130 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
135 flags: Flags::WDIR_TRACKED | Flags::P1_TRACKED,
131 mode_size: Some((mode, size)),
136 mode_size: Some((mode, size)),
132 mtime: Some(mtime),
137 mtime: Some(mtime),
133 }
138 }
134 }
139 }
135
140
136 /// Creates a new entry in "removed" state.
141 /// Creates a new entry in "removed" state.
137 ///
142 ///
138 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
143 /// `size` is expected to be zero, `SIZE_NON_NORMAL`, or
139 /// `SIZE_FROM_OTHER_PARENT`
144 /// `SIZE_FROM_OTHER_PARENT`
140 pub fn new_removed(size: i32) -> Self {
145 pub fn new_removed(size: i32) -> Self {
141 Self::from_v1_data(EntryState::Removed, 0, size, 0)
146 Self::from_v1_data(EntryState::Removed, 0, size, 0)
142 }
147 }
143
148
144 pub fn tracked(&self) -> bool {
149 pub fn tracked(&self) -> bool {
145 self.flags.contains(Flags::WDIR_TRACKED)
150 self.flags.contains(Flags::WDIR_TRACKED)
146 }
151 }
147
152
148 pub fn p1_tracked(&self) -> bool {
153 pub fn p1_tracked(&self) -> bool {
149 self.flags.contains(Flags::P1_TRACKED)
154 self.flags.contains(Flags::P1_TRACKED)
150 }
155 }
151
156
152 fn in_either_parent(&self) -> bool {
157 fn in_either_parent(&self) -> bool {
153 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
158 self.flags.intersects(Flags::P1_TRACKED | Flags::P2_INFO)
154 }
159 }
155
160
156 pub fn removed(&self) -> bool {
161 pub fn removed(&self) -> bool {
157 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
162 self.in_either_parent() && !self.flags.contains(Flags::WDIR_TRACKED)
158 }
163 }
159
164
160 pub fn p2_info(&self) -> bool {
165 pub fn p2_info(&self) -> bool {
161 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
166 self.flags.contains(Flags::WDIR_TRACKED | Flags::P2_INFO)
162 }
167 }
163
168
164 pub fn added(&self) -> bool {
169 pub fn added(&self) -> bool {
165 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
170 self.flags.contains(Flags::WDIR_TRACKED) && !self.in_either_parent()
166 }
171 }
167
172
168 pub fn maybe_clean(&self) -> bool {
173 pub fn maybe_clean(&self) -> bool {
169 if !self.flags.contains(Flags::WDIR_TRACKED) {
174 if !self.flags.contains(Flags::WDIR_TRACKED) {
170 false
175 false
171 } else if !self.flags.contains(Flags::P1_TRACKED) {
176 } else if !self.flags.contains(Flags::P1_TRACKED) {
172 false
177 false
173 } else if self.flags.contains(Flags::P2_INFO) {
178 } else if self.flags.contains(Flags::P2_INFO) {
174 false
179 false
175 } else {
180 } else {
176 true
181 true
177 }
182 }
178 }
183 }
179
184
180 pub fn any_tracked(&self) -> bool {
185 pub fn any_tracked(&self) -> bool {
181 self.flags.intersects(
186 self.flags.intersects(
182 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
187 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
183 )
188 )
184 }
189 }
185
190
186 /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)`
191 /// Returns `(wdir_tracked, p1_tracked, p2_info, mode_size, mtime)`
187 pub(crate) fn v2_data(
192 pub(crate) fn v2_data(
188 &self,
193 &self,
189 ) -> (bool, bool, bool, Option<(i32, i32)>, Option<i32>) {
194 ) -> (bool, bool, bool, Option<(i32, i32)>, Option<i32>) {
190 if !self.any_tracked() {
195 if !self.any_tracked() {
191 // TODO: return an Option instead?
196 // TODO: return an Option instead?
192 panic!("Accessing v1_state of an untracked DirstateEntry")
197 panic!("Accessing v1_state of an untracked DirstateEntry")
193 }
198 }
194 let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
199 let wdir_tracked = self.flags.contains(Flags::WDIR_TRACKED);
195 let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
200 let p1_tracked = self.flags.contains(Flags::P1_TRACKED);
196 let p2_info = self.flags.contains(Flags::P2_INFO);
201 let p2_info = self.flags.contains(Flags::P2_INFO);
197 let mode_size = self.mode_size;
202 let mode_size = self.mode_size;
198 let mtime = self.mtime;
203 let mtime = self.mtime;
199 (wdir_tracked, p1_tracked, p2_info, mode_size, mtime)
204 (wdir_tracked, p1_tracked, p2_info, mode_size, mtime)
200 }
205 }
201
206
202 fn v1_state(&self) -> EntryState {
207 fn v1_state(&self) -> EntryState {
203 if !self.any_tracked() {
208 if !self.any_tracked() {
204 // TODO: return an Option instead?
209 // TODO: return an Option instead?
205 panic!("Accessing v1_state of an untracked DirstateEntry")
210 panic!("Accessing v1_state of an untracked DirstateEntry")
206 }
211 }
207 if self.removed() {
212 if self.removed() {
208 EntryState::Removed
213 EntryState::Removed
209 } else if self
214 } else if self
210 .flags
215 .flags
211 .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
216 .contains(Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO)
212 {
217 {
213 EntryState::Merged
218 EntryState::Merged
214 } else if self.added() {
219 } else if self.added() {
215 EntryState::Added
220 EntryState::Added
216 } else {
221 } else {
217 EntryState::Normal
222 EntryState::Normal
218 }
223 }
219 }
224 }
220
225
221 fn v1_mode(&self) -> i32 {
226 fn v1_mode(&self) -> i32 {
222 if let Some((mode, _size)) = self.mode_size {
227 if let Some((mode, _size)) = self.mode_size {
223 mode
228 mode
224 } else {
229 } else {
225 0
230 0
226 }
231 }
227 }
232 }
228
233
229 fn v1_size(&self) -> i32 {
234 fn v1_size(&self) -> i32 {
230 if !self.any_tracked() {
235 if !self.any_tracked() {
231 // TODO: return an Option instead?
236 // TODO: return an Option instead?
232 panic!("Accessing v1_size of an untracked DirstateEntry")
237 panic!("Accessing v1_size of an untracked DirstateEntry")
233 }
238 }
234 if self.removed()
239 if self.removed()
235 && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO)
240 && self.flags.contains(Flags::P1_TRACKED | Flags::P2_INFO)
236 {
241 {
237 SIZE_NON_NORMAL
242 SIZE_NON_NORMAL
238 } else if self.flags.contains(Flags::P2_INFO) {
243 } else if self.flags.contains(Flags::P2_INFO) {
239 SIZE_FROM_OTHER_PARENT
244 SIZE_FROM_OTHER_PARENT
240 } else if self.removed() {
245 } else if self.removed() {
241 0
246 0
242 } else if self.added() {
247 } else if self.added() {
243 SIZE_NON_NORMAL
248 SIZE_NON_NORMAL
244 } else if let Some((_mode, size)) = self.mode_size {
249 } else if let Some((_mode, size)) = self.mode_size {
245 size
250 size
246 } else {
251 } else {
247 SIZE_NON_NORMAL
252 SIZE_NON_NORMAL
248 }
253 }
249 }
254 }

    fn v1_mtime(&self) -> i32 {
        if !self.any_tracked() {
            // TODO: return an Option instead?
            panic!("Accessing v1_mtime of an untracked DirstateEntry")
        }
        if self.removed() {
            0
        } else if self.flags.contains(Flags::P2_INFO) {
            MTIME_UNSET
        } else if !self.flags.contains(Flags::P1_TRACKED) {
            MTIME_UNSET
        } else {
            self.mtime.unwrap_or(MTIME_UNSET)
        }
    }

    // TODO: return `Option<EntryState>`? None when `!self.any_tracked`
    pub fn state(&self) -> EntryState {
        self.v1_state()
    }

    // TODO: return Option?
    pub fn mode(&self) -> i32 {
        self.v1_mode()
    }

    // TODO: return Option?
    pub fn size(&self) -> i32 {
        self.v1_size()
    }

    // TODO: return Option?
    pub fn mtime(&self) -> i32 {
        self.v1_mtime()
    }

    pub fn drop_merge_data(&mut self) {
        if self.flags.contains(Flags::P2_INFO) {
            self.flags.remove(Flags::P2_INFO);
            self.mode_size = None;
            self.mtime = None;
        }
    }

    pub fn set_possibly_dirty(&mut self) {
        self.mtime = None
    }

    pub fn set_clean(&mut self, mode: i32, size: i32, mtime: i32) {
        self.flags.insert(Flags::WDIR_TRACKED | Flags::P1_TRACKED);
        self.mode_size = Some((mode, size));
        self.mtime = Some(mtime);
    }

    pub fn set_tracked(&mut self) {
        self.flags.insert(Flags::WDIR_TRACKED);
        // `set_tracked` replaces various `normallookup` calls, so we mark
        // the file as needing a lookup.
        //
        // Consider dropping this in the future in favor of something less
        // broad.
        self.mtime = None;
    }

    pub fn set_untracked(&mut self) {
        self.flags.remove(Flags::WDIR_TRACKED);
        self.mode_size = None;
        self.mtime = None;
    }

    /// Returns `(state, mode, size, mtime)` for the purpose of serialization
    /// in the dirstate-v1 format.
    ///
    /// This includes marker values such as `mtime == -1`. In the future we
    /// may want to stop representing these cases this way in memory, but
    /// serialization will need to keep the same format.
    pub fn v1_data(&self) -> (u8, i32, i32, i32) {
        (
            self.v1_state().into(),
            self.v1_mode(),
            self.v1_size(),
            self.v1_mtime(),
        )
    }
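
    // Illustrative sketch (editorial, not part of this change): a v1 writer
    // would typically consume this tuple directly, along the lines of
    //
    //     let (state, mode, size, mtime) = entry.v1_data();
    //     pack_dirstate_entry(&mut out, state, mode, size, mtime);
    //
    // where `pack_dirstate_entry` is a hypothetical helper standing in for
    // whatever serialization routine the caller actually uses.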

    pub(crate) fn is_from_other_parent(&self) -> bool {
        self.state() == EntryState::Normal
            && self.size() == SIZE_FROM_OTHER_PARENT
    }

    // TODO: other platforms
    #[cfg(unix)]
    pub fn mode_changed(
        &self,
        filesystem_metadata: &std::fs::Metadata,
    ) -> bool {
        use std::os::unix::fs::MetadataExt;
        const EXEC_BIT_MASK: u32 = 0o100;
        let dirstate_exec_bit = (self.mode() as u32) & EXEC_BIT_MASK;
        let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
        dirstate_exec_bit != fs_exec_bit
    }
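
    // Note (editorial): only the owner-executable bit (0o100) is compared, so
    // a file recorded as 0o644 that becomes 0o755 on disk reports a change,
    // while e.g. a group-write change (0o644 -> 0o664) does not.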

    /// Returns a `(state, mode, size, mtime)` tuple as for
    /// `DirstateMapMethods::debug_iter`.
    pub fn debug_tuple(&self) -> (u8, i32, i32, i32) {
        (self.state().into(), self.mode(), self.size(), self.mtime())
    }

    pub fn mtime_is_ambiguous(&self, now: i32) -> bool {
        self.state() == EntryState::Normal && self.mtime() == now
    }

    pub fn clear_ambiguous_mtime(&mut self, now: i32) -> bool {
        let ambiguous = self.mtime_is_ambiguous(now);
        if ambiguous {
            // The file was last modified "simultaneously" with the current
            // write to dirstate (i.e. within the same second for file-
            // systems with a granularity of 1 sec). This commonly happens
            // for at least a couple of files on 'update'.
            // The user could change the file without changing its size
            // within the same second. Invalidate the file's mtime in
            // dirstate, forcing future 'status' calls to compare the
            // contents of the file if the size is the same. This prevents
            // mistakenly treating such files as clean.
            self.set_possibly_dirty()
        }
        ambiguous
    }
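
    // Illustrative sketch (editorial, not part of this change): callers that
    // persist the dirstate are expected to pass the timestamp of the write,
    // roughly
    //
    //     let changed = entry.clear_ambiguous_mtime(now);
    //
    // so that entries touched in the same second as the write get re-checked
    // by the next `status`; `now` here stands for whatever clock value the
    // caller uses for that write.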
}

impl EntryState {
    pub fn is_tracked(self) -> bool {
        use EntryState::*;
        match self {
            Normal | Added | Merged => true,
            Removed => false,
        }
    }
}

impl TryFrom<u8> for EntryState {
    type Error = HgError;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        match value {
            b'n' => Ok(EntryState::Normal),
            b'a' => Ok(EntryState::Added),
            b'r' => Ok(EntryState::Removed),
            b'm' => Ok(EntryState::Merged),
            _ => Err(HgError::CorruptedRepository(format!(
                "Incorrect dirstate entry state {}",
                value
            ))),
        }
    }
}

impl Into<u8> for EntryState {
    fn into(self) -> u8 {
        match self {
            EntryState::Normal => b'n',
            EntryState::Added => b'a',
            EntryState::Removed => b'r',
            EntryState::Merged => b'm',
        }
    }
}
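
// Illustrative sketch (editorial, not part of this change): the two impls
// above give a byte round-trip for the v1 state column, e.g.
//
//     assert!(EntryState::try_from(b'm').unwrap() == EntryState::Merged);
//     let byte: u8 = EntryState::Merged.into();
//     assert!(byte == b'm');
//
// Unknown state bytes surface as `HgError::CorruptedRepository`.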