dirstate-v2: Support appending to the same data file...
Simon Sapin
r48478:065e6162 default
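The substance of this change is at the end of `dirstatemap.write()` in the Rust/dirstate-v2 class below: when a dirstate-v2 data file already exists (the docket has a uuid), the newly packed entries are appended to it and only the small docket in `.hg/dirstate` is rewritten; otherwise a fresh data file is written and a new docket is made to point at it. The following is a condensed sketch paraphrasing the new method shown in the diff (the method name is illustrative, and the v1 branch and the transaction-deferred cleanup of the old data file are omitted); it is not the verbatim implementation.

# Condensed sketch of the new dirstate-v2 write path (paraphrased from the
# method in the diff below; error handling and the v1 branch are omitted).
def write_v2_sketch(self, tr, st, now):
    # Appending is only possible when a data file already exists.
    can_append = self.docket.uuid is not None
    packed, appended = self._rustmap.write_v2(now, can_append)
    if appended:
        docket = self.docket
        data_filename = docket.data_filename()
        if tr:
            # Journal the current size so an aborted transaction can
            # truncate the data file back to it.
            tr.add(data_filename, docket.data_size)
        with self._opener(data_filename, b'r+b') as fp:
            fp.seek(docket.data_size)
            fp.write(packed)  # grow the existing data file in place
        docket.data_size += len(packed)
        docket.parents = self.parents()
        st.write(docket.serialize())  # only the docket is rewritten
    else:
        old_docket = self.docket
        new_docket = docketmod.DirstateDocket.with_new_uuid(
            self.parents(), len(packed)
        )
        self._opener.write(new_docket.data_filename(), packed)
        st.write(new_docket.serialize())  # point .hg/dirstate at the new file
        if old_docket.uuid:
            self._opener.unlink(old_docket.data_filename())
        self._docket = new_docket
    st.close()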
@@ -1,719 +1,750 b''
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 from .dirstateutils import (
21 from .dirstateutils import (
22 docket as docketmod,
22 docket as docketmod,
23 )
23 )
24
24
25 parsers = policy.importmod('parsers')
25 parsers = policy.importmod('parsers')
26 rustmod = policy.importrust('dirstate')
26 rustmod = policy.importrust('dirstate')
27
27
28 propertycache = util.propertycache
28 propertycache = util.propertycache
29
29
30 DirstateItem = parsers.DirstateItem
30 DirstateItem = parsers.DirstateItem
31
31
32
32
33 # a special value used internally for `size` if the file comes from the other parent
33 # a special value used internally for `size` if the file comes from the other parent
34 FROM_P2 = -2
34 FROM_P2 = -2
35
35
36 # a special value used internally for `size` if the file is modified/merged/added
36 # a special value used internally for `size` if the file is modified/merged/added
37 NONNORMAL = -1
37 NONNORMAL = -1
38
38
39 # a special value used internally for `time` if the time is ambiguous
39 # a special value used internally for `time` if the time is ambiguous
40 AMBIGUOUS_TIME = -1
40 AMBIGUOUS_TIME = -1
41
41
42 rangemask = 0x7FFFFFFF
42 rangemask = 0x7FFFFFFF
43
43
44
44
45 class dirstatemap(object):
45 class dirstatemap(object):
46 """Map encapsulating the dirstate's contents.
46 """Map encapsulating the dirstate's contents.
47
47
48 The dirstate contains the following state:
48 The dirstate contains the following state:
49
49
50 - `identity` is the identity of the dirstate file, which can be used to
50 - `identity` is the identity of the dirstate file, which can be used to
51 detect when changes have occurred to the dirstate file.
51 detect when changes have occurred to the dirstate file.
52
52
53 - `parents` is a pair containing the parents of the working copy. The
53 - `parents` is a pair containing the parents of the working copy. The
54 parents are updated by calling `setparents`.
54 parents are updated by calling `setparents`.
55
55
56 - the state map maps filenames to tuples of (state, mode, size, mtime),
56 - the state map maps filenames to tuples of (state, mode, size, mtime),
57 where state is a single character representing 'normal', 'added',
57 where state is a single character representing 'normal', 'added',
58 'removed', or 'merged'. It is read by treating the dirstate as a
58 'removed', or 'merged'. It is read by treating the dirstate as a
59 dict. File state is updated by calling the `addfile`, `removefile` and
59 dict. File state is updated by calling the `addfile`, `removefile` and
60 `dropfile` methods.
60 `dropfile` methods.
61
61
62 - `copymap` maps destination filenames to their source filename.
62 - `copymap` maps destination filenames to their source filename.
63
63
64 The dirstate also provides the following views onto the state:
64 The dirstate also provides the following views onto the state:
65
65
66 - `nonnormalset` is a set of the filenames that have state other
66 - `nonnormalset` is a set of the filenames that have state other
67 than 'normal', or are normal but have an mtime of -1 ('normallookup').
67 than 'normal', or are normal but have an mtime of -1 ('normallookup').
68
68
69 - `otherparentset` is a set of the filenames that are marked as coming
69 - `otherparentset` is a set of the filenames that are marked as coming
70 from the second parent when the dirstate is currently being merged.
70 from the second parent when the dirstate is currently being merged.
71
71
72 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
72 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
73 form that they appear as in the dirstate.
73 form that they appear as in the dirstate.
74
74
75 - `dirfoldmap` is a dict mapping normalized directory names to the
75 - `dirfoldmap` is a dict mapping normalized directory names to the
76 denormalized form that they appear as in the dirstate.
76 denormalized form that they appear as in the dirstate.
77 """
77 """
78
78
79 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
79 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
80 self._ui = ui
80 self._ui = ui
81 self._opener = opener
81 self._opener = opener
82 self._root = root
82 self._root = root
83 self._filename = b'dirstate'
83 self._filename = b'dirstate'
84 self._nodelen = 20
84 self._nodelen = 20
85 self._nodeconstants = nodeconstants
85 self._nodeconstants = nodeconstants
86 assert (
86 assert (
87 not use_dirstate_v2
87 not use_dirstate_v2
88 ), "should have detected unsupported requirement"
88 ), "should have detected unsupported requirement"
89
89
90 self._parents = None
90 self._parents = None
91 self._dirtyparents = False
91 self._dirtyparents = False
92
92
93 # for consistent view between _pl() and _read() invocations
93 # for consistent view between _pl() and _read() invocations
94 self._pendingmode = None
94 self._pendingmode = None
95
95
96 @propertycache
96 @propertycache
97 def _map(self):
97 def _map(self):
98 self._map = {}
98 self._map = {}
99 self.read()
99 self.read()
100 return self._map
100 return self._map
101
101
102 @propertycache
102 @propertycache
103 def copymap(self):
103 def copymap(self):
104 self.copymap = {}
104 self.copymap = {}
105 self._map
105 self._map
106 return self.copymap
106 return self.copymap
107
107
108 def directories(self):
108 def directories(self):
109 # Rust / dirstate-v2 only
109 # Rust / dirstate-v2 only
110 return []
110 return []
111
111
112 def clear(self):
112 def clear(self):
113 self._map.clear()
113 self._map.clear()
114 self.copymap.clear()
114 self.copymap.clear()
115 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
115 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
116 util.clearcachedproperty(self, b"_dirs")
116 util.clearcachedproperty(self, b"_dirs")
117 util.clearcachedproperty(self, b"_alldirs")
117 util.clearcachedproperty(self, b"_alldirs")
118 util.clearcachedproperty(self, b"filefoldmap")
118 util.clearcachedproperty(self, b"filefoldmap")
119 util.clearcachedproperty(self, b"dirfoldmap")
119 util.clearcachedproperty(self, b"dirfoldmap")
120 util.clearcachedproperty(self, b"nonnormalset")
120 util.clearcachedproperty(self, b"nonnormalset")
121 util.clearcachedproperty(self, b"otherparentset")
121 util.clearcachedproperty(self, b"otherparentset")
122
122
123 def items(self):
123 def items(self):
124 return pycompat.iteritems(self._map)
124 return pycompat.iteritems(self._map)
125
125
126 # forward for python2,3 compat
126 # forward for python2,3 compat
127 iteritems = items
127 iteritems = items
128
128
129 def __len__(self):
129 def __len__(self):
130 return len(self._map)
130 return len(self._map)
131
131
132 def __iter__(self):
132 def __iter__(self):
133 return iter(self._map)
133 return iter(self._map)
134
134
135 def get(self, key, default=None):
135 def get(self, key, default=None):
136 return self._map.get(key, default)
136 return self._map.get(key, default)
137
137
138 def __contains__(self, key):
138 def __contains__(self, key):
139 return key in self._map
139 return key in self._map
140
140
141 def __getitem__(self, key):
141 def __getitem__(self, key):
142 return self._map[key]
142 return self._map[key]
143
143
144 def keys(self):
144 def keys(self):
145 return self._map.keys()
145 return self._map.keys()
146
146
147 def preload(self):
147 def preload(self):
148 """Loads the underlying data, if it's not already loaded"""
148 """Loads the underlying data, if it's not already loaded"""
149 self._map
149 self._map
150
150
151 def addfile(
151 def addfile(
152 self,
152 self,
153 f,
153 f,
154 mode=0,
154 mode=0,
155 size=None,
155 size=None,
156 mtime=None,
156 mtime=None,
157 added=False,
157 added=False,
158 merged=False,
158 merged=False,
159 from_p2=False,
159 from_p2=False,
160 possibly_dirty=False,
160 possibly_dirty=False,
161 ):
161 ):
162 """Add a tracked file to the dirstate."""
162 """Add a tracked file to the dirstate."""
163 if added:
163 if added:
164 assert not merged
164 assert not merged
165 assert not possibly_dirty
165 assert not possibly_dirty
166 assert not from_p2
166 assert not from_p2
167 state = b'a'
167 state = b'a'
168 size = NONNORMAL
168 size = NONNORMAL
169 mtime = AMBIGUOUS_TIME
169 mtime = AMBIGUOUS_TIME
170 elif merged:
170 elif merged:
171 assert not possibly_dirty
171 assert not possibly_dirty
172 assert not from_p2
172 assert not from_p2
173 state = b'm'
173 state = b'm'
174 size = FROM_P2
174 size = FROM_P2
175 mtime = AMBIGUOUS_TIME
175 mtime = AMBIGUOUS_TIME
176 elif from_p2:
176 elif from_p2:
177 assert not possibly_dirty
177 assert not possibly_dirty
178 state = b'n'
178 state = b'n'
179 size = FROM_P2
179 size = FROM_P2
180 mtime = AMBIGUOUS_TIME
180 mtime = AMBIGUOUS_TIME
181 elif possibly_dirty:
181 elif possibly_dirty:
182 state = b'n'
182 state = b'n'
183 size = NONNORMAL
183 size = NONNORMAL
184 mtime = AMBIGUOUS_TIME
184 mtime = AMBIGUOUS_TIME
185 else:
185 else:
186 assert size != FROM_P2
186 assert size != FROM_P2
187 assert size != NONNORMAL
187 assert size != NONNORMAL
188 state = b'n'
188 state = b'n'
189 size = size & rangemask
189 size = size & rangemask
190 mtime = mtime & rangemask
190 mtime = mtime & rangemask
191 assert state is not None
191 assert state is not None
192 assert size is not None
192 assert size is not None
193 assert mtime is not None
193 assert mtime is not None
194 old_entry = self.get(f)
194 old_entry = self.get(f)
195 if (
195 if (
196 old_entry is None or old_entry.removed
196 old_entry is None or old_entry.removed
197 ) and "_dirs" in self.__dict__:
197 ) and "_dirs" in self.__dict__:
198 self._dirs.addpath(f)
198 self._dirs.addpath(f)
199 if old_entry is None and "_alldirs" in self.__dict__:
199 if old_entry is None and "_alldirs" in self.__dict__:
200 self._alldirs.addpath(f)
200 self._alldirs.addpath(f)
201 self._map[f] = DirstateItem(state, mode, size, mtime)
201 self._map[f] = DirstateItem(state, mode, size, mtime)
202 if state != b'n' or mtime == AMBIGUOUS_TIME:
202 if state != b'n' or mtime == AMBIGUOUS_TIME:
203 self.nonnormalset.add(f)
203 self.nonnormalset.add(f)
204 if size == FROM_P2:
204 if size == FROM_P2:
205 self.otherparentset.add(f)
205 self.otherparentset.add(f)
206
206
207 def removefile(self, f, in_merge=False):
207 def removefile(self, f, in_merge=False):
208 """
208 """
209 Mark a file as removed in the dirstate.
209 Mark a file as removed in the dirstate.
210
210
211 The `size` parameter is used to store sentinel values that indicate
211 The `size` parameter is used to store sentinel values that indicate
212 the file's previous state. In the future, we should refactor this
212 the file's previous state. In the future, we should refactor this
213 to be more explicit about what that state is.
213 to be more explicit about what that state is.
214 """
214 """
215 entry = self.get(f)
215 entry = self.get(f)
216 size = 0
216 size = 0
217 if in_merge:
217 if in_merge:
218 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
218 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
219 # during a merge. So I (marmoute) am not sure we need the
219 # during a merge. So I (marmoute) am not sure we need the
220 # conditional at all. Double-checking this with an assert
220 # conditional at all. Double-checking this with an assert
221 # would be nice.
221 # would be nice.
222 if entry is not None:
222 if entry is not None:
223 # backup the previous state
223 # backup the previous state
224 if entry.merged: # merge
224 if entry.merged: # merge
225 size = NONNORMAL
225 size = NONNORMAL
226 elif entry.from_p2:
226 elif entry.from_p2:
227 size = FROM_P2
227 size = FROM_P2
228 self.otherparentset.add(f)
228 self.otherparentset.add(f)
229 if entry is not None and not (entry.merged or entry.from_p2):
229 if entry is not None and not (entry.merged or entry.from_p2):
230 self.copymap.pop(f, None)
230 self.copymap.pop(f, None)
231
231
232 if entry is not None and not entry.removed and "_dirs" in self.__dict__:
232 if entry is not None and not entry.removed and "_dirs" in self.__dict__:
233 self._dirs.delpath(f)
233 self._dirs.delpath(f)
234 if entry is None and "_alldirs" in self.__dict__:
234 if entry is None and "_alldirs" in self.__dict__:
235 self._alldirs.addpath(f)
235 self._alldirs.addpath(f)
236 if "filefoldmap" in self.__dict__:
236 if "filefoldmap" in self.__dict__:
237 normed = util.normcase(f)
237 normed = util.normcase(f)
238 self.filefoldmap.pop(normed, None)
238 self.filefoldmap.pop(normed, None)
239 self._map[f] = DirstateItem(b'r', 0, size, 0)
239 self._map[f] = DirstateItem(b'r', 0, size, 0)
240 self.nonnormalset.add(f)
240 self.nonnormalset.add(f)
241
241
242 def dropfile(self, f):
242 def dropfile(self, f):
243 """
243 """
244 Remove a file from the dirstate. Returns True if the file was
244 Remove a file from the dirstate. Returns True if the file was
245 previously recorded.
245 previously recorded.
246 """
246 """
247 old_entry = self._map.pop(f, None)
247 old_entry = self._map.pop(f, None)
248 exists = False
248 exists = False
249 oldstate = b'?'
249 oldstate = b'?'
250 if old_entry is not None:
250 if old_entry is not None:
251 exists = True
251 exists = True
252 oldstate = old_entry.state
252 oldstate = old_entry.state
253 if exists:
253 if exists:
254 if oldstate != b"r" and "_dirs" in self.__dict__:
254 if oldstate != b"r" and "_dirs" in self.__dict__:
255 self._dirs.delpath(f)
255 self._dirs.delpath(f)
256 if "_alldirs" in self.__dict__:
256 if "_alldirs" in self.__dict__:
257 self._alldirs.delpath(f)
257 self._alldirs.delpath(f)
258 if "filefoldmap" in self.__dict__:
258 if "filefoldmap" in self.__dict__:
259 normed = util.normcase(f)
259 normed = util.normcase(f)
260 self.filefoldmap.pop(normed, None)
260 self.filefoldmap.pop(normed, None)
261 self.nonnormalset.discard(f)
261 self.nonnormalset.discard(f)
262 return exists
262 return exists
263
263
264 def clearambiguoustimes(self, files, now):
264 def clearambiguoustimes(self, files, now):
265 for f in files:
265 for f in files:
266 e = self.get(f)
266 e = self.get(f)
267 if e is not None and e.need_delay(now):
267 if e is not None and e.need_delay(now):
268 e.set_possibly_dirty()
268 e.set_possibly_dirty()
269 self.nonnormalset.add(f)
269 self.nonnormalset.add(f)
270
270
271 def nonnormalentries(self):
271 def nonnormalentries(self):
272 '''Compute the nonnormal dirstate entries from the dmap'''
272 '''Compute the nonnormal dirstate entries from the dmap'''
273 try:
273 try:
274 return parsers.nonnormalotherparententries(self._map)
274 return parsers.nonnormalotherparententries(self._map)
275 except AttributeError:
275 except AttributeError:
276 nonnorm = set()
276 nonnorm = set()
277 otherparent = set()
277 otherparent = set()
278 for fname, e in pycompat.iteritems(self._map):
278 for fname, e in pycompat.iteritems(self._map):
279 if e.state != b'n' or e.mtime == AMBIGUOUS_TIME:
279 if e.state != b'n' or e.mtime == AMBIGUOUS_TIME:
280 nonnorm.add(fname)
280 nonnorm.add(fname)
281 if e.from_p2:
281 if e.from_p2:
282 otherparent.add(fname)
282 otherparent.add(fname)
283 return nonnorm, otherparent
283 return nonnorm, otherparent
284
284
285 @propertycache
285 @propertycache
286 def filefoldmap(self):
286 def filefoldmap(self):
287 """Returns a dictionary mapping normalized case paths to their
287 """Returns a dictionary mapping normalized case paths to their
288 non-normalized versions.
288 non-normalized versions.
289 """
289 """
290 try:
290 try:
291 makefilefoldmap = parsers.make_file_foldmap
291 makefilefoldmap = parsers.make_file_foldmap
292 except AttributeError:
292 except AttributeError:
293 pass
293 pass
294 else:
294 else:
295 return makefilefoldmap(
295 return makefilefoldmap(
296 self._map, util.normcasespec, util.normcasefallback
296 self._map, util.normcasespec, util.normcasefallback
297 )
297 )
298
298
299 f = {}
299 f = {}
300 normcase = util.normcase
300 normcase = util.normcase
301 for name, s in pycompat.iteritems(self._map):
301 for name, s in pycompat.iteritems(self._map):
302 if not s.removed:
302 if not s.removed:
303 f[normcase(name)] = name
303 f[normcase(name)] = name
304 f[b'.'] = b'.' # prevents useless util.fspath() invocation
304 f[b'.'] = b'.' # prevents useless util.fspath() invocation
305 return f
305 return f
306
306
307 def hastrackeddir(self, d):
307 def hastrackeddir(self, d):
308 """
308 """
309 Returns True if the dirstate contains a tracked (not removed) file
309 Returns True if the dirstate contains a tracked (not removed) file
310 in this directory.
310 in this directory.
311 """
311 """
312 return d in self._dirs
312 return d in self._dirs
313
313
314 def hasdir(self, d):
314 def hasdir(self, d):
315 """
315 """
316 Returns True if the dirstate contains a file (tracked or removed)
316 Returns True if the dirstate contains a file (tracked or removed)
317 in this directory.
317 in this directory.
318 """
318 """
319 return d in self._alldirs
319 return d in self._alldirs
320
320
321 @propertycache
321 @propertycache
322 def _dirs(self):
322 def _dirs(self):
323 return pathutil.dirs(self._map, b'r')
323 return pathutil.dirs(self._map, b'r')
324
324
325 @propertycache
325 @propertycache
326 def _alldirs(self):
326 def _alldirs(self):
327 return pathutil.dirs(self._map)
327 return pathutil.dirs(self._map)
328
328
329 def _opendirstatefile(self):
329 def _opendirstatefile(self):
330 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
330 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
331 if self._pendingmode is not None and self._pendingmode != mode:
331 if self._pendingmode is not None and self._pendingmode != mode:
332 fp.close()
332 fp.close()
333 raise error.Abort(
333 raise error.Abort(
334 _(b'working directory state may be changed parallelly')
334 _(b'working directory state may be changed parallelly')
335 )
335 )
336 self._pendingmode = mode
336 self._pendingmode = mode
337 return fp
337 return fp
338
338
339 def parents(self):
339 def parents(self):
340 if not self._parents:
340 if not self._parents:
341 try:
341 try:
342 fp = self._opendirstatefile()
342 fp = self._opendirstatefile()
343 st = fp.read(2 * self._nodelen)
343 st = fp.read(2 * self._nodelen)
344 fp.close()
344 fp.close()
345 except IOError as err:
345 except IOError as err:
346 if err.errno != errno.ENOENT:
346 if err.errno != errno.ENOENT:
347 raise
347 raise
348 # File doesn't exist, so the current state is empty
348 # File doesn't exist, so the current state is empty
349 st = b''
349 st = b''
350
350
351 l = len(st)
351 l = len(st)
352 if l == self._nodelen * 2:
352 if l == self._nodelen * 2:
353 self._parents = (
353 self._parents = (
354 st[: self._nodelen],
354 st[: self._nodelen],
355 st[self._nodelen : 2 * self._nodelen],
355 st[self._nodelen : 2 * self._nodelen],
356 )
356 )
357 elif l == 0:
357 elif l == 0:
358 self._parents = (
358 self._parents = (
359 self._nodeconstants.nullid,
359 self._nodeconstants.nullid,
360 self._nodeconstants.nullid,
360 self._nodeconstants.nullid,
361 )
361 )
362 else:
362 else:
363 raise error.Abort(
363 raise error.Abort(
364 _(b'working directory state appears damaged!')
364 _(b'working directory state appears damaged!')
365 )
365 )
366
366
367 return self._parents
367 return self._parents
368
368
369 def setparents(self, p1, p2):
369 def setparents(self, p1, p2):
370 self._parents = (p1, p2)
370 self._parents = (p1, p2)
371 self._dirtyparents = True
371 self._dirtyparents = True
372
372
373 def read(self):
373 def read(self):
374 # ignore HG_PENDING because identity is used only for writing
374 # ignore HG_PENDING because identity is used only for writing
375 self.identity = util.filestat.frompath(
375 self.identity = util.filestat.frompath(
376 self._opener.join(self._filename)
376 self._opener.join(self._filename)
377 )
377 )
378
378
379 try:
379 try:
380 fp = self._opendirstatefile()
380 fp = self._opendirstatefile()
381 try:
381 try:
382 st = fp.read()
382 st = fp.read()
383 finally:
383 finally:
384 fp.close()
384 fp.close()
385 except IOError as err:
385 except IOError as err:
386 if err.errno != errno.ENOENT:
386 if err.errno != errno.ENOENT:
387 raise
387 raise
388 return
388 return
389 if not st:
389 if not st:
390 return
390 return
391
391
392 if util.safehasattr(parsers, b'dict_new_presized'):
392 if util.safehasattr(parsers, b'dict_new_presized'):
393 # Make an estimate of the number of files in the dirstate based on
393 # Make an estimate of the number of files in the dirstate based on
394 # its size. This trades wasting some memory for avoiding costly
394 # its size. This trades wasting some memory for avoiding costly
395 # resizes. Each entry has a prefix of 17 bytes followed by one or
395 # resizes. Each entry has a prefix of 17 bytes followed by one or
396 # two path names. Studies on various large-scale real-world repositories
396 # two path names. Studies on various large-scale real-world repositories
397 # found 54 bytes to be a reasonable upper limit for the average path name.
397 # found 54 bytes to be a reasonable upper limit for the average path name.
398 # Copy entries are ignored for the sake of this estimate.
398 # Copy entries are ignored for the sake of this estimate.
399 self._map = parsers.dict_new_presized(len(st) // 71)
399 self._map = parsers.dict_new_presized(len(st) // 71)
400
400
401 # Python's garbage collector triggers a GC each time a certain number
401 # Python's garbage collector triggers a GC each time a certain number
402 # of container objects (the number being defined by
402 # of container objects (the number being defined by
403 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
403 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
404 # for each file in the dirstate. The C version then immediately marks
404 # for each file in the dirstate. The C version then immediately marks
405 # them as not to be tracked by the collector. However, this has no
405 # them as not to be tracked by the collector. However, this has no
406 # effect on when GCs are triggered, only on what objects the GC looks
406 # effect on when GCs are triggered, only on what objects the GC looks
407 # into. This means that O(number of files) GCs are unavoidable.
407 # into. This means that O(number of files) GCs are unavoidable.
408 # Depending on when in the process's lifetime the dirstate is parsed,
408 # Depending on when in the process's lifetime the dirstate is parsed,
409 # this can get very expensive. As a workaround, disable GC while
409 # this can get very expensive. As a workaround, disable GC while
410 # parsing the dirstate.
410 # parsing the dirstate.
411 #
411 #
412 # (we cannot decorate the function directly since it is in a C module)
412 # (we cannot decorate the function directly since it is in a C module)
413 parse_dirstate = util.nogc(parsers.parse_dirstate)
413 parse_dirstate = util.nogc(parsers.parse_dirstate)
414 p = parse_dirstate(self._map, self.copymap, st)
414 p = parse_dirstate(self._map, self.copymap, st)
415 if not self._dirtyparents:
415 if not self._dirtyparents:
416 self.setparents(*p)
416 self.setparents(*p)
417
417
418 # Avoid excess attribute lookups by fast pathing certain checks
418 # Avoid excess attribute lookups by fast pathing certain checks
419 self.__contains__ = self._map.__contains__
419 self.__contains__ = self._map.__contains__
420 self.__getitem__ = self._map.__getitem__
420 self.__getitem__ = self._map.__getitem__
421 self.get = self._map.get
421 self.get = self._map.get
422
422
423 def write(self, _tr, st, now):
423 def write(self, _tr, st, now):
424 st.write(
424 st.write(
425 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
425 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
426 )
426 )
427 st.close()
427 st.close()
428 self._dirtyparents = False
428 self._dirtyparents = False
429 self.nonnormalset, self.otherparentset = self.nonnormalentries()
429 self.nonnormalset, self.otherparentset = self.nonnormalentries()
430
430
431 @propertycache
431 @propertycache
432 def nonnormalset(self):
432 def nonnormalset(self):
433 nonnorm, otherparents = self.nonnormalentries()
433 nonnorm, otherparents = self.nonnormalentries()
434 self.otherparentset = otherparents
434 self.otherparentset = otherparents
435 return nonnorm
435 return nonnorm
436
436
437 @propertycache
437 @propertycache
438 def otherparentset(self):
438 def otherparentset(self):
439 nonnorm, otherparents = self.nonnormalentries()
439 nonnorm, otherparents = self.nonnormalentries()
440 self.nonnormalset = nonnorm
440 self.nonnormalset = nonnorm
441 return otherparents
441 return otherparents
442
442
443 def non_normal_or_other_parent_paths(self):
443 def non_normal_or_other_parent_paths(self):
444 return self.nonnormalset.union(self.otherparentset)
444 return self.nonnormalset.union(self.otherparentset)
445
445
446 @propertycache
446 @propertycache
447 def identity(self):
447 def identity(self):
448 self._map
448 self._map
449 return self.identity
449 return self.identity
450
450
451 @propertycache
451 @propertycache
452 def dirfoldmap(self):
452 def dirfoldmap(self):
453 f = {}
453 f = {}
454 normcase = util.normcase
454 normcase = util.normcase
455 for name in self._dirs:
455 for name in self._dirs:
456 f[normcase(name)] = name
456 f[normcase(name)] = name
457 return f
457 return f
458
458
459
459
460 if rustmod is not None:
460 if rustmod is not None:
461
461
462 class dirstatemap(object):
462 class dirstatemap(object):
463 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
463 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
464 self._use_dirstate_v2 = use_dirstate_v2
464 self._use_dirstate_v2 = use_dirstate_v2
465 self._nodeconstants = nodeconstants
465 self._nodeconstants = nodeconstants
466 self._ui = ui
466 self._ui = ui
467 self._opener = opener
467 self._opener = opener
468 self._root = root
468 self._root = root
469 self._filename = b'dirstate'
469 self._filename = b'dirstate'
470 self._nodelen = 20 # Also update Rust code when changing this!
470 self._nodelen = 20 # Also update Rust code when changing this!
471 self._parents = None
471 self._parents = None
472 self._dirtyparents = False
472 self._dirtyparents = False
473 self._docket = None
473 self._docket = None
474
474
475 # for consistent view between _pl() and _read() invocations
475 # for consistent view between _pl() and _read() invocations
476 self._pendingmode = None
476 self._pendingmode = None
477
477
478 self._use_dirstate_tree = self._ui.configbool(
478 self._use_dirstate_tree = self._ui.configbool(
479 b"experimental",
479 b"experimental",
480 b"dirstate-tree.in-memory",
480 b"dirstate-tree.in-memory",
481 False,
481 False,
482 )
482 )
483
483
484 def addfile(
484 def addfile(
485 self,
485 self,
486 f,
486 f,
487 mode=0,
487 mode=0,
488 size=None,
488 size=None,
489 mtime=None,
489 mtime=None,
490 added=False,
490 added=False,
491 merged=False,
491 merged=False,
492 from_p2=False,
492 from_p2=False,
493 possibly_dirty=False,
493 possibly_dirty=False,
494 ):
494 ):
495 return self._rustmap.addfile(
495 return self._rustmap.addfile(
496 f,
496 f,
497 mode,
497 mode,
498 size,
498 size,
499 mtime,
499 mtime,
500 added,
500 added,
501 merged,
501 merged,
502 from_p2,
502 from_p2,
503 possibly_dirty,
503 possibly_dirty,
504 )
504 )
505
505
506 def removefile(self, *args, **kwargs):
506 def removefile(self, *args, **kwargs):
507 return self._rustmap.removefile(*args, **kwargs)
507 return self._rustmap.removefile(*args, **kwargs)
508
508
509 def dropfile(self, *args, **kwargs):
509 def dropfile(self, *args, **kwargs):
510 return self._rustmap.dropfile(*args, **kwargs)
510 return self._rustmap.dropfile(*args, **kwargs)
511
511
512 def clearambiguoustimes(self, *args, **kwargs):
512 def clearambiguoustimes(self, *args, **kwargs):
513 return self._rustmap.clearambiguoustimes(*args, **kwargs)
513 return self._rustmap.clearambiguoustimes(*args, **kwargs)
514
514
515 def nonnormalentries(self):
515 def nonnormalentries(self):
516 return self._rustmap.nonnormalentries()
516 return self._rustmap.nonnormalentries()
517
517
518 def get(self, *args, **kwargs):
518 def get(self, *args, **kwargs):
519 return self._rustmap.get(*args, **kwargs)
519 return self._rustmap.get(*args, **kwargs)
520
520
521 @property
521 @property
522 def copymap(self):
522 def copymap(self):
523 return self._rustmap.copymap()
523 return self._rustmap.copymap()
524
524
525 def directories(self):
525 def directories(self):
526 return self._rustmap.directories()
526 return self._rustmap.directories()
527
527
528 def preload(self):
528 def preload(self):
529 self._rustmap
529 self._rustmap
530
530
531 def clear(self):
531 def clear(self):
532 self._rustmap.clear()
532 self._rustmap.clear()
533 self.setparents(
533 self.setparents(
534 self._nodeconstants.nullid, self._nodeconstants.nullid
534 self._nodeconstants.nullid, self._nodeconstants.nullid
535 )
535 )
536 util.clearcachedproperty(self, b"_dirs")
536 util.clearcachedproperty(self, b"_dirs")
537 util.clearcachedproperty(self, b"_alldirs")
537 util.clearcachedproperty(self, b"_alldirs")
538 util.clearcachedproperty(self, b"dirfoldmap")
538 util.clearcachedproperty(self, b"dirfoldmap")
539
539
540 def items(self):
540 def items(self):
541 return self._rustmap.items()
541 return self._rustmap.items()
542
542
543 def keys(self):
543 def keys(self):
544 return iter(self._rustmap)
544 return iter(self._rustmap)
545
545
546 def __contains__(self, key):
546 def __contains__(self, key):
547 return key in self._rustmap
547 return key in self._rustmap
548
548
549 def __getitem__(self, item):
549 def __getitem__(self, item):
550 return self._rustmap[item]
550 return self._rustmap[item]
551
551
552 def __len__(self):
552 def __len__(self):
553 return len(self._rustmap)
553 return len(self._rustmap)
554
554
555 def __iter__(self):
555 def __iter__(self):
556 return iter(self._rustmap)
556 return iter(self._rustmap)
557
557
558 # forward for python2,3 compat
558 # forward for python2,3 compat
559 iteritems = items
559 iteritems = items
560
560
561 def _opendirstatefile(self):
561 def _opendirstatefile(self):
562 fp, mode = txnutil.trypending(
562 fp, mode = txnutil.trypending(
563 self._root, self._opener, self._filename
563 self._root, self._opener, self._filename
564 )
564 )
565 if self._pendingmode is not None and self._pendingmode != mode:
565 if self._pendingmode is not None and self._pendingmode != mode:
566 fp.close()
566 fp.close()
567 raise error.Abort(
567 raise error.Abort(
568 _(b'working directory state may be changed parallelly')
568 _(b'working directory state may be changed parallelly')
569 )
569 )
570 self._pendingmode = mode
570 self._pendingmode = mode
571 return fp
571 return fp
572
572
573 def _readdirstatefile(self, size=-1):
573 def _readdirstatefile(self, size=-1):
574 try:
574 try:
575 with self._opendirstatefile() as fp:
575 with self._opendirstatefile() as fp:
576 return fp.read(size)
576 return fp.read(size)
577 except IOError as err:
577 except IOError as err:
578 if err.errno != errno.ENOENT:
578 if err.errno != errno.ENOENT:
579 raise
579 raise
580 # File doesn't exist, so the current state is empty
580 # File doesn't exist, so the current state is empty
581 return b''
581 return b''
582
582
583 def setparents(self, p1, p2):
583 def setparents(self, p1, p2):
584 self._parents = (p1, p2)
584 self._parents = (p1, p2)
585 self._dirtyparents = True
585 self._dirtyparents = True
586
586
587 def parents(self):
587 def parents(self):
588 if not self._parents:
588 if not self._parents:
589 if self._use_dirstate_v2:
589 if self._use_dirstate_v2:
590 self._parents = self.docket.parents
590 self._parents = self.docket.parents
591 else:
591 else:
592 read_len = self._nodelen * 2
592 read_len = self._nodelen * 2
593 st = self._readdirstatefile(read_len)
593 st = self._readdirstatefile(read_len)
594 l = len(st)
594 l = len(st)
595 if l == read_len:
595 if l == read_len:
596 self._parents = (
596 self._parents = (
597 st[: self._nodelen],
597 st[: self._nodelen],
598 st[self._nodelen : 2 * self._nodelen],
598 st[self._nodelen : 2 * self._nodelen],
599 )
599 )
600 elif l == 0:
600 elif l == 0:
601 self._parents = (
601 self._parents = (
602 self._nodeconstants.nullid,
602 self._nodeconstants.nullid,
603 self._nodeconstants.nullid,
603 self._nodeconstants.nullid,
604 )
604 )
605 else:
605 else:
606 raise error.Abort(
606 raise error.Abort(
607 _(b'working directory state appears damaged!')
607 _(b'working directory state appears damaged!')
608 )
608 )
609
609
610 return self._parents
610 return self._parents
611
611
612 @property
612 @property
613 def docket(self):
613 def docket(self):
614 if not self._docket:
614 if not self._docket:
615 if not self._use_dirstate_v2:
615 if not self._use_dirstate_v2:
616 raise error.ProgrammingError(
616 raise error.ProgrammingError(
617 b'dirstate only has a docket in v2 format'
617 b'dirstate only has a docket in v2 format'
618 )
618 )
619 self._docket = docketmod.DirstateDocket.parse(
619 self._docket = docketmod.DirstateDocket.parse(
620 self._readdirstatefile(), self._nodeconstants
620 self._readdirstatefile(), self._nodeconstants
621 )
621 )
622 return self._docket
622 return self._docket
623
623
624 @propertycache
624 @propertycache
625 def _rustmap(self):
625 def _rustmap(self):
626 """
626 """
627 Fills the Dirstatemap when called.
627 Fills the Dirstatemap when called.
628 """
628 """
629 # ignore HG_PENDING because identity is used only for writing
629 # ignore HG_PENDING because identity is used only for writing
630 self.identity = util.filestat.frompath(
630 self.identity = util.filestat.frompath(
631 self._opener.join(self._filename)
631 self._opener.join(self._filename)
632 )
632 )
633
633
634 if self._use_dirstate_v2:
634 if self._use_dirstate_v2:
635 if self.docket.uuid:
635 if self.docket.uuid:
636 # TODO: use mmap when possible
636 # TODO: use mmap when possible
637 data = self._opener.read(self.docket.data_filename())
637 data = self._opener.read(self.docket.data_filename())
638 else:
638 else:
639 data = b''
639 data = b''
640 self._rustmap = rustmod.DirstateMap.new_v2(
640 self._rustmap = rustmod.DirstateMap.new_v2(
641 data, self.docket.data_size
641 data, self.docket.data_size
642 )
642 )
643 parents = self.docket.parents
643 parents = self.docket.parents
644 else:
644 else:
645 self._rustmap, parents = rustmod.DirstateMap.new_v1(
645 self._rustmap, parents = rustmod.DirstateMap.new_v1(
646 self._use_dirstate_tree, self._readdirstatefile()
646 self._use_dirstate_tree, self._readdirstatefile()
647 )
647 )
648
648
649 if parents and not self._dirtyparents:
649 if parents and not self._dirtyparents:
650 self.setparents(*parents)
650 self.setparents(*parents)
651
651
652 self.__contains__ = self._rustmap.__contains__
652 self.__contains__ = self._rustmap.__contains__
653 self.__getitem__ = self._rustmap.__getitem__
653 self.__getitem__ = self._rustmap.__getitem__
654 self.get = self._rustmap.get
654 self.get = self._rustmap.get
655 return self._rustmap
655 return self._rustmap
656
656
657 def write(self, tr, st, now):
657 def write(self, tr, st, now):
658 if self._use_dirstate_v2:
658 if not self._use_dirstate_v2:
659 packed = self._rustmap.write_v2(now)
659 p1, p2 = self.parents()
660 packed = self._rustmap.write_v1(p1, p2, now)
661 st.write(packed)
662 st.close()
663 self._dirtyparents = False
664 return
665
666 # We can only append to an existing data file if there is one
667 can_append = self.docket.uuid is not None
668 packed, append = self._rustmap.write_v2(now, can_append)
669 if append:
670 docket = self.docket
671 data_filename = docket.data_filename()
672 if tr:
673 tr.add(data_filename, docket.data_size)
674 with self._opener(data_filename, b'r+b') as fp:
675 fp.seek(docket.data_size)
676 assert fp.tell() == docket.data_size
677 written = fp.write(packed)
678 if written is not None: # py2 may return None
679 assert written == len(packed), (written, len(packed))
680 docket.data_size += len(packed)
681 docket.parents = self.parents()
682 st.write(docket.serialize())
683 st.close()
684 else:
660 old_docket = self.docket
685 old_docket = self.docket
661 new_docket = docketmod.DirstateDocket.with_new_uuid(
686 new_docket = docketmod.DirstateDocket.with_new_uuid(
662 self.parents(), len(packed)
687 self.parents(), len(packed)
663 )
688 )
664 self._opener.write(new_docket.data_filename(), packed)
689 data_filename = new_docket.data_filename()
690 if tr:
691 tr.add(data_filename, 0)
692 self._opener.write(data_filename, packed)
665 # Write the new docket after the new data file has been
693 # Write the new docket after the new data file has been
666 # written. Because `st` was opened with `atomictemp=True`,
694 # written. Because `st` was opened with `atomictemp=True`,
667 # the actual `.hg/dirstate` file is only affected on close.
695 # the actual `.hg/dirstate` file is only affected on close.
668 st.write(new_docket.serialize())
696 st.write(new_docket.serialize())
669 st.close()
697 st.close()
670 # Remove the old data file after the new docket pointing to
698 # Remove the old data file after the new docket pointing to
671 # the new data file was written.
699 # the new data file was written.
672 if old_docket.uuid:
700 if old_docket.uuid:
673 self._opener.unlink(old_docket.data_filename())
701 data_filename = old_docket.data_filename()
702 unlink = lambda _tr=None: self._opener.unlink(data_filename)
703 if tr:
704 category = b"dirstate-v2-clean-" + old_docket.uuid
705 tr.addpostclose(category, unlink)
706 else:
707 unlink()
674 self._docket = new_docket
708 self._docket = new_docket
675 else:
709 # Reload from the newly-written file
676 p1, p2 = self.parents()
710 util.clearcachedproperty(self, b"_rustmap")
677 packed = self._rustmap.write_v1(p1, p2, now)
678 st.write(packed)
679 st.close()
680 self._dirtyparents = False
711 self._dirtyparents = False
681
712
682 @propertycache
713 @propertycache
683 def filefoldmap(self):
714 def filefoldmap(self):
684 """Returns a dictionary mapping normalized case paths to their
715 """Returns a dictionary mapping normalized case paths to their
685 non-normalized versions.
716 non-normalized versions.
686 """
717 """
687 return self._rustmap.filefoldmapasdict()
718 return self._rustmap.filefoldmapasdict()
688
719
689 def hastrackeddir(self, d):
720 def hastrackeddir(self, d):
690 return self._rustmap.hastrackeddir(d)
721 return self._rustmap.hastrackeddir(d)
691
722
692 def hasdir(self, d):
723 def hasdir(self, d):
693 return self._rustmap.hasdir(d)
724 return self._rustmap.hasdir(d)
694
725
695 @propertycache
726 @propertycache
696 def identity(self):
727 def identity(self):
697 self._rustmap
728 self._rustmap
698 return self.identity
729 return self.identity
699
730
700 @property
731 @property
701 def nonnormalset(self):
732 def nonnormalset(self):
702 nonnorm = self._rustmap.non_normal_entries()
733 nonnorm = self._rustmap.non_normal_entries()
703 return nonnorm
734 return nonnorm
704
735
705 @propertycache
736 @propertycache
706 def otherparentset(self):
737 def otherparentset(self):
707 otherparents = self._rustmap.other_parent_entries()
738 otherparents = self._rustmap.other_parent_entries()
708 return otherparents
739 return otherparents
709
740
710 def non_normal_or_other_parent_paths(self):
741 def non_normal_or_other_parent_paths(self):
711 return self._rustmap.non_normal_or_other_parent_paths()
742 return self._rustmap.non_normal_or_other_parent_paths()
712
743
713 @propertycache
744 @propertycache
714 def dirfoldmap(self):
745 def dirfoldmap(self):
715 f = {}
746 f = {}
716 normcase = util.normcase
747 normcase = util.normcase
717 for name, _pseudo_entry in self.directories():
748 for name, _pseudo_entry in self.directories():
718 f[normcase(name)] = name
749 f[normcase(name)] = name
719 return f
750 return f
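One subtlety in the rewrite branch of `write()` above: while a transaction is active, the superseded data file cannot be deleted immediately, since the transaction may still roll `.hg/dirstate` back to a docket that points at it. The new code therefore registers the unlink as a transaction post-close callback instead of calling it right away. A minimal sketch of that pattern, extracted from the diff (here `opener`, `tr` and `old_docket` stand in for the surrounding method's attributes):

# Defer removal of the old dirstate-v2 data file until the transaction,
# if any, has closed; otherwise remove it immediately.
data_filename = old_docket.data_filename()
unlink = lambda _tr=None: opener.unlink(data_filename)
if tr:
    category = b"dirstate-v2-clean-" + old_docket.uuid
    tr.addpostclose(category, unlink)
else:
    unlink()

The `_tr=None` default lets the same callable serve both as a post-close hook, which is invoked with the transaction, and as a plain zero-argument call in the no-transaction case.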
@@ -1,1207 +1,1232 b''
1 use bytes_cast::BytesCast;
1 use bytes_cast::BytesCast;
2 use micro_timer::timed;
2 use micro_timer::timed;
3 use std::borrow::Cow;
3 use std::borrow::Cow;
4 use std::convert::TryInto;
4 use std::convert::TryInto;
5 use std::path::PathBuf;
5 use std::path::PathBuf;
6
6
7 use super::on_disk;
7 use super::on_disk;
8 use super::on_disk::DirstateV2ParseError;
8 use super::on_disk::DirstateV2ParseError;
9 use super::path_with_basename::WithBasename;
9 use super::path_with_basename::WithBasename;
10 use crate::dirstate::parsers::pack_entry;
10 use crate::dirstate::parsers::pack_entry;
11 use crate::dirstate::parsers::packed_entry_size;
11 use crate::dirstate::parsers::packed_entry_size;
12 use crate::dirstate::parsers::parse_dirstate_entries;
12 use crate::dirstate::parsers::parse_dirstate_entries;
13 use crate::dirstate::parsers::Timestamp;
13 use crate::dirstate::parsers::Timestamp;
14 use crate::dirstate::MTIME_UNSET;
14 use crate::dirstate::MTIME_UNSET;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
15 use crate::dirstate::SIZE_FROM_OTHER_PARENT;
16 use crate::dirstate::SIZE_NON_NORMAL;
16 use crate::dirstate::SIZE_NON_NORMAL;
17 use crate::dirstate::V1_RANGEMASK;
17 use crate::dirstate::V1_RANGEMASK;
18 use crate::matchers::Matcher;
18 use crate::matchers::Matcher;
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
19 use crate::utils::hg_path::{HgPath, HgPathBuf};
20 use crate::CopyMapIter;
20 use crate::CopyMapIter;
21 use crate::DirstateEntry;
21 use crate::DirstateEntry;
22 use crate::DirstateError;
22 use crate::DirstateError;
23 use crate::DirstateParents;
23 use crate::DirstateParents;
24 use crate::DirstateStatus;
24 use crate::DirstateStatus;
25 use crate::EntryState;
25 use crate::EntryState;
26 use crate::FastHashMap;
26 use crate::FastHashMap;
27 use crate::PatternFileWarning;
27 use crate::PatternFileWarning;
28 use crate::StateMapIter;
28 use crate::StateMapIter;
29 use crate::StatusError;
29 use crate::StatusError;
30 use crate::StatusOptions;
30 use crate::StatusOptions;
31
31
32 pub struct DirstateMap<'on_disk> {
32 pub struct DirstateMap<'on_disk> {
33 /// Contents of the `.hg/dirstate` file
33 /// Contents of the `.hg/dirstate` file
34 pub(super) on_disk: &'on_disk [u8],
34 pub(super) on_disk: &'on_disk [u8],
35
35
36 pub(super) root: ChildNodes<'on_disk>,
36 pub(super) root: ChildNodes<'on_disk>,
37
37
38 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
38 /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
39 pub(super) nodes_with_entry_count: u32,
39 pub(super) nodes_with_entry_count: u32,
40
40
41 /// Number of nodes anywhere in the tree that have
41 /// Number of nodes anywhere in the tree that have
42 /// `.copy_source.is_some()`.
42 /// `.copy_source.is_some()`.
43 pub(super) nodes_with_copy_source_count: u32,
43 pub(super) nodes_with_copy_source_count: u32,
44
44
45 /// See on_disk::Header
45 /// See on_disk::Header
46 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
46 pub(super) ignore_patterns_hash: on_disk::IgnorePatternsHash,
47 }
47 }
48
48
49 /// Using a plain `HgPathBuf` of the full path from the repository root as a
49 /// Using a plain `HgPathBuf` of the full path from the repository root as a
50 /// map key would also work: all paths in a given map have the same parent
50 /// map key would also work: all paths in a given map have the same parent
51 /// path, so comparing full paths gives the same result as comparing base
51 /// path, so comparing full paths gives the same result as comparing base
52 /// names. However `HashMap` would waste time always re-hashing the same
52 /// names. However `HashMap` would waste time always re-hashing the same
53 /// string prefix.
53 /// string prefix.
54 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
54 pub(super) type NodeKey<'on_disk> = WithBasename<Cow<'on_disk, HgPath>>;
55
55
56 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
56 /// Similar to `&'tree Cow<'on_disk, HgPath>`, but can also be returned
57 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
57 /// for on-disk nodes that don’t actually have a `Cow` to borrow.
58 pub(super) enum BorrowedPath<'tree, 'on_disk> {
58 pub(super) enum BorrowedPath<'tree, 'on_disk> {
59 InMemory(&'tree HgPathBuf),
59 InMemory(&'tree HgPathBuf),
60 OnDisk(&'on_disk HgPath),
60 OnDisk(&'on_disk HgPath),
61 }
61 }
62
62
63 pub(super) enum ChildNodes<'on_disk> {
63 pub(super) enum ChildNodes<'on_disk> {
64 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
64 InMemory(FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
65 OnDisk(&'on_disk [on_disk::Node]),
65 OnDisk(&'on_disk [on_disk::Node]),
66 }
66 }
67
67
68 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
68 pub(super) enum ChildNodesRef<'tree, 'on_disk> {
69 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
69 InMemory(&'tree FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>),
70 OnDisk(&'on_disk [on_disk::Node]),
70 OnDisk(&'on_disk [on_disk::Node]),
71 }
71 }
72
72
73 pub(super) enum NodeRef<'tree, 'on_disk> {
73 pub(super) enum NodeRef<'tree, 'on_disk> {
74 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
74 InMemory(&'tree NodeKey<'on_disk>, &'tree Node<'on_disk>),
75 OnDisk(&'on_disk on_disk::Node),
75 OnDisk(&'on_disk on_disk::Node),
76 }
76 }
77
77
78 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
78 impl<'tree, 'on_disk> BorrowedPath<'tree, 'on_disk> {
79 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
79 pub fn detach_from_tree(&self) -> Cow<'on_disk, HgPath> {
80 match *self {
80 match *self {
81 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
81 BorrowedPath::InMemory(in_memory) => Cow::Owned(in_memory.clone()),
82 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
82 BorrowedPath::OnDisk(on_disk) => Cow::Borrowed(on_disk),
83 }
83 }
84 }
84 }
85 }
85 }
86
86
87 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
87 impl<'tree, 'on_disk> std::ops::Deref for BorrowedPath<'tree, 'on_disk> {
88 type Target = HgPath;
88 type Target = HgPath;
89
89
90 fn deref(&self) -> &HgPath {
90 fn deref(&self) -> &HgPath {
91 match *self {
91 match *self {
92 BorrowedPath::InMemory(in_memory) => in_memory,
92 BorrowedPath::InMemory(in_memory) => in_memory,
93 BorrowedPath::OnDisk(on_disk) => on_disk,
93 BorrowedPath::OnDisk(on_disk) => on_disk,
94 }
94 }
95 }
95 }
96 }
96 }
97
97
98 impl Default for ChildNodes<'_> {
98 impl Default for ChildNodes<'_> {
99 fn default() -> Self {
99 fn default() -> Self {
100 ChildNodes::InMemory(Default::default())
100 ChildNodes::InMemory(Default::default())
101 }
101 }
102 }
102 }
103
103
104 impl<'on_disk> ChildNodes<'on_disk> {
104 impl<'on_disk> ChildNodes<'on_disk> {
105 pub(super) fn as_ref<'tree>(
105 pub(super) fn as_ref<'tree>(
106 &'tree self,
106 &'tree self,
107 ) -> ChildNodesRef<'tree, 'on_disk> {
107 ) -> ChildNodesRef<'tree, 'on_disk> {
108 match self {
108 match self {
109 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
109 ChildNodes::InMemory(nodes) => ChildNodesRef::InMemory(nodes),
110 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
110 ChildNodes::OnDisk(nodes) => ChildNodesRef::OnDisk(nodes),
111 }
111 }
112 }
112 }
113
113
114 pub(super) fn is_empty(&self) -> bool {
114 pub(super) fn is_empty(&self) -> bool {
115 match self {
115 match self {
116 ChildNodes::InMemory(nodes) => nodes.is_empty(),
116 ChildNodes::InMemory(nodes) => nodes.is_empty(),
117 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
117 ChildNodes::OnDisk(nodes) => nodes.is_empty(),
118 }
118 }
119 }
119 }
120
120
121 pub(super) fn make_mut(
121 pub(super) fn make_mut(
122 &mut self,
122 &mut self,
123 on_disk: &'on_disk [u8],
123 on_disk: &'on_disk [u8],
124 ) -> Result<
124 ) -> Result<
125 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
125 &mut FastHashMap<NodeKey<'on_disk>, Node<'on_disk>>,
126 DirstateV2ParseError,
126 DirstateV2ParseError,
127 > {
127 > {
128 match self {
128 match self {
129 ChildNodes::InMemory(nodes) => Ok(nodes),
129 ChildNodes::InMemory(nodes) => Ok(nodes),
130 ChildNodes::OnDisk(nodes) => {
130 ChildNodes::OnDisk(nodes) => {
131 let nodes = nodes
131 let nodes = nodes
132 .iter()
132 .iter()
133 .map(|node| {
133 .map(|node| {
134 Ok((
134 Ok((
135 node.path(on_disk)?,
135 node.path(on_disk)?,
136 node.to_in_memory_node(on_disk)?,
136 node.to_in_memory_node(on_disk)?,
137 ))
137 ))
138 })
138 })
139 .collect::<Result<_, _>>()?;
139 .collect::<Result<_, _>>()?;
140 *self = ChildNodes::InMemory(nodes);
140 *self = ChildNodes::InMemory(nodes);
141 match self {
141 match self {
142 ChildNodes::InMemory(nodes) => Ok(nodes),
142 ChildNodes::InMemory(nodes) => Ok(nodes),
143 ChildNodes::OnDisk(_) => unreachable!(),
143 ChildNodes::OnDisk(_) => unreachable!(),
144 }
144 }
145 }
145 }
146 }
146 }
147 }
147 }
148 }
148 }
149
149
150 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
150 impl<'tree, 'on_disk> ChildNodesRef<'tree, 'on_disk> {
151 pub(super) fn get(
151 pub(super) fn get(
152 &self,
152 &self,
153 base_name: &HgPath,
153 base_name: &HgPath,
154 on_disk: &'on_disk [u8],
154 on_disk: &'on_disk [u8],
155 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
155 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
156 match self {
156 match self {
157 ChildNodesRef::InMemory(nodes) => Ok(nodes
157 ChildNodesRef::InMemory(nodes) => Ok(nodes
158 .get_key_value(base_name)
158 .get_key_value(base_name)
159 .map(|(k, v)| NodeRef::InMemory(k, v))),
159 .map(|(k, v)| NodeRef::InMemory(k, v))),
160 ChildNodesRef::OnDisk(nodes) => {
160 ChildNodesRef::OnDisk(nodes) => {
161 let mut parse_result = Ok(());
161 let mut parse_result = Ok(());
162 let search_result = nodes.binary_search_by(|node| {
162 let search_result = nodes.binary_search_by(|node| {
163 match node.base_name(on_disk) {
163 match node.base_name(on_disk) {
164 Ok(node_base_name) => node_base_name.cmp(base_name),
164 Ok(node_base_name) => node_base_name.cmp(base_name),
165 Err(e) => {
165 Err(e) => {
166 parse_result = Err(e);
166 parse_result = Err(e);
167 // Dummy comparison result, `search_result` won’t
167 // Dummy comparison result, `search_result` won’t
168 // be used since `parse_result` is an error
168 // be used since `parse_result` is an error
169 std::cmp::Ordering::Equal
169 std::cmp::Ordering::Equal
170 }
170 }
171 }
171 }
172 });
172 });
173 parse_result.map(|()| {
173 parse_result.map(|()| {
174 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
174 search_result.ok().map(|i| NodeRef::OnDisk(&nodes[i]))
175 })
175 })
176 }
176 }
177 }
177 }
178 }
178 }
179
179
180 /// Iterate in undefined order
180 /// Iterate in undefined order
181 pub(super) fn iter(
181 pub(super) fn iter(
182 &self,
182 &self,
183 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
183 ) -> impl Iterator<Item = NodeRef<'tree, 'on_disk>> {
184 match self {
184 match self {
185 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
185 ChildNodesRef::InMemory(nodes) => itertools::Either::Left(
186 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
186 nodes.iter().map(|(k, v)| NodeRef::InMemory(k, v)),
187 ),
187 ),
188 ChildNodesRef::OnDisk(nodes) => {
188 ChildNodesRef::OnDisk(nodes) => {
189 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
189 itertools::Either::Right(nodes.iter().map(NodeRef::OnDisk))
190 }
190 }
191 }
191 }
192 }
192 }
193
193
194 /// Iterate in parallel in undefined order
194 /// Iterate in parallel in undefined order
195 pub(super) fn par_iter(
195 pub(super) fn par_iter(
196 &self,
196 &self,
197 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
197 ) -> impl rayon::iter::ParallelIterator<Item = NodeRef<'tree, 'on_disk>>
198 {
198 {
199 use rayon::prelude::*;
199 use rayon::prelude::*;
200 match self {
200 match self {
201 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
201 ChildNodesRef::InMemory(nodes) => rayon::iter::Either::Left(
202 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
202 nodes.par_iter().map(|(k, v)| NodeRef::InMemory(k, v)),
203 ),
203 ),
204 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
204 ChildNodesRef::OnDisk(nodes) => rayon::iter::Either::Right(
205 nodes.par_iter().map(NodeRef::OnDisk),
205 nodes.par_iter().map(NodeRef::OnDisk),
206 ),
206 ),
207 }
207 }
208 }
208 }
209
209
210 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
210 pub(super) fn sorted(&self) -> Vec<NodeRef<'tree, 'on_disk>> {
211 match self {
211 match self {
212 ChildNodesRef::InMemory(nodes) => {
212 ChildNodesRef::InMemory(nodes) => {
213 let mut vec: Vec<_> = nodes
213 let mut vec: Vec<_> = nodes
214 .iter()
214 .iter()
215 .map(|(k, v)| NodeRef::InMemory(k, v))
215 .map(|(k, v)| NodeRef::InMemory(k, v))
216 .collect();
216 .collect();
217 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
217 fn sort_key<'a>(node: &'a NodeRef) -> &'a HgPath {
218 match node {
218 match node {
219 NodeRef::InMemory(path, _node) => path.base_name(),
219 NodeRef::InMemory(path, _node) => path.base_name(),
220 NodeRef::OnDisk(_) => unreachable!(),
220 NodeRef::OnDisk(_) => unreachable!(),
221 }
221 }
222 }
222 }
223 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
223 // `sort_unstable_by_key` doesn’t allow keys borrowing from the
224 // value: https://github.com/rust-lang/rust/issues/34162
224 // value: https://github.com/rust-lang/rust/issues/34162
225 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
225 vec.sort_unstable_by(|a, b| sort_key(a).cmp(sort_key(b)));
226 vec
226 vec
227 }
227 }
228 ChildNodesRef::OnDisk(nodes) => {
228 ChildNodesRef::OnDisk(nodes) => {
229 // Nodes on disk are already sorted
229 // Nodes on disk are already sorted
230 nodes.iter().map(NodeRef::OnDisk).collect()
230 nodes.iter().map(NodeRef::OnDisk).collect()
231 }
231 }
232 }
232 }
233 }
233 }
234 }
234 }
235
235
236 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
236 impl<'tree, 'on_disk> NodeRef<'tree, 'on_disk> {
237 pub(super) fn full_path(
237 pub(super) fn full_path(
238 &self,
238 &self,
239 on_disk: &'on_disk [u8],
239 on_disk: &'on_disk [u8],
240 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
240 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
241 match self {
241 match self {
242 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
242 NodeRef::InMemory(path, _node) => Ok(path.full_path()),
243 NodeRef::OnDisk(node) => node.full_path(on_disk),
243 NodeRef::OnDisk(node) => node.full_path(on_disk),
244 }
244 }
245 }
245 }
246
246
247 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
247 /// Returns a `BorrowedPath`, which can be turned into a `Cow<'on_disk,
248 /// HgPath>` detached from `'tree`
248 /// HgPath>` detached from `'tree`
249 pub(super) fn full_path_borrowed(
249 pub(super) fn full_path_borrowed(
250 &self,
250 &self,
251 on_disk: &'on_disk [u8],
251 on_disk: &'on_disk [u8],
252 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
252 ) -> Result<BorrowedPath<'tree, 'on_disk>, DirstateV2ParseError> {
253 match self {
253 match self {
254 NodeRef::InMemory(path, _node) => match path.full_path() {
254 NodeRef::InMemory(path, _node) => match path.full_path() {
255 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
255 Cow::Borrowed(on_disk) => Ok(BorrowedPath::OnDisk(on_disk)),
256 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
256 Cow::Owned(in_memory) => Ok(BorrowedPath::InMemory(in_memory)),
257 },
257 },
258 NodeRef::OnDisk(node) => {
258 NodeRef::OnDisk(node) => {
259 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
259 Ok(BorrowedPath::OnDisk(node.full_path(on_disk)?))
260 }
260 }
261 }
261 }
262 }
262 }
263
263
264 pub(super) fn base_name(
264 pub(super) fn base_name(
265 &self,
265 &self,
266 on_disk: &'on_disk [u8],
266 on_disk: &'on_disk [u8],
267 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
267 ) -> Result<&'tree HgPath, DirstateV2ParseError> {
268 match self {
268 match self {
269 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
269 NodeRef::InMemory(path, _node) => Ok(path.base_name()),
270 NodeRef::OnDisk(node) => node.base_name(on_disk),
270 NodeRef::OnDisk(node) => node.base_name(on_disk),
271 }
271 }
272 }
272 }
273
273
274 pub(super) fn children(
274 pub(super) fn children(
275 &self,
275 &self,
276 on_disk: &'on_disk [u8],
276 on_disk: &'on_disk [u8],
277 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
277 ) -> Result<ChildNodesRef<'tree, 'on_disk>, DirstateV2ParseError> {
278 match self {
278 match self {
279 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
279 NodeRef::InMemory(_path, node) => Ok(node.children.as_ref()),
280 NodeRef::OnDisk(node) => {
280 NodeRef::OnDisk(node) => {
281 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
281 Ok(ChildNodesRef::OnDisk(node.children(on_disk)?))
282 }
282 }
283 }
283 }
284 }
284 }
285
285
286 pub(super) fn has_copy_source(&self) -> bool {
286 pub(super) fn has_copy_source(&self) -> bool {
287 match self {
287 match self {
288 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
288 NodeRef::InMemory(_path, node) => node.copy_source.is_some(),
289 NodeRef::OnDisk(node) => node.has_copy_source(),
289 NodeRef::OnDisk(node) => node.has_copy_source(),
290 }
290 }
291 }
291 }
292
292
293 pub(super) fn copy_source(
293 pub(super) fn copy_source(
294 &self,
294 &self,
295 on_disk: &'on_disk [u8],
295 on_disk: &'on_disk [u8],
296 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
296 ) -> Result<Option<&'tree HgPath>, DirstateV2ParseError> {
297 match self {
297 match self {
298 NodeRef::InMemory(_path, node) => {
298 NodeRef::InMemory(_path, node) => {
299 Ok(node.copy_source.as_ref().map(|s| &**s))
299 Ok(node.copy_source.as_ref().map(|s| &**s))
300 }
300 }
301 NodeRef::OnDisk(node) => node.copy_source(on_disk),
301 NodeRef::OnDisk(node) => node.copy_source(on_disk),
302 }
302 }
303 }
303 }
304
304
305 pub(super) fn entry(
305 pub(super) fn entry(
306 &self,
306 &self,
307 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
307 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
308 match self {
308 match self {
309 NodeRef::InMemory(_path, node) => {
309 NodeRef::InMemory(_path, node) => {
310 Ok(node.data.as_entry().copied())
310 Ok(node.data.as_entry().copied())
311 }
311 }
312 NodeRef::OnDisk(node) => node.entry(),
312 NodeRef::OnDisk(node) => node.entry(),
313 }
313 }
314 }
314 }
315
315
316 pub(super) fn state(
316 pub(super) fn state(
317 &self,
317 &self,
318 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
318 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
319 match self {
319 match self {
320 NodeRef::InMemory(_path, node) => {
320 NodeRef::InMemory(_path, node) => {
321 Ok(node.data.as_entry().map(|entry| entry.state))
321 Ok(node.data.as_entry().map(|entry| entry.state))
322 }
322 }
323 NodeRef::OnDisk(node) => node.state(),
323 NodeRef::OnDisk(node) => node.state(),
324 }
324 }
325 }
325 }
326
326
327 pub(super) fn cached_directory_mtime(
327 pub(super) fn cached_directory_mtime(
328 &self,
328 &self,
329 ) -> Option<&'tree on_disk::Timestamp> {
329 ) -> Option<&'tree on_disk::Timestamp> {
330 match self {
330 match self {
331 NodeRef::InMemory(_path, node) => match &node.data {
331 NodeRef::InMemory(_path, node) => match &node.data {
332 NodeData::CachedDirectory { mtime } => Some(mtime),
332 NodeData::CachedDirectory { mtime } => Some(mtime),
333 _ => None,
333 _ => None,
334 },
334 },
335 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
335 NodeRef::OnDisk(node) => node.cached_directory_mtime(),
336 }
336 }
337 }
337 }
338
338
339 pub(super) fn descendants_with_entry_count(&self) -> u32 {
339 pub(super) fn descendants_with_entry_count(&self) -> u32 {
340 match self {
340 match self {
341 NodeRef::InMemory(_path, node) => {
341 NodeRef::InMemory(_path, node) => {
342 node.descendants_with_entry_count
342 node.descendants_with_entry_count
343 }
343 }
344 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
344 NodeRef::OnDisk(node) => node.descendants_with_entry_count.get(),
345 }
345 }
346 }
346 }
347
347
348 pub(super) fn tracked_descendants_count(&self) -> u32 {
348 pub(super) fn tracked_descendants_count(&self) -> u32 {
349 match self {
349 match self {
350 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
350 NodeRef::InMemory(_path, node) => node.tracked_descendants_count,
351 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
351 NodeRef::OnDisk(node) => node.tracked_descendants_count.get(),
352 }
352 }
353 }
353 }
354 }
354 }
355
355
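Every accessor in the `impl NodeRef` block above follows the same shape: match on the variant, then read either the in-memory node or the lazily parsed on-disk bytes. Below is a minimal standalone sketch of that dispatch pattern; the names are invented for illustration and are not part of the Mercurial sources.

```rust
// Sketch of the "in-memory vs. on-disk" dispatch used by `NodeRef`.
// `ValueRef` and its variants are illustrative names only.
enum ValueRef<'tree, 'on_disk> {
    InMemory(&'tree String),
    OnDisk(&'on_disk [u8]),
}

impl<'tree, 'on_disk> ValueRef<'tree, 'on_disk> {
    // Each accessor matches on the variant and delegates, mirroring
    // `NodeRef::full_path`, `NodeRef::entry`, etc.
    fn as_bytes(&self) -> &[u8] {
        match self {
            ValueRef::InMemory(s) => s.as_bytes(),
            ValueRef::OnDisk(bytes) => bytes,
        }
    }
}

fn main() {
    let owned = String::from("in-memory");
    let on_disk: &[u8] = b"on-disk";
    for v in [ValueRef::InMemory(&owned), ValueRef::OnDisk(on_disk)] {
        println!("{} bytes", v.as_bytes().len());
    }
}
```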
356 /// Represents a file or a directory
356 /// Represents a file or a directory
357 #[derive(Default)]
357 #[derive(Default)]
358 pub(super) struct Node<'on_disk> {
358 pub(super) struct Node<'on_disk> {
359 pub(super) data: NodeData,
359 pub(super) data: NodeData,
360
360
361 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
361 pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
362
362
363 pub(super) children: ChildNodes<'on_disk>,
363 pub(super) children: ChildNodes<'on_disk>,
364
364
365 /// How many descendants of this node (itself excluded) have an entry.
365 /// How many descendants of this node (itself excluded) have an entry.
366 pub(super) descendants_with_entry_count: u32,
366 pub(super) descendants_with_entry_count: u32,
367
367
368 /// How many descendants of this node (itself excluded) have an entry whose
368 /// How many descendants of this node (itself excluded) have an entry whose
369 /// state is "tracked".
369 /// state is "tracked".
370 pub(super) tracked_descendants_count: u32,
370 pub(super) tracked_descendants_count: u32,
371 }
371 }
372
372
373 pub(super) enum NodeData {
373 pub(super) enum NodeData {
374 Entry(DirstateEntry),
374 Entry(DirstateEntry),
375 CachedDirectory { mtime: on_disk::Timestamp },
375 CachedDirectory { mtime: on_disk::Timestamp },
376 None,
376 None,
377 }
377 }
378
378
379 impl Default for NodeData {
379 impl Default for NodeData {
380 fn default() -> Self {
380 fn default() -> Self {
381 NodeData::None
381 NodeData::None
382 }
382 }
383 }
383 }
384
384
385 impl NodeData {
385 impl NodeData {
386 fn has_entry(&self) -> bool {
386 fn has_entry(&self) -> bool {
387 match self {
387 match self {
388 NodeData::Entry(_) => true,
388 NodeData::Entry(_) => true,
389 _ => false,
389 _ => false,
390 }
390 }
391 }
391 }
392
392
393 fn as_entry(&self) -> Option<&DirstateEntry> {
393 fn as_entry(&self) -> Option<&DirstateEntry> {
394 match self {
394 match self {
395 NodeData::Entry(entry) => Some(entry),
395 NodeData::Entry(entry) => Some(entry),
396 _ => None,
396 _ => None,
397 }
397 }
398 }
398 }
399 }
399 }
400
400
401 impl<'on_disk> DirstateMap<'on_disk> {
401 impl<'on_disk> DirstateMap<'on_disk> {
402 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
402 pub(super) fn empty(on_disk: &'on_disk [u8]) -> Self {
403 Self {
403 Self {
404 on_disk,
404 on_disk,
405 root: ChildNodes::default(),
405 root: ChildNodes::default(),
406 nodes_with_entry_count: 0,
406 nodes_with_entry_count: 0,
407 nodes_with_copy_source_count: 0,
407 nodes_with_copy_source_count: 0,
408 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
408 ignore_patterns_hash: [0; on_disk::IGNORE_PATTERNS_HASH_LEN],
409 }
409 }
410 }
410 }
411
411
412 #[timed]
412 #[timed]
413 pub fn new_v2(
413 pub fn new_v2(
414 on_disk: &'on_disk [u8],
414 on_disk: &'on_disk [u8],
415 data_size: usize,
415 data_size: usize,
416 ) -> Result<Self, DirstateError> {
416 ) -> Result<Self, DirstateError> {
417 if let Some(data) = on_disk.get(..data_size) {
417 if let Some(data) = on_disk.get(..data_size) {
418 Ok(on_disk::read(data)?)
418 Ok(on_disk::read(data)?)
419 } else {
419 } else {
420 Err(DirstateV2ParseError.into())
420 Err(DirstateV2ParseError.into())
421 }
421 }
422 }
422 }
423
423
424 #[timed]
424 #[timed]
425 pub fn new_v1(
425 pub fn new_v1(
426 on_disk: &'on_disk [u8],
426 on_disk: &'on_disk [u8],
427 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
427 ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
428 let mut map = Self::empty(on_disk);
428 let mut map = Self::empty(on_disk);
429 if map.on_disk.is_empty() {
429 if map.on_disk.is_empty() {
430 return Ok((map, None));
430 return Ok((map, None));
431 }
431 }
432
432
433 let parents = parse_dirstate_entries(
433 let parents = parse_dirstate_entries(
434 map.on_disk,
434 map.on_disk,
435 |path, entry, copy_source| {
435 |path, entry, copy_source| {
436 let tracked = entry.state.is_tracked();
436 let tracked = entry.state.is_tracked();
437 let node = Self::get_or_insert_node(
437 let node = Self::get_or_insert_node(
438 map.on_disk,
438 map.on_disk,
439 &mut map.root,
439 &mut map.root,
440 path,
440 path,
441 WithBasename::to_cow_borrowed,
441 WithBasename::to_cow_borrowed,
442 |ancestor| {
442 |ancestor| {
443 if tracked {
443 if tracked {
444 ancestor.tracked_descendants_count += 1
444 ancestor.tracked_descendants_count += 1
445 }
445 }
446 ancestor.descendants_with_entry_count += 1
446 ancestor.descendants_with_entry_count += 1
447 },
447 },
448 )?;
448 )?;
449 assert!(
449 assert!(
450 !node.data.has_entry(),
450 !node.data.has_entry(),
451 "duplicate dirstate entry in read"
451 "duplicate dirstate entry in read"
452 );
452 );
453 assert!(
453 assert!(
454 node.copy_source.is_none(),
454 node.copy_source.is_none(),
455 "duplicate dirstate entry in read"
455 "duplicate dirstate entry in read"
456 );
456 );
457 node.data = NodeData::Entry(*entry);
457 node.data = NodeData::Entry(*entry);
458 node.copy_source = copy_source.map(Cow::Borrowed);
458 node.copy_source = copy_source.map(Cow::Borrowed);
459 map.nodes_with_entry_count += 1;
459 map.nodes_with_entry_count += 1;
460 if copy_source.is_some() {
460 if copy_source.is_some() {
461 map.nodes_with_copy_source_count += 1
461 map.nodes_with_copy_source_count += 1
462 }
462 }
463 Ok(())
463 Ok(())
464 },
464 },
465 )?;
465 )?;
466 let parents = Some(parents.clone());
466 let parents = Some(parents.clone());
467
467
468 Ok((map, parents))
468 Ok((map, parents))
469 }
469 }
470
470
471 /// Assuming dirstate-v2 format, returns whether the next write should
472 /// append to the existing data file that contains `self.on_disk` (true),
473 /// or create a new data file from scratch (false).
474 pub(super) fn write_should_append(&self) -> bool {
475 // Soon this will be a heuristic based on the amount of unreachable
476 // data. For now it’s pseudo-random in order to make tests exercise
477 // both code paths.
478
479 fn bad_rng() -> u32 {
480 std::time::SystemTime::now()
481 .duration_since(std::time::UNIX_EPOCH)
482 .unwrap()
483 .subsec_millis()
484 }
485
486 bad_rng() % 2 == 0
487 }
488
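For context on the `write_should_append` placeholder above: a heuristic of the kind the comment anticipates would compare the amount of unreachable (garbage) data against the total size of the existing data file. The sketch below is hypothetical; the `unreachable_bytes` bookkeeping and the 50% threshold are assumptions, not part of this changeset.

```rust
// Hypothetical size-based replacement for the pseudo-random choice above.
fn should_append(data_file_size: u64, unreachable_bytes: u64) -> bool {
    // Keep appending while less than half of the existing file is waste;
    // otherwise rewrite the data file from scratch to reclaim the space.
    unreachable_bytes * 2 < data_file_size
}

fn main() {
    assert!(should_append(1000, 300)); // 30% waste: append
    assert!(!should_append(1000, 700)); // 70% waste: rewrite
}
```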
471 fn get_node<'tree>(
489 fn get_node<'tree>(
472 &'tree self,
490 &'tree self,
473 path: &HgPath,
491 path: &HgPath,
474 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
492 ) -> Result<Option<NodeRef<'tree, 'on_disk>>, DirstateV2ParseError> {
475 let mut children = self.root.as_ref();
493 let mut children = self.root.as_ref();
476 let mut components = path.components();
494 let mut components = path.components();
477 let mut component =
495 let mut component =
478 components.next().expect("expected at least one component");
496 components.next().expect("expected at least one component");
479 loop {
497 loop {
480 if let Some(child) = children.get(component, self.on_disk)? {
498 if let Some(child) = children.get(component, self.on_disk)? {
481 if let Some(next_component) = components.next() {
499 if let Some(next_component) = components.next() {
482 component = next_component;
500 component = next_component;
483 children = child.children(self.on_disk)?;
501 children = child.children(self.on_disk)?;
484 } else {
502 } else {
485 return Ok(Some(child));
503 return Ok(Some(child));
486 }
504 }
487 } else {
505 } else {
488 return Ok(None);
506 return Ok(None);
489 }
507 }
490 }
508 }
491 }
509 }
492
510
493 /// Returns a mutable reference to the node at `path` if it exists
511 /// Returns a mutable reference to the node at `path` if it exists
494 ///
512 ///
495 /// This takes `root` instead of `&mut self` so that callers can mutate
513 /// This takes `root` instead of `&mut self` so that callers can mutate
496 /// other fields while the returned borrow is still valid
514 /// other fields while the returned borrow is still valid
497 fn get_node_mut<'tree>(
515 fn get_node_mut<'tree>(
498 on_disk: &'on_disk [u8],
516 on_disk: &'on_disk [u8],
499 root: &'tree mut ChildNodes<'on_disk>,
517 root: &'tree mut ChildNodes<'on_disk>,
500 path: &HgPath,
518 path: &HgPath,
501 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
519 ) -> Result<Option<&'tree mut Node<'on_disk>>, DirstateV2ParseError> {
502 let mut children = root;
520 let mut children = root;
503 let mut components = path.components();
521 let mut components = path.components();
504 let mut component =
522 let mut component =
505 components.next().expect("expected at least one component");
523 components.next().expect("expected at least one component");
506 loop {
524 loop {
507 if let Some(child) = children.make_mut(on_disk)?.get_mut(component)
525 if let Some(child) = children.make_mut(on_disk)?.get_mut(component)
508 {
526 {
509 if let Some(next_component) = components.next() {
527 if let Some(next_component) = components.next() {
510 component = next_component;
528 component = next_component;
511 children = &mut child.children;
529 children = &mut child.children;
512 } else {
530 } else {
513 return Ok(Some(child));
531 return Ok(Some(child));
514 }
532 }
515 } else {
533 } else {
516 return Ok(None);
534 return Ok(None);
517 }
535 }
518 }
536 }
519 }
537 }
520
538
521 pub(super) fn get_or_insert<'tree, 'path>(
539 pub(super) fn get_or_insert<'tree, 'path>(
522 &'tree mut self,
540 &'tree mut self,
523 path: &HgPath,
541 path: &HgPath,
524 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
542 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
525 Self::get_or_insert_node(
543 Self::get_or_insert_node(
526 self.on_disk,
544 self.on_disk,
527 &mut self.root,
545 &mut self.root,
528 path,
546 path,
529 WithBasename::to_cow_owned,
547 WithBasename::to_cow_owned,
530 |_| {},
548 |_| {},
531 )
549 )
532 }
550 }
533
551
534 pub(super) fn get_or_insert_node<'tree, 'path>(
552 pub(super) fn get_or_insert_node<'tree, 'path>(
535 on_disk: &'on_disk [u8],
553 on_disk: &'on_disk [u8],
536 root: &'tree mut ChildNodes<'on_disk>,
554 root: &'tree mut ChildNodes<'on_disk>,
537 path: &'path HgPath,
555 path: &'path HgPath,
538 to_cow: impl Fn(
556 to_cow: impl Fn(
539 WithBasename<&'path HgPath>,
557 WithBasename<&'path HgPath>,
540 ) -> WithBasename<Cow<'on_disk, HgPath>>,
558 ) -> WithBasename<Cow<'on_disk, HgPath>>,
541 mut each_ancestor: impl FnMut(&mut Node),
559 mut each_ancestor: impl FnMut(&mut Node),
542 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
560 ) -> Result<&'tree mut Node<'on_disk>, DirstateV2ParseError> {
543 let mut child_nodes = root;
561 let mut child_nodes = root;
544 let mut inclusive_ancestor_paths =
562 let mut inclusive_ancestor_paths =
545 WithBasename::inclusive_ancestors_of(path);
563 WithBasename::inclusive_ancestors_of(path);
546 let mut ancestor_path = inclusive_ancestor_paths
564 let mut ancestor_path = inclusive_ancestor_paths
547 .next()
565 .next()
548 .expect("expected at least one inclusive ancestor");
566 .expect("expected at least one inclusive ancestor");
549 loop {
567 loop {
550 // TODO: can we avoid allocating an owned key in cases where the
568 // TODO: can we avoid allocating an owned key in cases where the
551 // map already contains that key, without introducing double
569 // map already contains that key, without introducing double
552 // lookup?
570 // lookup?
553 let child_node = child_nodes
571 let child_node = child_nodes
554 .make_mut(on_disk)?
572 .make_mut(on_disk)?
555 .entry(to_cow(ancestor_path))
573 .entry(to_cow(ancestor_path))
556 .or_default();
574 .or_default();
557 if let Some(next) = inclusive_ancestor_paths.next() {
575 if let Some(next) = inclusive_ancestor_paths.next() {
558 each_ancestor(child_node);
576 each_ancestor(child_node);
559 ancestor_path = next;
577 ancestor_path = next;
560 child_nodes = &mut child_node.children;
578 child_nodes = &mut child_node.children;
561 } else {
579 } else {
562 return Ok(child_node);
580 return Ok(child_node);
563 }
581 }
564 }
582 }
565 }
583 }
566
584
567 fn add_or_remove_file(
585 fn add_or_remove_file(
568 &mut self,
586 &mut self,
569 path: &HgPath,
587 path: &HgPath,
570 old_state: EntryState,
588 old_state: EntryState,
571 new_entry: DirstateEntry,
589 new_entry: DirstateEntry,
572 ) -> Result<(), DirstateV2ParseError> {
590 ) -> Result<(), DirstateV2ParseError> {
573 let had_entry = old_state != EntryState::Unknown;
591 let had_entry = old_state != EntryState::Unknown;
574 let tracked_count_increment =
592 let tracked_count_increment =
575 match (old_state.is_tracked(), new_entry.state.is_tracked()) {
593 match (old_state.is_tracked(), new_entry.state.is_tracked()) {
576 (false, true) => 1,
594 (false, true) => 1,
577 (true, false) => -1,
595 (true, false) => -1,
578 _ => 0,
596 _ => 0,
579 };
597 };
580
598
581 let node = Self::get_or_insert_node(
599 let node = Self::get_or_insert_node(
582 self.on_disk,
600 self.on_disk,
583 &mut self.root,
601 &mut self.root,
584 path,
602 path,
585 WithBasename::to_cow_owned,
603 WithBasename::to_cow_owned,
586 |ancestor| {
604 |ancestor| {
587 if !had_entry {
605 if !had_entry {
588 ancestor.descendants_with_entry_count += 1;
606 ancestor.descendants_with_entry_count += 1;
589 }
607 }
590
608
591 // We can’t use `+= increment` because the counter is unsigned,
609 // We can’t use `+= increment` because the counter is unsigned,
592 // and we want debug builds to detect accidental underflow
610 // and we want debug builds to detect accidental underflow
593 // through zero
611 // through zero
594 match tracked_count_increment {
612 match tracked_count_increment {
595 1 => ancestor.tracked_descendants_count += 1,
613 1 => ancestor.tracked_descendants_count += 1,
596 -1 => ancestor.tracked_descendants_count -= 1,
614 -1 => ancestor.tracked_descendants_count -= 1,
597 _ => {}
615 _ => {}
598 }
616 }
599 },
617 },
600 )?;
618 )?;
601 if !had_entry {
619 if !had_entry {
602 self.nodes_with_entry_count += 1
620 self.nodes_with_entry_count += 1
603 }
621 }
604 node.data = NodeData::Entry(new_entry);
622 node.data = NodeData::Entry(new_entry);
605 Ok(())
623 Ok(())
606 }
624 }
607
625
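The `match` on `tracked_count_increment` above exists because `tracked_descendants_count` is a `u32`: folding a signed delta in with a cast and wrapping arithmetic would hide an underflow, while an explicit decrement panics in debug builds. A small illustrative comparison, assuming nothing beyond the standard library:

```rust
fn main() {
    let tracked_descendants_count: u32 = 0;
    let increment: i32 = -1;

    // Adding the signed delta via a cast and wrapping arithmetic silently
    // turns the counter into u32::MAX instead of reporting the underflow.
    let wrapped = tracked_descendants_count.wrapping_add(increment as u32);
    assert_eq!(wrapped, u32::MAX);

    // Subtracting explicitly, as `add_or_remove_file` does, keeps the
    // underflow observable; `checked_sub` surfaces the failure the same way
    // a debug-build panic would.
    assert_eq!(tracked_descendants_count.checked_sub(1), None);
}
```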
608 fn iter_nodes<'tree>(
626 fn iter_nodes<'tree>(
609 &'tree self,
627 &'tree self,
610 ) -> impl Iterator<
628 ) -> impl Iterator<
611 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
629 Item = Result<NodeRef<'tree, 'on_disk>, DirstateV2ParseError>,
612 > + 'tree {
630 > + 'tree {
613 // Depth first tree traversal.
631 // Depth first tree traversal.
614 //
632 //
615 // If we could afford internal iteration and recursion,
633 // If we could afford internal iteration and recursion,
616 // this would look like:
634 // this would look like:
617 //
635 //
618 // ```
636 // ```
619 // fn traverse_children(
637 // fn traverse_children(
620 // children: &ChildNodes,
638 // children: &ChildNodes,
621 // each: &mut impl FnMut(&Node),
639 // each: &mut impl FnMut(&Node),
622 // ) {
640 // ) {
623 // for child in children.values() {
641 // for child in children.values() {
624 // traverse_children(&child.children, each);
642 // traverse_children(&child.children, each);
625 // each(child);
643 // each(child);
626 // }
644 // }
627 // }
645 // }
628 // ```
646 // ```
629 //
647 //
630 // However we want an external iterator and therefore can’t use the
648 // However we want an external iterator and therefore can’t use the
631 // call stack. Use an explicit stack instead:
649 // call stack. Use an explicit stack instead:
632 let mut stack = Vec::new();
650 let mut stack = Vec::new();
633 let mut iter = self.root.as_ref().iter();
651 let mut iter = self.root.as_ref().iter();
634 std::iter::from_fn(move || {
652 std::iter::from_fn(move || {
635 while let Some(child_node) = iter.next() {
653 while let Some(child_node) = iter.next() {
636 let children = match child_node.children(self.on_disk) {
654 let children = match child_node.children(self.on_disk) {
637 Ok(children) => children,
655 Ok(children) => children,
638 Err(error) => return Some(Err(error)),
656 Err(error) => return Some(Err(error)),
639 };
657 };
640 // Pseudo-recursion
658 // Pseudo-recursion
641 let new_iter = children.iter();
659 let new_iter = children.iter();
642 let old_iter = std::mem::replace(&mut iter, new_iter);
660 let old_iter = std::mem::replace(&mut iter, new_iter);
643 stack.push((child_node, old_iter));
661 stack.push((child_node, old_iter));
644 }
662 }
645 // Found the end of a `children.iter()` iterator.
663 // Found the end of a `children.iter()` iterator.
646 if let Some((child_node, next_iter)) = stack.pop() {
664 if let Some((child_node, next_iter)) = stack.pop() {
647 // "Return" from pseudo-recursion by restoring state from the
665 // "Return" from pseudo-recursion by restoring state from the
648 // explicit stack
666 // explicit stack
649 iter = next_iter;
667 iter = next_iter;
650
668
651 Some(Ok(child_node))
669 Some(Ok(child_node))
652 } else {
670 } else {
653 // Reached the bottom of the stack, we’re done
671 // Reached the bottom of the stack, we’re done
654 None
672 None
655 }
673 }
656 })
674 })
657 }
675 }
658
676
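The pseudo-recursion in `iter_nodes` above can be hard to follow, so here is a self-contained sketch of the same trick on a toy tree type: depth-first traversal exposed as an external iterator by keeping the "call stack" in an explicit `Vec`. The `Tree` type and values are invented for the example.

```rust
struct Tree {
    value: u32,
    children: Vec<Tree>,
}

// Same structure as `iter_nodes`: descend while the current `children`
// iterator yields nodes, then pop back up and yield the finished node.
fn iter_depth_first(root: &Tree) -> impl Iterator<Item = &Tree> {
    let mut stack = Vec::new();
    let mut iter = root.children.iter();
    std::iter::from_fn(move || {
        while let Some(child) = iter.next() {
            // Pseudo-recursion: remember where we were in the parent's list.
            let old_iter = std::mem::replace(&mut iter, child.children.iter());
            stack.push((child, old_iter));
        }
        // Reached the end of one `children` list: "return" from the
        // pseudo-recursion and yield the node whose children are done.
        let (node, parent_iter) = stack.pop()?;
        iter = parent_iter;
        Some(node)
    })
}

fn main() {
    let tree = Tree {
        value: 1,
        children: vec![
            Tree { value: 2, children: vec![] },
            Tree { value: 3, children: vec![Tree { value: 4, children: vec![] }] },
        ],
    };
    let values: Vec<u32> = iter_depth_first(&tree).map(|n| n.value).collect();
    // Children come before their parent, and the root itself is not yielded,
    // matching `iter_nodes` which walks the descendants of `self.root`.
    assert_eq!(values, vec![2, 4, 3]);
}
```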
659 fn clear_known_ambiguous_mtimes(
677 fn clear_known_ambiguous_mtimes(
660 &mut self,
678 &mut self,
661 paths: &[impl AsRef<HgPath>],
679 paths: &[impl AsRef<HgPath>],
662 ) -> Result<(), DirstateV2ParseError> {
680 ) -> Result<(), DirstateV2ParseError> {
663 for path in paths {
681 for path in paths {
664 if let Some(node) = Self::get_node_mut(
682 if let Some(node) = Self::get_node_mut(
665 self.on_disk,
683 self.on_disk,
666 &mut self.root,
684 &mut self.root,
667 path.as_ref(),
685 path.as_ref(),
668 )? {
686 )? {
669 if let NodeData::Entry(entry) = &mut node.data {
687 if let NodeData::Entry(entry) = &mut node.data {
670 entry.clear_mtime();
688 entry.clear_mtime();
671 }
689 }
672 }
690 }
673 }
691 }
674 Ok(())
692 Ok(())
675 }
693 }
676
694
677 /// Return a fallible iterator of full paths of nodes that have an
695 /// Return a fallible iterator of full paths of nodes that have an
678 /// `entry` for which the given `predicate` returns true.
696 /// `entry` for which the given `predicate` returns true.
679 ///
697 ///
680 /// Fallibility means that each iterator item is a `Result`, which may
698 /// Fallibility means that each iterator item is a `Result`, which may
681 /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
699 /// indicate a parse error of the on-disk dirstate-v2 format. Such errors
682 /// should only happen if Mercurial is buggy or a repository is corrupted.
700 /// should only happen if Mercurial is buggy or a repository is corrupted.
683 fn filter_full_paths<'tree>(
701 fn filter_full_paths<'tree>(
684 &'tree self,
702 &'tree self,
685 predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
703 predicate: impl Fn(&DirstateEntry) -> bool + 'tree,
686 ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
704 ) -> impl Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + 'tree
687 {
705 {
688 filter_map_results(self.iter_nodes(), move |node| {
706 filter_map_results(self.iter_nodes(), move |node| {
689 if let Some(entry) = node.entry()? {
707 if let Some(entry) = node.entry()? {
690 if predicate(&entry) {
708 if predicate(&entry) {
691 return Ok(Some(node.full_path(self.on_disk)?));
709 return Ok(Some(node.full_path(self.on_disk)?));
692 }
710 }
693 }
711 }
694 Ok(None)
712 Ok(None)
695 })
713 })
696 }
714 }
697 }
715 }
698
716
699 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
717 /// Like `Iterator::filter_map`, but over a fallible iterator of `Result`s.
700 ///
718 ///
701 /// The callback is only called for incoming `Ok` values. Errors are passed
719 /// The callback is only called for incoming `Ok` values. Errors are passed
702 /// through as-is. In order to let it use the `?` operator the callback is
720 /// through as-is. In order to let it use the `?` operator the callback is
703 /// expected to return a `Result` of `Option`, instead of an `Option` of
721 /// expected to return a `Result` of `Option`, instead of an `Option` of
704 /// `Result`.
722 /// `Result`.
705 fn filter_map_results<'a, I, F, A, B, E>(
723 fn filter_map_results<'a, I, F, A, B, E>(
706 iter: I,
724 iter: I,
707 f: F,
725 f: F,
708 ) -> impl Iterator<Item = Result<B, E>> + 'a
726 ) -> impl Iterator<Item = Result<B, E>> + 'a
709 where
727 where
710 I: Iterator<Item = Result<A, E>> + 'a,
728 I: Iterator<Item = Result<A, E>> + 'a,
711 F: Fn(A) -> Result<Option<B>, E> + 'a,
729 F: Fn(A) -> Result<Option<B>, E> + 'a,
712 {
730 {
713 iter.filter_map(move |result| match result {
731 iter.filter_map(move |result| match result {
714 Ok(node) => f(node).transpose(),
732 Ok(node) => f(node).transpose(),
715 Err(e) => Some(Err(e)),
733 Err(e) => Some(Err(e)),
716 })
734 })
717 }
735 }
718
736
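Because the shape of `filter_map_results` is easy to misread, here is a self-contained usage sketch. The helper is copied from above; the toy input and the `&str` error type merely stand in for dirstate nodes and `DirstateV2ParseError`.

```rust
// The callback returns `Result<Option<B>, E>` so it can use `?`; errors from
// the input iterator are passed through untouched.
fn filter_map_results<'a, I, F, A, B, E>(
    iter: I,
    f: F,
) -> impl Iterator<Item = Result<B, E>> + 'a
where
    I: Iterator<Item = Result<A, E>> + 'a,
    F: Fn(A) -> Result<Option<B>, E> + 'a,
{
    iter.filter_map(move |result| match result {
        Ok(node) => f(node).transpose(),
        Err(e) => Some(Err(e)),
    })
}

fn main() {
    let input = vec![Ok(1), Ok(2), Err("parse error"), Ok(4)];
    // Keep only even numbers, doubling them; the error flows through as-is.
    let output: Vec<Result<i32, &str>> = filter_map_results(
        input.into_iter(),
        |n| Ok(if n % 2 == 0 { Some(n * 2) } else { None }),
    )
    .collect();
    assert_eq!(output, vec![Ok(4), Err("parse error"), Ok(8)]);
}
```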
719 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
737 impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
720 fn clear(&mut self) {
738 fn clear(&mut self) {
721 self.root = Default::default();
739 self.root = Default::default();
722 self.nodes_with_entry_count = 0;
740 self.nodes_with_entry_count = 0;
723 self.nodes_with_copy_source_count = 0;
741 self.nodes_with_copy_source_count = 0;
724 }
742 }
725
743
726 fn add_file(
744 fn add_file(
727 &mut self,
745 &mut self,
728 filename: &HgPath,
746 filename: &HgPath,
729 entry: DirstateEntry,
747 entry: DirstateEntry,
730 added: bool,
748 added: bool,
731 merged: bool,
749 merged: bool,
732 from_p2: bool,
750 from_p2: bool,
733 possibly_dirty: bool,
751 possibly_dirty: bool,
734 ) -> Result<(), DirstateError> {
752 ) -> Result<(), DirstateError> {
735 let mut entry = entry;
753 let mut entry = entry;
736 if added {
754 if added {
737 assert!(!possibly_dirty);
755 assert!(!possibly_dirty);
738 assert!(!from_p2);
756 assert!(!from_p2);
739 entry.state = EntryState::Added;
757 entry.state = EntryState::Added;
740 entry.size = SIZE_NON_NORMAL;
758 entry.size = SIZE_NON_NORMAL;
741 entry.mtime = MTIME_UNSET;
759 entry.mtime = MTIME_UNSET;
742 } else if merged {
760 } else if merged {
743 assert!(!possibly_dirty);
761 assert!(!possibly_dirty);
744 assert!(!from_p2);
762 assert!(!from_p2);
745 entry.state = EntryState::Merged;
763 entry.state = EntryState::Merged;
746 entry.size = SIZE_FROM_OTHER_PARENT;
764 entry.size = SIZE_FROM_OTHER_PARENT;
747 entry.mtime = MTIME_UNSET;
765 entry.mtime = MTIME_UNSET;
748 } else if from_p2 {
766 } else if from_p2 {
749 assert!(!possibly_dirty);
767 assert!(!possibly_dirty);
750 entry.state = EntryState::Normal;
768 entry.state = EntryState::Normal;
751 entry.size = SIZE_FROM_OTHER_PARENT;
769 entry.size = SIZE_FROM_OTHER_PARENT;
752 entry.mtime = MTIME_UNSET;
770 entry.mtime = MTIME_UNSET;
753 } else if possibly_dirty {
771 } else if possibly_dirty {
754 entry.state = EntryState::Normal;
772 entry.state = EntryState::Normal;
755 entry.size = SIZE_NON_NORMAL;
773 entry.size = SIZE_NON_NORMAL;
756 entry.mtime = MTIME_UNSET;
774 entry.mtime = MTIME_UNSET;
757 } else {
775 } else {
758 entry.state = EntryState::Normal;
776 entry.state = EntryState::Normal;
759 entry.size = entry.size & V1_RANGEMASK;
777 entry.size = entry.size & V1_RANGEMASK;
760 entry.mtime = entry.mtime & V1_RANGEMASK;
778 entry.mtime = entry.mtime & V1_RANGEMASK;
761 }
779 }
762
780
763 let old_state = match self.get(filename)? {
781 let old_state = match self.get(filename)? {
764 Some(e) => e.state,
782 Some(e) => e.state,
765 None => EntryState::Unknown,
783 None => EntryState::Unknown,
766 };
784 };
767
785
768 Ok(self.add_or_remove_file(filename, old_state, entry)?)
786 Ok(self.add_or_remove_file(filename, old_state, entry)?)
769 }
787 }
770
788
771 fn remove_file(
789 fn remove_file(
772 &mut self,
790 &mut self,
773 filename: &HgPath,
791 filename: &HgPath,
774 in_merge: bool,
792 in_merge: bool,
775 ) -> Result<(), DirstateError> {
793 ) -> Result<(), DirstateError> {
776 let old_entry_opt = self.get(filename)?;
794 let old_entry_opt = self.get(filename)?;
777 let old_state = match old_entry_opt {
795 let old_state = match old_entry_opt {
778 Some(e) => e.state,
796 Some(e) => e.state,
779 None => EntryState::Unknown,
797 None => EntryState::Unknown,
780 };
798 };
781 let mut size = 0;
799 let mut size = 0;
782 if in_merge {
800 if in_merge {
783 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
801 // XXX we should not be able to have 'm' state and 'FROM_P2' if not
784 // during a merge. So I (marmoute) am not sure we need the
802 // during a merge. So I (marmoute) am not sure we need the
785 // conditional at all. Double-checking this with an assert
803 // conditional at all. Double-checking this with an assert
786 // would be nice.
804 // would be nice.
787 if let Some(old_entry) = old_entry_opt {
805 if let Some(old_entry) = old_entry_opt {
788 // backup the previous state
806 // backup the previous state
789 if old_entry.state == EntryState::Merged {
807 if old_entry.state == EntryState::Merged {
790 size = SIZE_NON_NORMAL;
808 size = SIZE_NON_NORMAL;
791 } else if old_entry.state == EntryState::Normal
809 } else if old_entry.state == EntryState::Normal
792 && old_entry.size == SIZE_FROM_OTHER_PARENT
810 && old_entry.size == SIZE_FROM_OTHER_PARENT
793 {
811 {
794 // other parent
812 // other parent
795 size = SIZE_FROM_OTHER_PARENT;
813 size = SIZE_FROM_OTHER_PARENT;
796 }
814 }
797 }
815 }
798 }
816 }
799 if size == 0 {
817 if size == 0 {
800 self.copy_map_remove(filename)?;
818 self.copy_map_remove(filename)?;
801 }
819 }
802 let entry = DirstateEntry {
820 let entry = DirstateEntry {
803 state: EntryState::Removed,
821 state: EntryState::Removed,
804 mode: 0,
822 mode: 0,
805 size,
823 size,
806 mtime: 0,
824 mtime: 0,
807 };
825 };
808 Ok(self.add_or_remove_file(filename, old_state, entry)?)
826 Ok(self.add_or_remove_file(filename, old_state, entry)?)
809 }
827 }
810
828
811 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
829 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
812 let old_state = match self.get(filename)? {
830 let old_state = match self.get(filename)? {
813 Some(e) => e.state,
831 Some(e) => e.state,
814 None => EntryState::Unknown,
832 None => EntryState::Unknown,
815 };
833 };
816 struct Dropped {
834 struct Dropped {
817 was_tracked: bool,
835 was_tracked: bool,
818 had_entry: bool,
836 had_entry: bool,
819 had_copy_source: bool,
837 had_copy_source: bool,
820 }
838 }
821
839
822 /// If this returns `Ok(Some((dropped, removed)))`, then
840 /// If this returns `Ok(Some((dropped, removed)))`, then
823 ///
841 ///
824 /// * `dropped` is about the leaf node that was at `filename`
842 /// * `dropped` is about the leaf node that was at `filename`
825 /// * `removed` is whether this particular level of recursion just
843 /// * `removed` is whether this particular level of recursion just
826 /// removed a node in `nodes`.
844 /// removed a node in `nodes`.
827 fn recur<'on_disk>(
845 fn recur<'on_disk>(
828 on_disk: &'on_disk [u8],
846 on_disk: &'on_disk [u8],
829 nodes: &mut ChildNodes<'on_disk>,
847 nodes: &mut ChildNodes<'on_disk>,
830 path: &HgPath,
848 path: &HgPath,
831 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
849 ) -> Result<Option<(Dropped, bool)>, DirstateV2ParseError> {
832 let (first_path_component, rest_of_path) =
850 let (first_path_component, rest_of_path) =
833 path.split_first_component();
851 path.split_first_component();
834 let node = if let Some(node) =
852 let node = if let Some(node) =
835 nodes.make_mut(on_disk)?.get_mut(first_path_component)
853 nodes.make_mut(on_disk)?.get_mut(first_path_component)
836 {
854 {
837 node
855 node
838 } else {
856 } else {
839 return Ok(None);
857 return Ok(None);
840 };
858 };
841 let dropped;
859 let dropped;
842 if let Some(rest) = rest_of_path {
860 if let Some(rest) = rest_of_path {
843 if let Some((d, removed)) =
861 if let Some((d, removed)) =
844 recur(on_disk, &mut node.children, rest)?
862 recur(on_disk, &mut node.children, rest)?
845 {
863 {
846 dropped = d;
864 dropped = d;
847 if dropped.had_entry {
865 if dropped.had_entry {
848 node.descendants_with_entry_count -= 1;
866 node.descendants_with_entry_count -= 1;
849 }
867 }
850 if dropped.was_tracked {
868 if dropped.was_tracked {
851 node.tracked_descendants_count -= 1;
869 node.tracked_descendants_count -= 1;
852 }
870 }
853
871
854 // Directory caches must be invalidated when removing a
872 // Directory caches must be invalidated when removing a
855 // child node
873 // child node
856 if removed {
874 if removed {
857 if let NodeData::CachedDirectory { .. } = &node.data {
875 if let NodeData::CachedDirectory { .. } = &node.data {
858 node.data = NodeData::None
876 node.data = NodeData::None
859 }
877 }
860 }
878 }
861 } else {
879 } else {
862 return Ok(None);
880 return Ok(None);
863 }
881 }
864 } else {
882 } else {
865 let had_entry = node.data.has_entry();
883 let had_entry = node.data.has_entry();
866 if had_entry {
884 if had_entry {
867 node.data = NodeData::None
885 node.data = NodeData::None
868 }
886 }
869 dropped = Dropped {
887 dropped = Dropped {
870 was_tracked: node
888 was_tracked: node
871 .data
889 .data
872 .as_entry()
890 .as_entry()
873 .map_or(false, |entry| entry.state.is_tracked()),
891 .map_or(false, |entry| entry.state.is_tracked()),
874 had_entry,
892 had_entry,
875 had_copy_source: node.copy_source.take().is_some(),
893 had_copy_source: node.copy_source.take().is_some(),
876 };
894 };
877 }
895 }
878 // After recursion, for both leaf (rest_of_path is None) nodes and
896 // After recursion, for both leaf (rest_of_path is None) nodes and
879 // parent nodes, remove a node if it just became empty.
897 // parent nodes, remove a node if it just became empty.
880 let remove = !node.data.has_entry()
898 let remove = !node.data.has_entry()
881 && node.copy_source.is_none()
899 && node.copy_source.is_none()
882 && node.children.is_empty();
900 && node.children.is_empty();
883 if remove {
901 if remove {
884 nodes.make_mut(on_disk)?.remove(first_path_component);
902 nodes.make_mut(on_disk)?.remove(first_path_component);
885 }
903 }
886 Ok(Some((dropped, remove)))
904 Ok(Some((dropped, remove)))
887 }
905 }
888
906
889 if let Some((dropped, _removed)) =
907 if let Some((dropped, _removed)) =
890 recur(self.on_disk, &mut self.root, filename)?
908 recur(self.on_disk, &mut self.root, filename)?
891 {
909 {
892 if dropped.had_entry {
910 if dropped.had_entry {
893 self.nodes_with_entry_count -= 1
911 self.nodes_with_entry_count -= 1
894 }
912 }
895 if dropped.had_copy_source {
913 if dropped.had_copy_source {
896 self.nodes_with_copy_source_count -= 1
914 self.nodes_with_copy_source_count -= 1
897 }
915 }
898 Ok(dropped.had_entry)
916 Ok(dropped.had_entry)
899 } else {
917 } else {
900 debug_assert!(!old_state.is_tracked());
918 debug_assert!(!old_state.is_tracked());
901 Ok(false)
919 Ok(false)
902 }
920 }
903 }
921 }
904
922
905 fn clear_ambiguous_times(
923 fn clear_ambiguous_times(
906 &mut self,
924 &mut self,
907 filenames: Vec<HgPathBuf>,
925 filenames: Vec<HgPathBuf>,
908 now: i32,
926 now: i32,
909 ) -> Result<(), DirstateV2ParseError> {
927 ) -> Result<(), DirstateV2ParseError> {
910 for filename in filenames {
928 for filename in filenames {
911 if let Some(node) =
929 if let Some(node) =
912 Self::get_node_mut(self.on_disk, &mut self.root, &filename)?
930 Self::get_node_mut(self.on_disk, &mut self.root, &filename)?
913 {
931 {
914 if let NodeData::Entry(entry) = &mut node.data {
932 if let NodeData::Entry(entry) = &mut node.data {
915 entry.clear_ambiguous_mtime(now);
933 entry.clear_ambiguous_mtime(now);
916 }
934 }
917 }
935 }
918 }
936 }
919 Ok(())
937 Ok(())
920 }
938 }
921
939
922 fn non_normal_entries_contains(
940 fn non_normal_entries_contains(
923 &mut self,
941 &mut self,
924 key: &HgPath,
942 key: &HgPath,
925 ) -> Result<bool, DirstateV2ParseError> {
943 ) -> Result<bool, DirstateV2ParseError> {
926 Ok(if let Some(node) = self.get_node(key)? {
944 Ok(if let Some(node) = self.get_node(key)? {
927 node.entry()?.map_or(false, |entry| entry.is_non_normal())
945 node.entry()?.map_or(false, |entry| entry.is_non_normal())
928 } else {
946 } else {
929 false
947 false
930 })
948 })
931 }
949 }
932
950
933 fn non_normal_entries_remove(&mut self, _key: &HgPath) {
951 fn non_normal_entries_remove(&mut self, _key: &HgPath) {
934 // Do nothing, this `DirstateMap` does not have a separate "non normal
952 // Do nothing, this `DirstateMap` does not have a separate "non normal
935 // entries" set that needs to be kept up to date
953 // entries" set that needs to be kept up to date
936 }
954 }
937
955
938 fn non_normal_or_other_parent_paths(
956 fn non_normal_or_other_parent_paths(
939 &mut self,
957 &mut self,
940 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
958 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
941 {
959 {
942 Box::new(self.filter_full_paths(|entry| {
960 Box::new(self.filter_full_paths(|entry| {
943 entry.is_non_normal() || entry.is_from_other_parent()
961 entry.is_non_normal() || entry.is_from_other_parent()
944 }))
962 }))
945 }
963 }
946
964
947 fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
965 fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
948 // Do nothing, this `DirstateMap` does not have a separate "non normal
966 // Do nothing, this `DirstateMap` does not have a separate "non normal
949 // entries" and "from other parent" sets that need to be recomputed
967 // entries" and "from other parent" sets that need to be recomputed
950 }
968 }
951
969
952 fn iter_non_normal_paths(
970 fn iter_non_normal_paths(
953 &mut self,
971 &mut self,
954 ) -> Box<
972 ) -> Box<
955 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
973 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
956 > {
974 > {
957 self.iter_non_normal_paths_panic()
975 self.iter_non_normal_paths_panic()
958 }
976 }
959
977
960 fn iter_non_normal_paths_panic(
978 fn iter_non_normal_paths_panic(
961 &self,
979 &self,
962 ) -> Box<
980 ) -> Box<
963 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
981 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
964 > {
982 > {
965 Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
983 Box::new(self.filter_full_paths(|entry| entry.is_non_normal()))
966 }
984 }
967
985
968 fn iter_other_parent_paths(
986 fn iter_other_parent_paths(
969 &mut self,
987 &mut self,
970 ) -> Box<
988 ) -> Box<
971 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
989 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
972 > {
990 > {
973 Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
991 Box::new(self.filter_full_paths(|entry| entry.is_from_other_parent()))
974 }
992 }
975
993
976 fn has_tracked_dir(
994 fn has_tracked_dir(
977 &mut self,
995 &mut self,
978 directory: &HgPath,
996 directory: &HgPath,
979 ) -> Result<bool, DirstateError> {
997 ) -> Result<bool, DirstateError> {
980 if let Some(node) = self.get_node(directory)? {
998 if let Some(node) = self.get_node(directory)? {
981 // A node without a `DirstateEntry` was created to hold child
999 // A node without a `DirstateEntry` was created to hold child
982 // nodes, and is therefore a directory.
1000 // nodes, and is therefore a directory.
983 let state = node.state()?;
1001 let state = node.state()?;
984 Ok(state.is_none() && node.tracked_descendants_count() > 0)
1002 Ok(state.is_none() && node.tracked_descendants_count() > 0)
985 } else {
1003 } else {
986 Ok(false)
1004 Ok(false)
987 }
1005 }
988 }
1006 }
989
1007
990 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
1008 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
991 if let Some(node) = self.get_node(directory)? {
1009 if let Some(node) = self.get_node(directory)? {
992 // A node without a `DirstateEntry` was created to hold child
1010 // A node without a `DirstateEntry` was created to hold child
993 // nodes, and is therefore a directory.
1011 // nodes, and is therefore a directory.
994 let state = node.state()?;
1012 let state = node.state()?;
995 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
1013 Ok(state.is_none() && node.descendants_with_entry_count() > 0)
996 } else {
1014 } else {
997 Ok(false)
1015 Ok(false)
998 }
1016 }
999 }
1017 }
1000
1018
1001 #[timed]
1019 #[timed]
1002 fn pack_v1(
1020 fn pack_v1(
1003 &mut self,
1021 &mut self,
1004 parents: DirstateParents,
1022 parents: DirstateParents,
1005 now: Timestamp,
1023 now: Timestamp,
1006 ) -> Result<Vec<u8>, DirstateError> {
1024 ) -> Result<Vec<u8>, DirstateError> {
1007 let now: i32 = now.0.try_into().expect("time overflow");
1025 let now: i32 = now.0.try_into().expect("time overflow");
1008 let mut ambiguous_mtimes = Vec::new();
1026 let mut ambiguous_mtimes = Vec::new();
1009 // Optimization (to be measured?): pre-compute size to avoid `Vec`
1027 // Optimization (to be measured?): pre-compute size to avoid `Vec`
1010 // reallocations
1028 // reallocations
1011 let mut size = parents.as_bytes().len();
1029 let mut size = parents.as_bytes().len();
1012 for node in self.iter_nodes() {
1030 for node in self.iter_nodes() {
1013 let node = node?;
1031 let node = node?;
1014 if let Some(entry) = node.entry()? {
1032 if let Some(entry) = node.entry()? {
1015 size += packed_entry_size(
1033 size += packed_entry_size(
1016 node.full_path(self.on_disk)?,
1034 node.full_path(self.on_disk)?,
1017 node.copy_source(self.on_disk)?,
1035 node.copy_source(self.on_disk)?,
1018 );
1036 );
1019 if entry.mtime_is_ambiguous(now) {
1037 if entry.mtime_is_ambiguous(now) {
1020 ambiguous_mtimes.push(
1038 ambiguous_mtimes.push(
1021 node.full_path_borrowed(self.on_disk)?
1039 node.full_path_borrowed(self.on_disk)?
1022 .detach_from_tree(),
1040 .detach_from_tree(),
1023 )
1041 )
1024 }
1042 }
1025 }
1043 }
1026 }
1044 }
1027 self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
1045 self.clear_known_ambiguous_mtimes(&ambiguous_mtimes)?;
1028
1046
1029 let mut packed = Vec::with_capacity(size);
1047 let mut packed = Vec::with_capacity(size);
1030 packed.extend(parents.as_bytes());
1048 packed.extend(parents.as_bytes());
1031
1049
1032 for node in self.iter_nodes() {
1050 for node in self.iter_nodes() {
1033 let node = node?;
1051 let node = node?;
1034 if let Some(entry) = node.entry()? {
1052 if let Some(entry) = node.entry()? {
1035 pack_entry(
1053 pack_entry(
1036 node.full_path(self.on_disk)?,
1054 node.full_path(self.on_disk)?,
1037 &entry,
1055 &entry,
1038 node.copy_source(self.on_disk)?,
1056 node.copy_source(self.on_disk)?,
1039 &mut packed,
1057 &mut packed,
1040 );
1058 );
1041 }
1059 }
1042 }
1060 }
1043 Ok(packed)
1061 Ok(packed)
1044 }
1062 }
1045
1063
1064 /// Returns new data together with whether that data should be appended to
1065 /// the existing data file whose content is at `self.on_disk` (true),
1066 /// instead of written to a new data file (false).
1046 #[timed]
1067 #[timed]
1047 fn pack_v2(&mut self, now: Timestamp) -> Result<Vec<u8>, DirstateError> {
1068 fn pack_v2(
1069 &mut self,
1070 now: Timestamp,
1071 can_append: bool,
1072 ) -> Result<(Vec<u8>, bool), DirstateError> {
1048 // TODO: how do we want to handle this in 2038?
1073 // TODO: how do we want to handle this in 2038?
1049 let now: i32 = now.0.try_into().expect("time overflow");
1074 let now: i32 = now.0.try_into().expect("time overflow");
1050 let mut paths = Vec::new();
1075 let mut paths = Vec::new();
1051 for node in self.iter_nodes() {
1076 for node in self.iter_nodes() {
1052 let node = node?;
1077 let node = node?;
1053 if let Some(entry) = node.entry()? {
1078 if let Some(entry) = node.entry()? {
1054 if entry.mtime_is_ambiguous(now) {
1079 if entry.mtime_is_ambiguous(now) {
1055 paths.push(
1080 paths.push(
1056 node.full_path_borrowed(self.on_disk)?
1081 node.full_path_borrowed(self.on_disk)?
1057 .detach_from_tree(),
1082 .detach_from_tree(),
1058 )
1083 )
1059 }
1084 }
1060 }
1085 }
1061 }
1086 }
1062 // Borrow of `self` ends here since we collect cloned paths
1087 // Borrow of `self` ends here since we collect cloned paths
1063
1088
1064 self.clear_known_ambiguous_mtimes(&paths)?;
1089 self.clear_known_ambiguous_mtimes(&paths)?;
1065
1090
1066 on_disk::write(self)
1091 on_disk::write(self, can_append)
1067 }
1092 }
1068
1093
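To show how the new `(Vec<u8>, bool)` return value of `pack_v2` is meant to be consumed: append to the current data file when the flag is true, otherwise write a fresh file. The sketch below is a hypothetical caller using plain `std::fs`; the real caller updates the dirstate docket instead, and the path parameters are stand-ins, not actual Mercurial file names.

```rust
use std::fs::OpenOptions;
use std::io::Write;

// Hypothetical caller of `pack_v2`; file names and handling are assumptions.
fn write_dirstate_data(
    packed: Vec<u8>,
    should_append: bool,
    current_data_path: &str,
    new_data_path: &str,
) -> std::io::Result<String> {
    if should_append {
        // Reuse the existing data file: the packed nodes go at the end, and
        // the docket would then be updated to point at the new root offset.
        let mut file = OpenOptions::new().append(true).open(current_data_path)?;
        file.write_all(&packed)?;
        Ok(current_data_path.to_owned())
    } else {
        // Start over with a data file that contains only reachable data.
        std::fs::write(new_data_path, &packed)?;
        Ok(new_data_path.to_owned())
    }
}
```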
1069 fn status<'a>(
1094 fn status<'a>(
1070 &'a mut self,
1095 &'a mut self,
1071 matcher: &'a (dyn Matcher + Sync),
1096 matcher: &'a (dyn Matcher + Sync),
1072 root_dir: PathBuf,
1097 root_dir: PathBuf,
1073 ignore_files: Vec<PathBuf>,
1098 ignore_files: Vec<PathBuf>,
1074 options: StatusOptions,
1099 options: StatusOptions,
1075 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
1100 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
1076 {
1101 {
1077 super::status::status(self, matcher, root_dir, ignore_files, options)
1102 super::status::status(self, matcher, root_dir, ignore_files, options)
1078 }
1103 }
1079
1104
1080 fn copy_map_len(&self) -> usize {
1105 fn copy_map_len(&self) -> usize {
1081 self.nodes_with_copy_source_count as usize
1106 self.nodes_with_copy_source_count as usize
1082 }
1107 }
1083
1108
1084 fn copy_map_iter(&self) -> CopyMapIter<'_> {
1109 fn copy_map_iter(&self) -> CopyMapIter<'_> {
1085 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1110 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1086 Ok(if let Some(source) = node.copy_source(self.on_disk)? {
1111 Ok(if let Some(source) = node.copy_source(self.on_disk)? {
1087 Some((node.full_path(self.on_disk)?, source))
1112 Some((node.full_path(self.on_disk)?, source))
1088 } else {
1113 } else {
1089 None
1114 None
1090 })
1115 })
1091 }))
1116 }))
1092 }
1117 }
1093
1118
1094 fn copy_map_contains_key(
1119 fn copy_map_contains_key(
1095 &self,
1120 &self,
1096 key: &HgPath,
1121 key: &HgPath,
1097 ) -> Result<bool, DirstateV2ParseError> {
1122 ) -> Result<bool, DirstateV2ParseError> {
1098 Ok(if let Some(node) = self.get_node(key)? {
1123 Ok(if let Some(node) = self.get_node(key)? {
1099 node.has_copy_source()
1124 node.has_copy_source()
1100 } else {
1125 } else {
1101 false
1126 false
1102 })
1127 })
1103 }
1128 }
1104
1129
1105 fn copy_map_get(
1130 fn copy_map_get(
1106 &self,
1131 &self,
1107 key: &HgPath,
1132 key: &HgPath,
1108 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1133 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
1109 if let Some(node) = self.get_node(key)? {
1134 if let Some(node) = self.get_node(key)? {
1110 if let Some(source) = node.copy_source(self.on_disk)? {
1135 if let Some(source) = node.copy_source(self.on_disk)? {
1111 return Ok(Some(source));
1136 return Ok(Some(source));
1112 }
1137 }
1113 }
1138 }
1114 Ok(None)
1139 Ok(None)
1115 }
1140 }
1116
1141
1117 fn copy_map_remove(
1142 fn copy_map_remove(
1118 &mut self,
1143 &mut self,
1119 key: &HgPath,
1144 key: &HgPath,
1120 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1145 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1121 let count = &mut self.nodes_with_copy_source_count;
1146 let count = &mut self.nodes_with_copy_source_count;
1122 Ok(
1147 Ok(
1123 Self::get_node_mut(self.on_disk, &mut self.root, key)?.and_then(
1148 Self::get_node_mut(self.on_disk, &mut self.root, key)?.and_then(
1124 |node| {
1149 |node| {
1125 if node.copy_source.is_some() {
1150 if node.copy_source.is_some() {
1126 *count -= 1
1151 *count -= 1
1127 }
1152 }
1128 node.copy_source.take().map(Cow::into_owned)
1153 node.copy_source.take().map(Cow::into_owned)
1129 },
1154 },
1130 ),
1155 ),
1131 )
1156 )
1132 }
1157 }
1133
1158
1134 fn copy_map_insert(
1159 fn copy_map_insert(
1135 &mut self,
1160 &mut self,
1136 key: HgPathBuf,
1161 key: HgPathBuf,
1137 value: HgPathBuf,
1162 value: HgPathBuf,
1138 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1163 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
1139 let node = Self::get_or_insert_node(
1164 let node = Self::get_or_insert_node(
1140 self.on_disk,
1165 self.on_disk,
1141 &mut self.root,
1166 &mut self.root,
1142 &key,
1167 &key,
1143 WithBasename::to_cow_owned,
1168 WithBasename::to_cow_owned,
1144 |_ancestor| {},
1169 |_ancestor| {},
1145 )?;
1170 )?;
1146 if node.copy_source.is_none() {
1171 if node.copy_source.is_none() {
1147 self.nodes_with_copy_source_count += 1
1172 self.nodes_with_copy_source_count += 1
1148 }
1173 }
1149 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1174 Ok(node.copy_source.replace(value.into()).map(Cow::into_owned))
1150 }
1175 }
1151
1176
1152 fn len(&self) -> usize {
1177 fn len(&self) -> usize {
1153 self.nodes_with_entry_count as usize
1178 self.nodes_with_entry_count as usize
1154 }
1179 }
1155
1180
1156 fn contains_key(
1181 fn contains_key(
1157 &self,
1182 &self,
1158 key: &HgPath,
1183 key: &HgPath,
1159 ) -> Result<bool, DirstateV2ParseError> {
1184 ) -> Result<bool, DirstateV2ParseError> {
1160 Ok(self.get(key)?.is_some())
1185 Ok(self.get(key)?.is_some())
1161 }
1186 }
1162
1187
1163 fn get(
1188 fn get(
1164 &self,
1189 &self,
1165 key: &HgPath,
1190 key: &HgPath,
1166 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1191 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
1167 Ok(if let Some(node) = self.get_node(key)? {
1192 Ok(if let Some(node) = self.get_node(key)? {
1168 node.entry()?
1193 node.entry()?
1169 } else {
1194 } else {
1170 None
1195 None
1171 })
1196 })
1172 }
1197 }
1173
1198
1174 fn iter(&self) -> StateMapIter<'_> {
1199 fn iter(&self) -> StateMapIter<'_> {
1175 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1200 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1176 Ok(if let Some(entry) = node.entry()? {
1201 Ok(if let Some(entry) = node.entry()? {
1177 Some((node.full_path(self.on_disk)?, entry))
1202 Some((node.full_path(self.on_disk)?, entry))
1178 } else {
1203 } else {
1179 None
1204 None
1180 })
1205 })
1181 }))
1206 }))
1182 }
1207 }
1183
1208
1184 fn iter_directories(
1209 fn iter_directories(
1185 &self,
1210 &self,
1186 ) -> Box<
1211 ) -> Box<
1187 dyn Iterator<
1212 dyn Iterator<
1188 Item = Result<
1213 Item = Result<
1189 (&HgPath, Option<Timestamp>),
1214 (&HgPath, Option<Timestamp>),
1190 DirstateV2ParseError,
1215 DirstateV2ParseError,
1191 >,
1216 >,
1192 > + Send
1217 > + Send
1193 + '_,
1218 + '_,
1194 > {
1219 > {
1195 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1220 Box::new(filter_map_results(self.iter_nodes(), move |node| {
1196 Ok(if node.state()?.is_none() {
1221 Ok(if node.state()?.is_none() {
1197 Some((
1222 Some((
1198 node.full_path(self.on_disk)?,
1223 node.full_path(self.on_disk)?,
1199 node.cached_directory_mtime()
1224 node.cached_directory_mtime()
1200 .map(|mtime| Timestamp(mtime.seconds())),
1225 .map(|mtime| Timestamp(mtime.seconds())),
1201 ))
1226 ))
1202 } else {
1227 } else {
1203 None
1228 None
1204 })
1229 })
1205 }))
1230 }))
1206 }
1231 }
1207 }
1232 }
@@ -1,479 +1,491 b''
1 use std::path::PathBuf;
1 use std::path::PathBuf;
2
2
3 use crate::dirstate::parsers::Timestamp;
3 use crate::dirstate::parsers::Timestamp;
4 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
4 use crate::dirstate_tree::on_disk::DirstateV2ParseError;
5 use crate::matchers::Matcher;
5 use crate::matchers::Matcher;
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
6 use crate::utils::hg_path::{HgPath, HgPathBuf};
7 use crate::CopyMapIter;
7 use crate::CopyMapIter;
8 use crate::DirstateEntry;
8 use crate::DirstateEntry;
9 use crate::DirstateError;
9 use crate::DirstateError;
10 use crate::DirstateMap;
10 use crate::DirstateMap;
11 use crate::DirstateParents;
11 use crate::DirstateParents;
12 use crate::DirstateStatus;
12 use crate::DirstateStatus;
13 use crate::PatternFileWarning;
13 use crate::PatternFileWarning;
14 use crate::StateMapIter;
14 use crate::StateMapIter;
15 use crate::StatusError;
15 use crate::StatusError;
16 use crate::StatusOptions;
16 use crate::StatusOptions;
17
17
18 /// `rust/hg-cpython/src/dirstate/dirstate_map.rs` implements in Rust a
18 /// `rust/hg-cpython/src/dirstate/dirstate_map.rs` implements in Rust a
19 /// `DirstateMap` Python class that wraps `Box<dyn DirstateMapMethods + Send>`,
19 /// `DirstateMap` Python class that wraps `Box<dyn DirstateMapMethods + Send>`,
20 /// a trait object of this trait. Except for constructors, this trait defines
20 /// a trait object of this trait. Except for constructors, this trait defines
21 /// all APIs that the class needs to interact with its inner dirstate map.
21 /// all APIs that the class needs to interact with its inner dirstate map.
22 ///
22 ///
23 /// A trait object is used to support two different concrete types:
23 /// A trait object is used to support two different concrete types:
24 ///
24 ///
25 /// * `rust/hg-core/src/dirstate/dirstate_map.rs` defines the "flat dirstate
25 /// * `rust/hg-core/src/dirstate/dirstate_map.rs` defines the "flat dirstate
26 /// map" which is based on a few large `HgPath`-keyed `HashMap` and `HashSet`
26 /// map" which is based on a few large `HgPath`-keyed `HashMap` and `HashSet`
27 /// fields.
27 /// fields.
28 /// * `rust/hg-core/src/dirstate_tree/dirstate_map.rs` defines the "tree
28 /// * `rust/hg-core/src/dirstate_tree/dirstate_map.rs` defines the "tree
29 /// dirstate map" based on a tree data structure with nodes for directories
29 /// dirstate map" based on a tree data structure with nodes for directories
30 /// containing child nodes for their files and sub-directories. This tree
30 /// containing child nodes for their files and sub-directories. This tree
31 /// enables a more efficient algorithm for `hg status`, but its details are
31 /// enables a more efficient algorithm for `hg status`, but its details are
32 /// abstracted in this trait.
32 /// abstracted in this trait.
33 ///
33 ///
34 /// The dirstate map associates paths of files in the working directory to
34 /// The dirstate map associates paths of files in the working directory to
35 /// various information about the state of those files.
35 /// various information about the state of those files.
36 pub trait DirstateMapMethods {
36 pub trait DirstateMapMethods {
37 /// Remove information about all files in this map
37 /// Remove information about all files in this map
38 fn clear(&mut self);
38 fn clear(&mut self);
39
39
40 /// Add or change the information associated to a given file.
40 /// Add or change the information associated to a given file.
41 ///
41 ///
42 /// `old_state` is the state in the entry that `get` would have returned
42 /// `old_state` is the state in the entry that `get` would have returned
43 /// before this call, or `EntryState::Unknown` if there was no such entry.
43 /// before this call, or `EntryState::Unknown` if there was no such entry.
44 ///
44 ///
45 /// `entry.state` should never be `EntryState::Unknown`.
45 /// `entry.state` should never be `EntryState::Unknown`.
46 fn add_file(
46 fn add_file(
47 &mut self,
47 &mut self,
48 filename: &HgPath,
48 filename: &HgPath,
49 entry: DirstateEntry,
49 entry: DirstateEntry,
50 added: bool,
50 added: bool,
51 merged: bool,
51 merged: bool,
52 from_p2: bool,
52 from_p2: bool,
53 possibly_dirty: bool,
53 possibly_dirty: bool,
54 ) -> Result<(), DirstateError>;
54 ) -> Result<(), DirstateError>;
55
55
56 /// Mark a file as "removed" (as in `hg rm`).
56 /// Mark a file as "removed" (as in `hg rm`).
57 ///
57 ///
58 /// `old_state` is the state in the entry that `get` would have returned
58 /// `old_state` is the state in the entry that `get` would have returned
59 /// before this call, or `EntryState::Unknown` if there was no such entry.
59 /// before this call, or `EntryState::Unknown` if there was no such entry.
60 ///
60 ///
61 /// `size` is not actually a size but the 0 or -1 or -2 value that would be
61 /// `size` is not actually a size but the 0 or -1 or -2 value that would be
62 /// put in the size field in the dirstate-v1 format.
62 /// put in the size field in the dirstate-v1 format.
63 fn remove_file(
63 fn remove_file(
64 &mut self,
64 &mut self,
65 filename: &HgPath,
65 filename: &HgPath,
66 in_merge: bool,
66 in_merge: bool,
67 ) -> Result<(), DirstateError>;
67 ) -> Result<(), DirstateError>;
68
68
69 /// Drop information about this file from the map if any, and return
69 /// Drop information about this file from the map if any, and return
70 /// whether there was any.
70 /// whether there was any.
71 ///
71 ///
72 /// `get` will now return `None` for this filename.
72 /// `get` will now return `None` for this filename.
73 ///
73 ///
74 /// `old_state` is the state in the entry that `get` would have returned
74 /// `old_state` is the state in the entry that `get` would have returned
75 /// before this call, or `EntryState::Unknown` if there was no such entry.
75 /// before this call, or `EntryState::Unknown` if there was no such entry.
76 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError>;
76 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError>;
77
77
78 /// Among given files, mark the stored `mtime` as ambiguous if there is one
78 /// Among given files, mark the stored `mtime` as ambiguous if there is one
79 /// (if `state == EntryState::Normal`) equal to the given current Unix
79 /// (if `state == EntryState::Normal`) equal to the given current Unix
80 /// timestamp.
80 /// timestamp.
81 fn clear_ambiguous_times(
81 fn clear_ambiguous_times(
82 &mut self,
82 &mut self,
83 filenames: Vec<HgPathBuf>,
83 filenames: Vec<HgPathBuf>,
84 now: i32,
84 now: i32,
85 ) -> Result<(), DirstateV2ParseError>;
85 ) -> Result<(), DirstateV2ParseError>;
86
86
87 /// Return whether the map has a "non-normal" entry for the given
87 /// Return whether the map has a "non-normal" entry for the given
88 /// filename. That is, any entry with a `state` other than
88 /// filename. That is, any entry with a `state` other than
89 /// `EntryState::Normal` or with an ambiguous `mtime`.
89 /// `EntryState::Normal` or with an ambiguous `mtime`.
90 fn non_normal_entries_contains(
90 fn non_normal_entries_contains(
91 &mut self,
91 &mut self,
92 key: &HgPath,
92 key: &HgPath,
93 ) -> Result<bool, DirstateV2ParseError>;
93 ) -> Result<bool, DirstateV2ParseError>;
94
94
95 /// Mark the given path as a "normal" file. This is only relevant in the flat
95 /// Mark the given path as a "normal" file. This is only relevant in the flat
96 /// dirstate map where there is a separate `HashSet` that needs to be kept
96 /// dirstate map where there is a separate `HashSet` that needs to be kept
97 /// up to date.
97 /// up to date.
98 fn non_normal_entries_remove(&mut self, key: &HgPath);
98 fn non_normal_entries_remove(&mut self, key: &HgPath);
99
99
100 /// Return an iterator of paths whose respective entries are either
100 /// Return an iterator of paths whose respective entries are either
101 /// "non-normal" (see `non_normal_entries_contains`) or "from other
101 /// "non-normal" (see `non_normal_entries_contains`) or "from other
102 /// parent".
102 /// parent".
103 ///
103 ///
104 /// If that information is cached, create the cache as needed.
104 /// If that information is cached, create the cache as needed.
105 ///
105 ///
106 /// "From other parent" is defined as `state == Normal && size == -2`.
106 /// "From other parent" is defined as `state == Normal && size == -2`.
107 ///
107 ///
108 /// Because parse errors can happen during iteration, the iterated items
108 /// Because parse errors can happen during iteration, the iterated items
109 /// are `Result`s.
109 /// are `Result`s.
110 fn non_normal_or_other_parent_paths(
110 fn non_normal_or_other_parent_paths(
111 &mut self,
111 &mut self,
112 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;
112 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>;
113
113
114 /// Create the cache for `non_normal_or_other_parent_paths` if needed.
114 /// Create the cache for `non_normal_or_other_parent_paths` if needed.
115 ///
115 ///
116 /// If `force` is true, the cache is re-created even if it already exists.
116 /// If `force` is true, the cache is re-created even if it already exists.
117 fn set_non_normal_other_parent_entries(&mut self, force: bool);
117 fn set_non_normal_other_parent_entries(&mut self, force: bool);
118
118
119 /// Return an iterator of paths whose respective entries are "non-normal"
119 /// Return an iterator of paths whose respective entries are "non-normal"
120 /// (see `non_normal_entries_contains`).
120 /// (see `non_normal_entries_contains`).
121 ///
121 ///
122 /// If that information is cached, create the cache as needed.
122 /// If that information is cached, create the cache as needed.
123 ///
123 ///
124 /// Because parse errors can happen during iteration, the iterated items
124 /// Because parse errors can happen during iteration, the iterated items
125 /// are `Result`s.
125 /// are `Result`s.
126 fn iter_non_normal_paths(
126 fn iter_non_normal_paths(
127 &mut self,
127 &mut self,
128 ) -> Box<
128 ) -> Box<
129 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
129 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
130 >;
130 >;
131
131
132 /// Same as `iter_non_normal_paths`, but takes `&self` instead of `&mut
132 /// Same as `iter_non_normal_paths`, but takes `&self` instead of `&mut
133 /// self`.
133 /// self`.
134 ///
134 ///
135 /// Panics if a cache is necessary but does not exist yet.
135 /// Panics if a cache is necessary but does not exist yet.
136 fn iter_non_normal_paths_panic(
136 fn iter_non_normal_paths_panic(
137 &self,
137 &self,
138 ) -> Box<
138 ) -> Box<
139 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
139 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
140 >;
140 >;
141
141
142 /// Return an iterator of paths whose respective entries are "from other
142 /// Return an iterator of paths whose respective entries are "from other
143 /// parent".
143 /// parent".
144 ///
144 ///
145 /// If that information is cached, create the cache as needed.
145 /// If that information is cached, create the cache as needed.
146 ///
146 ///
147 /// "From other parent" is defined as `state == Normal && size == -2`.
147 /// "From other parent" is defined as `state == Normal && size == -2`.
148 ///
148 ///
149 /// Because parse errors can happen during iteration, the iterated items
149 /// Because parse errors can happen during iteration, the iterated items
150 /// are `Result`s.
150 /// are `Result`s.
151 fn iter_other_parent_paths(
151 fn iter_other_parent_paths(
152 &mut self,
152 &mut self,
153 ) -> Box<
153 ) -> Box<
154 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
154 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
155 >;
155 >;
156
156
157 /// Returns whether the sub-tree rooted at the given directory contains any
157 /// Returns whether the sub-tree rooted at the given directory contains any
158 /// tracked file.
158 /// tracked file.
159 ///
159 ///
160 /// A file is tracked if it has a `state` other than `EntryState::Removed`.
160 /// A file is tracked if it has a `state` other than `EntryState::Removed`.
161 fn has_tracked_dir(
161 fn has_tracked_dir(
162 &mut self,
162 &mut self,
163 directory: &HgPath,
163 directory: &HgPath,
164 ) -> Result<bool, DirstateError>;
164 ) -> Result<bool, DirstateError>;
165
165
166 /// Returns whether the sub-tree rooted at the given directory contains any
166 /// Returns whether the sub-tree rooted at the given directory contains any
167 /// file with a dirstate entry.
167 /// file with a dirstate entry.
168 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
168 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError>;
169
169
170 /// Clear mtimes that are ambiguous with `now` (similar to
170 /// Clear mtimes that are ambiguous with `now` (similar to
171 /// `clear_ambiguous_times` but for all files in the dirstate map), and
171 /// `clear_ambiguous_times` but for all files in the dirstate map), and
172 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v1
172 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v1
173 /// format.
173 /// format.
174 fn pack_v1(
174 fn pack_v1(
175 &mut self,
175 &mut self,
176 parents: DirstateParents,
176 parents: DirstateParents,
177 now: Timestamp,
177 now: Timestamp,
178 ) -> Result<Vec<u8>, DirstateError>;
178 ) -> Result<Vec<u8>, DirstateError>;
179
179
180 /// Clear mtimes that are ambiguous with `now` (similar to
180 /// Clear mtimes that are ambiguous with `now` (similar to
181 /// `clear_ambiguous_times` but for all files in the dirstate map), and
181 /// `clear_ambiguous_times` but for all files in the dirstate map), and
182 /// serialize bytes to write the `.hg/dirstate` file to disk in dirstate-v2
182 /// serialize bytes to write a dirstate data file to disk in dirstate-v2
183 /// format.
183 /// format.
184 ///
184 ///
185 /// Returns new data together with whether that data should be appended to
186 /// the existing data file whose content is at `self.on_disk` (true),
187 /// instead of written to a new data file (false).
188 ///
185 /// Note: this is only supported by the tree dirstate map.
189 /// Note: this is only supported by the tree dirstate map.
186 fn pack_v2(&mut self, now: Timestamp) -> Result<Vec<u8>, DirstateError>;
190 fn pack_v2(
191 &mut self,
192 now: Timestamp,
193 can_append: bool,
194 ) -> Result<(Vec<u8>, bool), DirstateError>;
187
195
188 /// Run the status algorithm.
196 /// Run the status algorithm.
189 ///
197 ///
190 /// This is not semantically a method of the dirstate map, but a different
198 /// This is not semantically a method of the dirstate map, but a different
191 /// algorithm is used for the flat vs. tree dirstate map, so having it in
199 /// algorithm is used for the flat vs. tree dirstate map, so having it in
192 /// this trait enables the same dynamic dispatch as with other methods.
200 /// this trait enables the same dynamic dispatch as with other methods.
193 fn status<'a>(
201 fn status<'a>(
194 &'a mut self,
202 &'a mut self,
195 matcher: &'a (dyn Matcher + Sync),
203 matcher: &'a (dyn Matcher + Sync),
196 root_dir: PathBuf,
204 root_dir: PathBuf,
197 ignore_files: Vec<PathBuf>,
205 ignore_files: Vec<PathBuf>,
198 options: StatusOptions,
206 options: StatusOptions,
199 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
207 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
200
208
201 /// Returns how many files in the dirstate map have a recorded copy source.
209 /// Returns how many files in the dirstate map have a recorded copy source.
202 fn copy_map_len(&self) -> usize;
210 fn copy_map_len(&self) -> usize;
203
211
204 /// Returns an iterator of `(path, copy_source)` for all files that have a
212 /// Returns an iterator of `(path, copy_source)` for all files that have a
205 /// copy source.
213 /// copy source.
206 fn copy_map_iter(&self) -> CopyMapIter<'_>;
214 fn copy_map_iter(&self) -> CopyMapIter<'_>;
207
215
208 /// Returns whether the given file has a copy source.
216 /// Returns whether the given file has a copy source.
209 fn copy_map_contains_key(
217 fn copy_map_contains_key(
210 &self,
218 &self,
211 key: &HgPath,
219 key: &HgPath,
212 ) -> Result<bool, DirstateV2ParseError>;
220 ) -> Result<bool, DirstateV2ParseError>;
213
221
214 /// Returns the copy source for the given file.
222 /// Returns the copy source for the given file.
215 fn copy_map_get(
223 fn copy_map_get(
216 &self,
224 &self,
217 key: &HgPath,
225 key: &HgPath,
218 ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
226 ) -> Result<Option<&HgPath>, DirstateV2ParseError>;
219
227
220 /// Removes the recorded copy source if any for the given file, and returns
228 /// Removes the recorded copy source if any for the given file, and returns
221 /// it.
229 /// it.
222 fn copy_map_remove(
230 fn copy_map_remove(
223 &mut self,
231 &mut self,
224 key: &HgPath,
232 key: &HgPath,
225 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
233 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
226
234
227 /// Set the given `value` copy source for the given `key` file.
235 /// Set the given `value` copy source for the given `key` file.
228 fn copy_map_insert(
236 fn copy_map_insert(
229 &mut self,
237 &mut self,
230 key: HgPathBuf,
238 key: HgPathBuf,
231 value: HgPathBuf,
239 value: HgPathBuf,
232 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
240 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError>;
233
241
234 /// Returns the number of files that have an entry.
242 /// Returns the number of files that have an entry.
235 fn len(&self) -> usize;
243 fn len(&self) -> usize;
236
244
237 /// Returns whether the given file has an entry.
245 /// Returns whether the given file has an entry.
238 fn contains_key(&self, key: &HgPath)
246 fn contains_key(&self, key: &HgPath)
239 -> Result<bool, DirstateV2ParseError>;
247 -> Result<bool, DirstateV2ParseError>;
240
248
241 /// Returns the entry, if any, for the given file.
249 /// Returns the entry, if any, for the given file.
242 fn get(
250 fn get(
243 &self,
251 &self,
244 key: &HgPath,
252 key: &HgPath,
245 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
253 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError>;
246
254
247 /// Returns a `(path, entry)` iterator of files that have an entry.
255 /// Returns a `(path, entry)` iterator of files that have an entry.
248 ///
256 ///
249 /// Because parse errors can happen during iteration, the iterated items
257 /// Because parse errors can happen during iteration, the iterated items
250 /// are `Result`s.
258 /// are `Result`s.
251 fn iter(&self) -> StateMapIter<'_>;
259 fn iter(&self) -> StateMapIter<'_>;
252
260
253 /// In the tree dirstate, return an iterator of "directory" (entry-less)
261 /// In the tree dirstate, return an iterator of "directory" (entry-less)
254 /// nodes with the data stored for them. This is for `hg debugdirstate
262 /// nodes with the data stored for them. This is for `hg debugdirstate
255 /// --dirs`.
263 /// --dirs`.
256 ///
264 ///
257 /// In the flat dirstate, returns an empty iterator.
265 /// In the flat dirstate, returns an empty iterator.
258 ///
266 ///
259 /// Because parse errors can happen during iteration, the iterated items
267 /// Because parse errors can happen during iteration, the iterated items
260 /// are `Result`s.
268 /// are `Result`s.
261 fn iter_directories(
269 fn iter_directories(
262 &self,
270 &self,
263 ) -> Box<
271 ) -> Box<
264 dyn Iterator<
272 dyn Iterator<
265 Item = Result<
273 Item = Result<
266 (&HgPath, Option<Timestamp>),
274 (&HgPath, Option<Timestamp>),
267 DirstateV2ParseError,
275 DirstateV2ParseError,
268 >,
276 >,
269 > + Send
277 > + Send
270 + '_,
278 + '_,
271 >;
279 >;
272 }
280 }
273
281
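Both concrete map types implement the trait above, so higher layers can work through a plain `&dyn DirstateMapMethods` without knowing which implementation they hold. A minimal sketch of that dynamic dispatch follows; the `summarize` helper is hypothetical and ownership of the map is left out:

    // Sketch only: works identically for the flat and the tree dirstate map.
    fn summarize(map: &dyn DirstateMapMethods) -> Result<(), DirstateV2ParseError> {
        let entries = map.len();
        let copies = map.copy_map_len();
        let tracks_foo = map.contains_key(HgPath::new(b"foo"))?;
        // A real caller would report these; here we only silence unused warnings.
        let _ = (entries, copies, tracks_foo);
        Ok(())
    }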
274 impl DirstateMapMethods for DirstateMap {
282 impl DirstateMapMethods for DirstateMap {
275 fn clear(&mut self) {
283 fn clear(&mut self) {
276 self.clear()
284 self.clear()
277 }
285 }
278
286
279 fn add_file(
287 fn add_file(
280 &mut self,
288 &mut self,
281 filename: &HgPath,
289 filename: &HgPath,
282 entry: DirstateEntry,
290 entry: DirstateEntry,
283 added: bool,
291 added: bool,
284 merged: bool,
292 merged: bool,
285 from_p2: bool,
293 from_p2: bool,
286 possibly_dirty: bool,
294 possibly_dirty: bool,
287 ) -> Result<(), DirstateError> {
295 ) -> Result<(), DirstateError> {
288 self.add_file(filename, entry, added, merged, from_p2, possibly_dirty)
296 self.add_file(filename, entry, added, merged, from_p2, possibly_dirty)
289 }
297 }
290
298
291 fn remove_file(
299 fn remove_file(
292 &mut self,
300 &mut self,
293 filename: &HgPath,
301 filename: &HgPath,
294 in_merge: bool,
302 in_merge: bool,
295 ) -> Result<(), DirstateError> {
303 ) -> Result<(), DirstateError> {
296 self.remove_file(filename, in_merge)
304 self.remove_file(filename, in_merge)
297 }
305 }
298
306
299 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
307 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
300 self.drop_file(filename)
308 self.drop_file(filename)
301 }
309 }
302
310
303 fn clear_ambiguous_times(
311 fn clear_ambiguous_times(
304 &mut self,
312 &mut self,
305 filenames: Vec<HgPathBuf>,
313 filenames: Vec<HgPathBuf>,
306 now: i32,
314 now: i32,
307 ) -> Result<(), DirstateV2ParseError> {
315 ) -> Result<(), DirstateV2ParseError> {
308 Ok(self.clear_ambiguous_times(filenames, now))
316 Ok(self.clear_ambiguous_times(filenames, now))
309 }
317 }
310
318
311 fn non_normal_entries_contains(
319 fn non_normal_entries_contains(
312 &mut self,
320 &mut self,
313 key: &HgPath,
321 key: &HgPath,
314 ) -> Result<bool, DirstateV2ParseError> {
322 ) -> Result<bool, DirstateV2ParseError> {
315 let (non_normal, _other_parent) =
323 let (non_normal, _other_parent) =
316 self.get_non_normal_other_parent_entries();
324 self.get_non_normal_other_parent_entries();
317 Ok(non_normal.contains(key))
325 Ok(non_normal.contains(key))
318 }
326 }
319
327
320 fn non_normal_entries_remove(&mut self, key: &HgPath) {
328 fn non_normal_entries_remove(&mut self, key: &HgPath) {
321 self.non_normal_entries_remove(key)
329 self.non_normal_entries_remove(key)
322 }
330 }
323
331
324 fn non_normal_or_other_parent_paths(
332 fn non_normal_or_other_parent_paths(
325 &mut self,
333 &mut self,
326 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
334 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
327 {
335 {
328 let (non_normal, other_parent) =
336 let (non_normal, other_parent) =
329 self.get_non_normal_other_parent_entries();
337 self.get_non_normal_other_parent_entries();
330 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
338 Box::new(non_normal.union(other_parent).map(|p| Ok(&**p)))
331 }
339 }
332
340
333 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
341 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
334 self.set_non_normal_other_parent_entries(force)
342 self.set_non_normal_other_parent_entries(force)
335 }
343 }
336
344
337 fn iter_non_normal_paths(
345 fn iter_non_normal_paths(
338 &mut self,
346 &mut self,
339 ) -> Box<
347 ) -> Box<
340 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
348 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
341 > {
349 > {
342 let (non_normal, _other_parent) =
350 let (non_normal, _other_parent) =
343 self.get_non_normal_other_parent_entries();
351 self.get_non_normal_other_parent_entries();
344 Box::new(non_normal.iter().map(|p| Ok(&**p)))
352 Box::new(non_normal.iter().map(|p| Ok(&**p)))
345 }
353 }
346
354
347 fn iter_non_normal_paths_panic(
355 fn iter_non_normal_paths_panic(
348 &self,
356 &self,
349 ) -> Box<
357 ) -> Box<
350 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
358 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
351 > {
359 > {
352 let (non_normal, _other_parent) =
360 let (non_normal, _other_parent) =
353 self.get_non_normal_other_parent_entries_panic();
361 self.get_non_normal_other_parent_entries_panic();
354 Box::new(non_normal.iter().map(|p| Ok(&**p)))
362 Box::new(non_normal.iter().map(|p| Ok(&**p)))
355 }
363 }
356
364
357 fn iter_other_parent_paths(
365 fn iter_other_parent_paths(
358 &mut self,
366 &mut self,
359 ) -> Box<
367 ) -> Box<
360 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
368 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
361 > {
369 > {
362 let (_non_normal, other_parent) =
370 let (_non_normal, other_parent) =
363 self.get_non_normal_other_parent_entries();
371 self.get_non_normal_other_parent_entries();
364 Box::new(other_parent.iter().map(|p| Ok(&**p)))
372 Box::new(other_parent.iter().map(|p| Ok(&**p)))
365 }
373 }
366
374
367 fn has_tracked_dir(
375 fn has_tracked_dir(
368 &mut self,
376 &mut self,
369 directory: &HgPath,
377 directory: &HgPath,
370 ) -> Result<bool, DirstateError> {
378 ) -> Result<bool, DirstateError> {
371 self.has_tracked_dir(directory)
379 self.has_tracked_dir(directory)
372 }
380 }
373
381
374 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
382 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
375 self.has_dir(directory)
383 self.has_dir(directory)
376 }
384 }
377
385
378 fn pack_v1(
386 fn pack_v1(
379 &mut self,
387 &mut self,
380 parents: DirstateParents,
388 parents: DirstateParents,
381 now: Timestamp,
389 now: Timestamp,
382 ) -> Result<Vec<u8>, DirstateError> {
390 ) -> Result<Vec<u8>, DirstateError> {
383 self.pack(parents, now)
391 self.pack(parents, now)
384 }
392 }
385
393
386 fn pack_v2(&mut self, _now: Timestamp) -> Result<Vec<u8>, DirstateError> {
394 fn pack_v2(
395 &mut self,
396 _now: Timestamp,
397 _can_append: bool,
398 ) -> Result<(Vec<u8>, bool), DirstateError> {
387 panic!(
399 panic!(
388 "should have used dirstate_tree::DirstateMap to use the v2 format"
400 "should have used dirstate_tree::DirstateMap to use the v2 format"
389 )
401 )
390 }
402 }
391
403
392 fn status<'a>(
404 fn status<'a>(
393 &'a mut self,
405 &'a mut self,
394 matcher: &'a (dyn Matcher + Sync),
406 matcher: &'a (dyn Matcher + Sync),
395 root_dir: PathBuf,
407 root_dir: PathBuf,
396 ignore_files: Vec<PathBuf>,
408 ignore_files: Vec<PathBuf>,
397 options: StatusOptions,
409 options: StatusOptions,
398 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
410 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
399 {
411 {
400 crate::status(self, matcher, root_dir, ignore_files, options)
412 crate::status(self, matcher, root_dir, ignore_files, options)
401 }
413 }
402
414
403 fn copy_map_len(&self) -> usize {
415 fn copy_map_len(&self) -> usize {
404 self.copy_map.len()
416 self.copy_map.len()
405 }
417 }
406
418
407 fn copy_map_iter(&self) -> CopyMapIter<'_> {
419 fn copy_map_iter(&self) -> CopyMapIter<'_> {
408 Box::new(
420 Box::new(
409 self.copy_map
421 self.copy_map
410 .iter()
422 .iter()
411 .map(|(key, value)| Ok((&**key, &**value))),
423 .map(|(key, value)| Ok((&**key, &**value))),
412 )
424 )
413 }
425 }
414
426
415 fn copy_map_contains_key(
427 fn copy_map_contains_key(
416 &self,
428 &self,
417 key: &HgPath,
429 key: &HgPath,
418 ) -> Result<bool, DirstateV2ParseError> {
430 ) -> Result<bool, DirstateV2ParseError> {
419 Ok(self.copy_map.contains_key(key))
431 Ok(self.copy_map.contains_key(key))
420 }
432 }
421
433
422 fn copy_map_get(
434 fn copy_map_get(
423 &self,
435 &self,
424 key: &HgPath,
436 key: &HgPath,
425 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
437 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
426 Ok(self.copy_map.get(key).map(|p| &**p))
438 Ok(self.copy_map.get(key).map(|p| &**p))
427 }
439 }
428
440
429 fn copy_map_remove(
441 fn copy_map_remove(
430 &mut self,
442 &mut self,
431 key: &HgPath,
443 key: &HgPath,
432 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
444 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
433 Ok(self.copy_map.remove(key))
445 Ok(self.copy_map.remove(key))
434 }
446 }
435
447
436 fn copy_map_insert(
448 fn copy_map_insert(
437 &mut self,
449 &mut self,
438 key: HgPathBuf,
450 key: HgPathBuf,
439 value: HgPathBuf,
451 value: HgPathBuf,
440 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
452 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
441 Ok(self.copy_map.insert(key, value))
453 Ok(self.copy_map.insert(key, value))
442 }
454 }
443
455
444 fn len(&self) -> usize {
456 fn len(&self) -> usize {
445 (&**self).len()
457 (&**self).len()
446 }
458 }
447
459
448 fn contains_key(
460 fn contains_key(
449 &self,
461 &self,
450 key: &HgPath,
462 key: &HgPath,
451 ) -> Result<bool, DirstateV2ParseError> {
463 ) -> Result<bool, DirstateV2ParseError> {
452 Ok((&**self).contains_key(key))
464 Ok((&**self).contains_key(key))
453 }
465 }
454
466
455 fn get(
467 fn get(
456 &self,
468 &self,
457 key: &HgPath,
469 key: &HgPath,
458 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
470 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
459 Ok((&**self).get(key).cloned())
471 Ok((&**self).get(key).cloned())
460 }
472 }
461
473
462 fn iter(&self) -> StateMapIter<'_> {
474 fn iter(&self) -> StateMapIter<'_> {
463 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
475 Box::new((&**self).iter().map(|(key, value)| Ok((&**key, *value))))
464 }
476 }
465
477
466 fn iter_directories(
478 fn iter_directories(
467 &self,
479 &self,
468 ) -> Box<
480 ) -> Box<
469 dyn Iterator<
481 dyn Iterator<
470 Item = Result<
482 Item = Result<
471 (&HgPath, Option<Timestamp>),
483 (&HgPath, Option<Timestamp>),
472 DirstateV2ParseError,
484 DirstateV2ParseError,
473 >,
485 >,
474 > + Send
486 > + Send
475 + '_,
487 + '_,
476 > {
488 > {
477 Box::new(std::iter::empty())
489 Box::new(std::iter::empty())
478 }
490 }
479 }
491 }
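For context, here is a hedged sketch of how a caller might consume the new `pack_v2` contract; `append_to_data_file`, `write_new_data_file`, `update_docket`, `old_data_size` and `can_append` are hypothetical names standing in for the vfs plumbing, which is not part of this file:

    // Sketch only: the second element of the returned tuple decides whether the
    // bytes extend the current dirstate.{uuid}.d file or start a fresh one.
    let (packed, should_append) = map.pack_v2(now, can_append)?;
    if should_append {
        append_to_data_file(&packed)?; // existing data file keeps its uuid
        update_docket(old_data_size + packed.len())?;
    } else {
        write_new_data_file(&packed)?; // new data file, new uuid in the docket
        update_docket(packed.len())?;
    }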
@@ -1,678 +1,695 b''
1 //! The "version 2" disk representation of the dirstate
1 //! The "version 2" disk representation of the dirstate
2 //!
2 //!
3 //! # File format
3 //! # File format
4 //!
4 //!
5 //! In dirstate-v2 format, the `.hg/dirstate` file is a "docket" that starts
5 //! In dirstate-v2 format, the `.hg/dirstate` file is a "docket" that starts
6 //! with a fixed-size header whose layout is defined by the `DocketHeader`
6 //! with a fixed-size header whose layout is defined by the `DocketHeader`
7 //! struct, followed by the data file identifier.
7 //! struct, followed by the data file identifier.
8 //!
8 //!
9 //! A separate `.hg/dirstate.{uuid}.d` file contains most of the data. That
9 //! A separate `.hg/dirstate.{uuid}.d` file contains most of the data. That
10 //! file may be longer than the size given in the docket, but not shorter. Only
10 //! file may be longer than the size given in the docket, but not shorter. Only
11 //! the start of the data file up to the given size is considered. The
11 //! the start of the data file up to the given size is considered. The
12 //! fixed-size "root" of the dirstate tree whose layout is defined by the
12 //! fixed-size "root" of the dirstate tree whose layout is defined by the
13 //! `Root` struct is found at the end of that slice of data.
13 //! `Root` struct is found at the end of that slice of data.
14 //!
14 //!
15 //! Its `root_nodes` field contains the slice (offset and length) to
15 //! Its `root_nodes` field contains the slice (offset and length) to
16 //! the nodes representing the files and directories at the root of the
16 //! the nodes representing the files and directories at the root of the
17 //! repository. Each node is also fixed-size, defined by the `Node` struct.
17 //! repository. Each node is also fixed-size, defined by the `Node` struct.
18 //! Nodes in turn contain slices to variable-size paths, and to their own child
18 //! Nodes in turn contain slices to variable-size paths, and to their own child
19 //! nodes (if any) for nested files and directories.
19 //! nodes (if any) for nested files and directories.
20
20
21 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
21 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
22 use crate::dirstate_tree::path_with_basename::WithBasename;
22 use crate::dirstate_tree::path_with_basename::WithBasename;
23 use crate::errors::HgError;
23 use crate::errors::HgError;
24 use crate::utils::hg_path::HgPath;
24 use crate::utils::hg_path::HgPath;
25 use crate::DirstateEntry;
25 use crate::DirstateEntry;
26 use crate::DirstateError;
26 use crate::DirstateError;
27 use crate::DirstateParents;
27 use crate::DirstateParents;
28 use crate::EntryState;
28 use crate::EntryState;
29 use bytes_cast::unaligned::{I32Be, I64Be, U16Be, U32Be};
29 use bytes_cast::unaligned::{I32Be, I64Be, U16Be, U32Be};
30 use bytes_cast::BytesCast;
30 use bytes_cast::BytesCast;
31 use format_bytes::format_bytes;
31 use format_bytes::format_bytes;
32 use std::borrow::Cow;
32 use std::borrow::Cow;
33 use std::convert::{TryFrom, TryInto};
33 use std::convert::{TryFrom, TryInto};
34 use std::time::{Duration, SystemTime, UNIX_EPOCH};
34 use std::time::{Duration, SystemTime, UNIX_EPOCH};
35
35
36 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
36 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
37 /// This is a redundant sanity check more than an actual "magic number" since
37 /// This is a redundant sanity check more than an actual "magic number" since
38 /// `.hg/requires` already governs which format should be used.
38 /// `.hg/requires` already governs which format should be used.
39 pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
39 pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
40
40
41 /// Keep space for 256-bit hashes
41 /// Keep space for 256-bit hashes
42 const STORED_NODE_ID_BYTES: usize = 32;
42 const STORED_NODE_ID_BYTES: usize = 32;
43
43
44 /// … even though only 160 bits are used for now, with SHA-1
44 /// … even though only 160 bits are used for now, with SHA-1
45 const USED_NODE_ID_BYTES: usize = 20;
45 const USED_NODE_ID_BYTES: usize = 20;
46
46
47 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
47 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
48 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
48 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
49
49
50 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
50 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
51 #[derive(BytesCast)]
51 #[derive(BytesCast)]
52 #[repr(C)]
52 #[repr(C)]
53 struct DocketHeader {
53 struct DocketHeader {
54 marker: [u8; V2_FORMAT_MARKER.len()],
54 marker: [u8; V2_FORMAT_MARKER.len()],
55 parent_1: [u8; STORED_NODE_ID_BYTES],
55 parent_1: [u8; STORED_NODE_ID_BYTES],
56 parent_2: [u8; STORED_NODE_ID_BYTES],
56 parent_2: [u8; STORED_NODE_ID_BYTES],
57
57
58 /// Counted in bytes
58 /// Counted in bytes
59 data_size: Size,
59 data_size: Size,
60
60
61 uuid_size: u8,
61 uuid_size: u8,
62 }
62 }
63
63
64 pub struct Docket<'on_disk> {
64 pub struct Docket<'on_disk> {
65 header: &'on_disk DocketHeader,
65 header: &'on_disk DocketHeader,
66 uuid: &'on_disk [u8],
66 uuid: &'on_disk [u8],
67 }
67 }
68
68
69 #[derive(BytesCast)]
69 #[derive(BytesCast)]
70 #[repr(C)]
70 #[repr(C)]
71 struct Root {
71 struct Root {
72 root_nodes: ChildNodes,
72 root_nodes: ChildNodes,
73 nodes_with_entry_count: Size,
73 nodes_with_entry_count: Size,
74 nodes_with_copy_source_count: Size,
74 nodes_with_copy_source_count: Size,
75
75
76 /// If non-zero, a hash of ignore files that were used for some previous
76 /// If non-zero, a hash of ignore files that were used for some previous
77 /// run of the `status` algorithm.
77 /// run of the `status` algorithm.
78 ///
78 ///
79 /// We define:
79 /// We define:
80 ///
80 ///
81 /// * "Root" ignore files are `.hgignore` at the root of the repository if
81 /// * "Root" ignore files are `.hgignore` at the root of the repository if
82 /// it exists, and files from `ui.ignore.*` config. This set of files is
82 /// it exists, and files from `ui.ignore.*` config. This set of files is
83 /// then sorted by the string representation of their path.
83 /// then sorted by the string representation of their path.
84 /// * The "expanded contents" of an ignore files is the byte string made
84 /// * The "expanded contents" of an ignore files is the byte string made
85 /// by concatenating its contents with the "expanded contents" of other
85 /// by concatenating its contents with the "expanded contents" of other
86 /// files included with `include:` or `subinclude:` files, in inclusion
86 /// files included with `include:` or `subinclude:` files, in inclusion
87 /// order. This definition is recursive, as included files can
87 /// order. This definition is recursive, as included files can
88 /// themselves include more files.
88 /// themselves include more files.
89 ///
89 ///
90 /// This hash is defined as the SHA-1 of the concatenation (in sorted
90 /// This hash is defined as the SHA-1 of the concatenation (in sorted
91 /// order) of the "expanded contents" of each "root" ignore file.
91 /// order) of the "expanded contents" of each "root" ignore file.
92 /// (Note that computing this does not require actually concatenating byte
92 /// (Note that computing this does not require actually concatenating byte
93 /// strings into contiguous memory, instead SHA-1 hashing can be done
93 /// strings into contiguous memory, instead SHA-1 hashing can be done
94 /// incrementally.)
94 /// incrementally.)
95 ignore_patterns_hash: IgnorePatternsHash,
95 ignore_patterns_hash: IgnorePatternsHash,
96 }
96 }
97
97
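To make the hash definition above concrete, here is a hedged sketch of the incremental computation it allows. It assumes the RustCrypto `sha1` crate; `sorted_root_ignore_files` and `expanded_contents` are hypothetical helpers standing in for the definitions given in the doc comment:

    // Sketch only: both helper functions and the sha1 dependency are assumptions.
    use sha1::{Digest, Sha1};

    let mut hasher = Sha1::new();
    for ignore_file in sorted_root_ignore_files() {
        // Feed each root ignore file's "expanded contents" incrementally, without
        // concatenating everything into one contiguous buffer.
        hasher.update(expanded_contents(&ignore_file));
    }
    let ignore_patterns_hash = hasher.finalize(); // 20 bytes, as stored in `Root`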
98 #[derive(BytesCast)]
98 #[derive(BytesCast)]
99 #[repr(C)]
99 #[repr(C)]
100 pub(super) struct Node {
100 pub(super) struct Node {
101 full_path: PathSlice,
101 full_path: PathSlice,
102
102
103 /// In bytes from `self.full_path.start`
103 /// In bytes from `self.full_path.start`
104 base_name_start: PathSize,
104 base_name_start: PathSize,
105
105
106 copy_source: OptPathSlice,
106 copy_source: OptPathSlice,
107 children: ChildNodes,
107 children: ChildNodes,
108 pub(super) descendants_with_entry_count: Size,
108 pub(super) descendants_with_entry_count: Size,
109 pub(super) tracked_descendants_count: Size,
109 pub(super) tracked_descendants_count: Size,
110
110
111 /// Depending on the value of `state`:
111 /// Depending on the value of `state`:
112 ///
112 ///
113 /// * A null byte: `data` is not used.
113 /// * A null byte: `data` is not used.
114 ///
114 ///
115 /// * An `n`, `a`, `r`, or `m` ASCII byte: `state` and `data` together
115 /// * An `n`, `a`, `r`, or `m` ASCII byte: `state` and `data` together
116 /// represent a dirstate entry like in the v1 format.
116 /// represent a dirstate entry like in the v1 format.
117 ///
117 ///
118 /// * A `d` ASCII byte: the bytes of `data` should instead be interpreted
118 /// * A `d` ASCII byte: the bytes of `data` should instead be interpreted
119 /// as the `Timestamp` for the mtime of a cached directory.
119 /// as the `Timestamp` for the mtime of a cached directory.
120 ///
120 ///
121 /// The presence of this state means that at some point, this path in
121 /// The presence of this state means that at some point, this path in
122 /// the working directory was observed:
122 /// the working directory was observed:
123 ///
123 ///
124 /// - To be a directory
124 /// - To be a directory
125 /// - With the modification time as given by `Timestamp`
125 /// - With the modification time as given by `Timestamp`
126 /// - That timestamp was already strictly in the past when observed,
126 /// - That timestamp was already strictly in the past when observed,
127 /// meaning that later changes cannot happen in the same clock tick
127 /// meaning that later changes cannot happen in the same clock tick
128 /// and must cause a different modification time (unless the system
128 /// and must cause a different modification time (unless the system
129 /// clock jumps back and we get unlucky, which is not impossible but
129 /// clock jumps back and we get unlucky, which is not impossible but
130 /// deemed unlikely enough).
130 /// deemed unlikely enough).
131 /// - All direct children of this directory (as returned by
131 /// - All direct children of this directory (as returned by
132 /// `std::fs::read_dir`) either have a corresponding dirstate node, or
132 /// `std::fs::read_dir`) either have a corresponding dirstate node, or
133 /// are ignored by ignore patterns whose hash is in
133 /// are ignored by ignore patterns whose hash is in
134 /// `Root::ignore_patterns_hash`.
134 /// `Root::ignore_patterns_hash`.
135 ///
135 ///
136 /// This means that if `std::fs::symlink_metadata` later reports the
136 /// This means that if `std::fs::symlink_metadata` later reports the
137 /// same modification time and ignored patterns haven’t changed, a run
137 /// same modification time and ignored patterns haven’t changed, a run
138 /// of status that is not listing ignored files can skip calling
138 /// of status that is not listing ignored files can skip calling
139 /// `std::fs::read_dir` again for this directory, and iterate child
139 /// `std::fs::read_dir` again for this directory, and iterate child
140 /// dirstate nodes instead.
140 /// dirstate nodes instead.
141 state: u8,
141 state: u8,
142 data: Entry,
142 data: Entry,
143 }
143 }
144
144
145 #[derive(BytesCast, Copy, Clone)]
145 #[derive(BytesCast, Copy, Clone)]
146 #[repr(C)]
146 #[repr(C)]
147 struct Entry {
147 struct Entry {
148 mode: I32Be,
148 mode: I32Be,
149 mtime: I32Be,
149 mtime: I32Be,
150 size: I32Be,
150 size: I32Be,
151 }
151 }
152
152
153 /// Duration since the Unix epoch
153 /// Duration since the Unix epoch
154 #[derive(BytesCast, Copy, Clone, PartialEq)]
154 #[derive(BytesCast, Copy, Clone, PartialEq)]
155 #[repr(C)]
155 #[repr(C)]
156 pub(super) struct Timestamp {
156 pub(super) struct Timestamp {
157 seconds: I64Be,
157 seconds: I64Be,
158
158
159 /// In `0 .. 1_000_000_000`.
159 /// In `0 .. 1_000_000_000`.
160 ///
160 ///
161 /// This timestamp is later or earlier than `(seconds, 0)` by this many
161 /// This timestamp is later or earlier than `(seconds, 0)` by this many
162 /// nanoseconds, if `seconds` is non-negative or negative, respectively.
162 /// nanoseconds, if `seconds` is non-negative or negative, respectively.
163 nanoseconds: U32Be,
163 nanoseconds: U32Be,
164 }
164 }
165
165
166 /// Counted in bytes from the start of the file
166 /// Counted in bytes from the start of the file
167 ///
167 ///
168 /// NOTE: not supporting `.hg/dirstate` files larger than 4 GiB.
168 /// NOTE: not supporting `.hg/dirstate` files larger than 4 GiB.
169 type Offset = U32Be;
169 type Offset = U32Be;
170
170
171 /// Counted in number of items
171 /// Counted in number of items
172 ///
172 ///
173 /// NOTE: we choose not to support counting more than 4 billion nodes anywhere.
173 /// NOTE: we choose not to support counting more than 4 billion nodes anywhere.
174 type Size = U32Be;
174 type Size = U32Be;
175
175
176 /// Counted in bytes
176 /// Counted in bytes
177 ///
177 ///
178 /// NOTE: we choose not to support file names/paths longer than 64 KiB.
178 /// NOTE: we choose not to support file names/paths longer than 64 KiB.
179 type PathSize = U16Be;
179 type PathSize = U16Be;
180
180
181 /// A contiguous sequence of `len` times `Node`, representing the child nodes
181 /// A contiguous sequence of `len` times `Node`, representing the child nodes
182 /// of either some other node or of the repository root.
182 /// of either some other node or of the repository root.
183 ///
183 ///
184 /// Always sorted by ascending `full_path`, to allow binary search.
184 /// Always sorted by ascending `full_path`, to allow binary search.
185 /// Since nodes with the same parent nodes also have the same parent path,
185 /// Since nodes with the same parent nodes also have the same parent path,
186 /// only the `base_name`s need to be compared during binary search.
186 /// only the `base_name`s need to be compared during binary search.
187 #[derive(BytesCast, Copy, Clone)]
187 #[derive(BytesCast, Copy, Clone)]
188 #[repr(C)]
188 #[repr(C)]
189 struct ChildNodes {
189 struct ChildNodes {
190 start: Offset,
190 start: Offset,
191 len: Size,
191 len: Size,
192 }
192 }
193
193
194 /// A `HgPath` of `len` bytes
194 /// A `HgPath` of `len` bytes
195 #[derive(BytesCast, Copy, Clone)]
195 #[derive(BytesCast, Copy, Clone)]
196 #[repr(C)]
196 #[repr(C)]
197 struct PathSlice {
197 struct PathSlice {
198 start: Offset,
198 start: Offset,
199 len: PathSize,
199 len: PathSize,
200 }
200 }
201
201
202 /// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
202 /// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
203 type OptPathSlice = PathSlice;
203 type OptPathSlice = PathSlice;
204
204
205 /// Make sure that size-affecting changes are made knowingly
205 /// Make sure that size-affecting changes are made knowingly
206 fn _static_assert_size_of() {
206 fn _static_assert_size_of() {
207 let _ = std::mem::transmute::<DocketHeader, [u8; 81]>;
207 let _ = std::mem::transmute::<DocketHeader, [u8; 81]>;
208 let _ = std::mem::transmute::<Root, [u8; 36]>;
208 let _ = std::mem::transmute::<Root, [u8; 36]>;
209 let _ = std::mem::transmute::<Node, [u8; 43]>;
209 let _ = std::mem::transmute::<Node, [u8; 43]>;
210 }
210 }
211
211
212 /// Unexpected file format found in `.hg/dirstate` with the "v2" format.
212 /// Unexpected file format found in `.hg/dirstate` with the "v2" format.
213 ///
213 ///
214 /// This should only happen if Mercurial is buggy or a repository is corrupted.
214 /// This should only happen if Mercurial is buggy or a repository is corrupted.
215 #[derive(Debug)]
215 #[derive(Debug)]
216 pub struct DirstateV2ParseError;
216 pub struct DirstateV2ParseError;
217
217
218 impl From<DirstateV2ParseError> for HgError {
218 impl From<DirstateV2ParseError> for HgError {
219 fn from(_: DirstateV2ParseError) -> Self {
219 fn from(_: DirstateV2ParseError) -> Self {
220 HgError::corrupted("dirstate-v2 parse error")
220 HgError::corrupted("dirstate-v2 parse error")
221 }
221 }
222 }
222 }
223
223
224 impl From<DirstateV2ParseError> for crate::DirstateError {
224 impl From<DirstateV2ParseError> for crate::DirstateError {
225 fn from(error: DirstateV2ParseError) -> Self {
225 fn from(error: DirstateV2ParseError) -> Self {
226 HgError::from(error).into()
226 HgError::from(error).into()
227 }
227 }
228 }
228 }
229
229
230 impl<'on_disk> Docket<'on_disk> {
230 impl<'on_disk> Docket<'on_disk> {
231 pub fn parents(&self) -> DirstateParents {
231 pub fn parents(&self) -> DirstateParents {
232 use crate::Node;
232 use crate::Node;
233 let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
233 let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
234 .unwrap()
234 .unwrap()
235 .clone();
235 .clone();
236 let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
236 let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
237 .unwrap()
237 .unwrap()
238 .clone();
238 .clone();
239 DirstateParents { p1, p2 }
239 DirstateParents { p1, p2 }
240 }
240 }
241
241
242 pub fn data_size(&self) -> usize {
242 pub fn data_size(&self) -> usize {
243 // This `unwrap` could only panic on a 16-bit CPU
243 // This `unwrap` could only panic on a 16-bit CPU
244 self.header.data_size.get().try_into().unwrap()
244 self.header.data_size.get().try_into().unwrap()
245 }
245 }
246
246
247 pub fn data_filename(&self) -> String {
247 pub fn data_filename(&self) -> String {
248 String::from_utf8(format_bytes!(b"dirstate.{}.d", self.uuid)).unwrap()
248 String::from_utf8(format_bytes!(b"dirstate.{}.d", self.uuid)).unwrap()
249 }
249 }
250 }
250 }
251
251
252 pub fn read_docket(
252 pub fn read_docket(
253 on_disk: &[u8],
253 on_disk: &[u8],
254 ) -> Result<Docket<'_>, DirstateV2ParseError> {
254 ) -> Result<Docket<'_>, DirstateV2ParseError> {
255 let (header, uuid) =
255 let (header, uuid) =
256 DocketHeader::from_bytes(on_disk).map_err(|_| DirstateV2ParseError)?;
256 DocketHeader::from_bytes(on_disk).map_err(|_| DirstateV2ParseError)?;
257 let uuid_size = header.uuid_size as usize;
257 let uuid_size = header.uuid_size as usize;
258 if header.marker == *V2_FORMAT_MARKER && uuid.len() == uuid_size {
258 if header.marker == *V2_FORMAT_MARKER && uuid.len() == uuid_size {
259 Ok(Docket { header, uuid })
259 Ok(Docket { header, uuid })
260 } else {
260 } else {
261 Err(DirstateV2ParseError)
261 Err(DirstateV2ParseError)
262 }
262 }
263 }
263 }
264
264
265 fn read_root<'on_disk>(
265 fn read_root<'on_disk>(
266 on_disk: &'on_disk [u8],
266 on_disk: &'on_disk [u8],
267 ) -> Result<&'on_disk Root, DirstateV2ParseError> {
267 ) -> Result<&'on_disk Root, DirstateV2ParseError> {
268 // Find the `Root` at the end of the given slice
268 // Find the `Root` at the end of the given slice
269 let root_offset = on_disk
269 let root_offset = on_disk
270 .len()
270 .len()
271 .checked_sub(std::mem::size_of::<Root>())
271 .checked_sub(std::mem::size_of::<Root>())
272 // A non-empty slice too short is an error
272 // A non-empty slice too short is an error
273 .ok_or(DirstateV2ParseError)?;
273 .ok_or(DirstateV2ParseError)?;
274 let (root, _) = Root::from_bytes(&on_disk[root_offset..])
274 let (root, _) = Root::from_bytes(&on_disk[root_offset..])
275 .map_err(|_| DirstateV2ParseError)?;
275 .map_err(|_| DirstateV2ParseError)?;
276 Ok(root)
276 Ok(root)
277 }
277 }
278
278
279 pub(super) fn read<'on_disk>(
279 pub(super) fn read<'on_disk>(
280 on_disk: &'on_disk [u8],
280 on_disk: &'on_disk [u8],
281 ) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
281 ) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
282 if on_disk.is_empty() {
282 if on_disk.is_empty() {
283 return Ok(DirstateMap::empty(on_disk));
283 return Ok(DirstateMap::empty(on_disk));
284 }
284 }
285 let root = read_root(on_disk)?;
285 let root = read_root(on_disk)?;
286 let dirstate_map = DirstateMap {
286 let dirstate_map = DirstateMap {
287 on_disk,
287 on_disk,
288 root: dirstate_map::ChildNodes::OnDisk(read_nodes(
288 root: dirstate_map::ChildNodes::OnDisk(read_nodes(
289 on_disk,
289 on_disk,
290 root.root_nodes,
290 root.root_nodes,
291 )?),
291 )?),
292 nodes_with_entry_count: root.nodes_with_entry_count.get(),
292 nodes_with_entry_count: root.nodes_with_entry_count.get(),
293 nodes_with_copy_source_count: root.nodes_with_copy_source_count.get(),
293 nodes_with_copy_source_count: root.nodes_with_copy_source_count.get(),
294 ignore_patterns_hash: root.ignore_patterns_hash,
294 ignore_patterns_hash: root.ignore_patterns_hash,
295 };
295 };
296 Ok(dirstate_map)
296 Ok(dirstate_map)
297 }
297 }
298
298
299 impl Node {
299 impl Node {
300 pub(super) fn full_path<'on_disk>(
300 pub(super) fn full_path<'on_disk>(
301 &self,
301 &self,
302 on_disk: &'on_disk [u8],
302 on_disk: &'on_disk [u8],
303 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
303 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
304 read_hg_path(on_disk, self.full_path)
304 read_hg_path(on_disk, self.full_path)
305 }
305 }
306
306
307 pub(super) fn base_name_start<'on_disk>(
307 pub(super) fn base_name_start<'on_disk>(
308 &self,
308 &self,
309 ) -> Result<usize, DirstateV2ParseError> {
309 ) -> Result<usize, DirstateV2ParseError> {
310 let start = self.base_name_start.get();
310 let start = self.base_name_start.get();
311 if start < self.full_path.len.get() {
311 if start < self.full_path.len.get() {
312 let start = usize::try_from(start)
312 let start = usize::try_from(start)
313 // u32 -> usize, could only panic on a 16-bit CPU
313 // u32 -> usize, could only panic on a 16-bit CPU
314 .expect("dirstate-v2 base_name_start out of bounds");
314 .expect("dirstate-v2 base_name_start out of bounds");
315 Ok(start)
315 Ok(start)
316 } else {
316 } else {
317 Err(DirstateV2ParseError)
317 Err(DirstateV2ParseError)
318 }
318 }
319 }
319 }
320
320
321 pub(super) fn base_name<'on_disk>(
321 pub(super) fn base_name<'on_disk>(
322 &self,
322 &self,
323 on_disk: &'on_disk [u8],
323 on_disk: &'on_disk [u8],
324 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
324 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
325 let full_path = self.full_path(on_disk)?;
325 let full_path = self.full_path(on_disk)?;
326 let base_name_start = self.base_name_start()?;
326 let base_name_start = self.base_name_start()?;
327 Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
327 Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
328 }
328 }
329
329
330 pub(super) fn path<'on_disk>(
330 pub(super) fn path<'on_disk>(
331 &self,
331 &self,
332 on_disk: &'on_disk [u8],
332 on_disk: &'on_disk [u8],
333 ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
333 ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
334 Ok(WithBasename::from_raw_parts(
334 Ok(WithBasename::from_raw_parts(
335 Cow::Borrowed(self.full_path(on_disk)?),
335 Cow::Borrowed(self.full_path(on_disk)?),
336 self.base_name_start()?,
336 self.base_name_start()?,
337 ))
337 ))
338 }
338 }
339
339
340 pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
340 pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
341 self.copy_source.start.get() != 0
341 self.copy_source.start.get() != 0
342 }
342 }
343
343
344 pub(super) fn copy_source<'on_disk>(
344 pub(super) fn copy_source<'on_disk>(
345 &self,
345 &self,
346 on_disk: &'on_disk [u8],
346 on_disk: &'on_disk [u8],
347 ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
347 ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
348 Ok(if self.has_copy_source() {
348 Ok(if self.has_copy_source() {
349 Some(read_hg_path(on_disk, self.copy_source)?)
349 Some(read_hg_path(on_disk, self.copy_source)?)
350 } else {
350 } else {
351 None
351 None
352 })
352 })
353 }
353 }
354
354
355 pub(super) fn node_data(
355 pub(super) fn node_data(
356 &self,
356 &self,
357 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
357 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
358 let entry = |state| {
358 let entry = |state| {
359 dirstate_map::NodeData::Entry(self.entry_with_given_state(state))
359 dirstate_map::NodeData::Entry(self.entry_with_given_state(state))
360 };
360 };
361
361
362 match self.state {
362 match self.state {
363 b'\0' => Ok(dirstate_map::NodeData::None),
363 b'\0' => Ok(dirstate_map::NodeData::None),
364 b'd' => Ok(dirstate_map::NodeData::CachedDirectory {
364 b'd' => Ok(dirstate_map::NodeData::CachedDirectory {
365 mtime: *self.data.as_timestamp(),
365 mtime: *self.data.as_timestamp(),
366 }),
366 }),
367 b'n' => Ok(entry(EntryState::Normal)),
367 b'n' => Ok(entry(EntryState::Normal)),
368 b'a' => Ok(entry(EntryState::Added)),
368 b'a' => Ok(entry(EntryState::Added)),
369 b'r' => Ok(entry(EntryState::Removed)),
369 b'r' => Ok(entry(EntryState::Removed)),
370 b'm' => Ok(entry(EntryState::Merged)),
370 b'm' => Ok(entry(EntryState::Merged)),
371 _ => Err(DirstateV2ParseError),
371 _ => Err(DirstateV2ParseError),
372 }
372 }
373 }
373 }
374
374
375 pub(super) fn cached_directory_mtime(&self) -> Option<&Timestamp> {
375 pub(super) fn cached_directory_mtime(&self) -> Option<&Timestamp> {
376 if self.state == b'd' {
376 if self.state == b'd' {
377 Some(self.data.as_timestamp())
377 Some(self.data.as_timestamp())
378 } else {
378 } else {
379 None
379 None
380 }
380 }
381 }
381 }
382
382
383 pub(super) fn state(
383 pub(super) fn state(
384 &self,
384 &self,
385 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
385 ) -> Result<Option<EntryState>, DirstateV2ParseError> {
386 match self.state {
386 match self.state {
387 b'\0' | b'd' => Ok(None),
387 b'\0' | b'd' => Ok(None),
388 b'n' => Ok(Some(EntryState::Normal)),
388 b'n' => Ok(Some(EntryState::Normal)),
389 b'a' => Ok(Some(EntryState::Added)),
389 b'a' => Ok(Some(EntryState::Added)),
390 b'r' => Ok(Some(EntryState::Removed)),
390 b'r' => Ok(Some(EntryState::Removed)),
391 b'm' => Ok(Some(EntryState::Merged)),
391 b'm' => Ok(Some(EntryState::Merged)),
392 _ => Err(DirstateV2ParseError),
392 _ => Err(DirstateV2ParseError),
393 }
393 }
394 }
394 }
395
395
396 fn entry_with_given_state(&self, state: EntryState) -> DirstateEntry {
396 fn entry_with_given_state(&self, state: EntryState) -> DirstateEntry {
397 DirstateEntry {
397 DirstateEntry {
398 state,
398 state,
399 mode: self.data.mode.get(),
399 mode: self.data.mode.get(),
400 mtime: self.data.mtime.get(),
400 mtime: self.data.mtime.get(),
401 size: self.data.size.get(),
401 size: self.data.size.get(),
402 }
402 }
403 }
403 }
404
404
405 pub(super) fn entry(
405 pub(super) fn entry(
406 &self,
406 &self,
407 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
407 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
408 Ok(self
408 Ok(self
409 .state()?
409 .state()?
410 .map(|state| self.entry_with_given_state(state)))
410 .map(|state| self.entry_with_given_state(state)))
411 }
411 }
412
412
413 pub(super) fn children<'on_disk>(
413 pub(super) fn children<'on_disk>(
414 &self,
414 &self,
415 on_disk: &'on_disk [u8],
415 on_disk: &'on_disk [u8],
416 ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
416 ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
417 read_nodes(on_disk, self.children)
417 read_nodes(on_disk, self.children)
418 }
418 }
419
419
420 pub(super) fn to_in_memory_node<'on_disk>(
420 pub(super) fn to_in_memory_node<'on_disk>(
421 &self,
421 &self,
422 on_disk: &'on_disk [u8],
422 on_disk: &'on_disk [u8],
423 ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
423 ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
424 Ok(dirstate_map::Node {
424 Ok(dirstate_map::Node {
425 children: dirstate_map::ChildNodes::OnDisk(
425 children: dirstate_map::ChildNodes::OnDisk(
426 self.children(on_disk)?,
426 self.children(on_disk)?,
427 ),
427 ),
428 copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
428 copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
429 data: self.node_data()?,
429 data: self.node_data()?,
430 descendants_with_entry_count: self
430 descendants_with_entry_count: self
431 .descendants_with_entry_count
431 .descendants_with_entry_count
432 .get(),
432 .get(),
433 tracked_descendants_count: self.tracked_descendants_count.get(),
433 tracked_descendants_count: self.tracked_descendants_count.get(),
434 })
434 })
435 }
435 }
436 }
436 }
437
437
438 impl Entry {
438 impl Entry {
439 fn from_timestamp(timestamp: Timestamp) -> Self {
439 fn from_timestamp(timestamp: Timestamp) -> Self {
440 // Safety: both types implement the `ByteCast` trait, so we could
440 // Safety: both types implement the `ByteCast` trait, so we could
441 // safely use `as_bytes` and `from_bytes` to do this conversion. Using
441 // safely use `as_bytes` and `from_bytes` to do this conversion. Using
442 // `transmute` instead makes the compiler check that the two types
442 // `transmute` instead makes the compiler check that the two types
443 // have the same size, which eliminates the error case of
443 // have the same size, which eliminates the error case of
444 // `from_bytes`.
444 // `from_bytes`.
445 unsafe { std::mem::transmute::<Timestamp, Entry>(timestamp) }
445 unsafe { std::mem::transmute::<Timestamp, Entry>(timestamp) }
446 }
446 }
447
447
448 fn as_timestamp(&self) -> &Timestamp {
448 fn as_timestamp(&self) -> &Timestamp {
449 // Safety: same as above in `from_timestamp`
449 // Safety: same as above in `from_timestamp`
450 unsafe { &*(self as *const Entry as *const Timestamp) }
450 unsafe { &*(self as *const Entry as *const Timestamp) }
451 }
451 }
452 }
452 }
453
453
454 impl Timestamp {
454 impl Timestamp {
455 pub fn seconds(&self) -> i64 {
455 pub fn seconds(&self) -> i64 {
456 self.seconds.get()
456 self.seconds.get()
457 }
457 }
458 }
458 }
459
459
460 impl From<SystemTime> for Timestamp {
460 impl From<SystemTime> for Timestamp {
461 fn from(system_time: SystemTime) -> Self {
461 fn from(system_time: SystemTime) -> Self {
462 let (secs, nanos) = match system_time.duration_since(UNIX_EPOCH) {
462 let (secs, nanos) = match system_time.duration_since(UNIX_EPOCH) {
463 Ok(duration) => {
463 Ok(duration) => {
464 (duration.as_secs() as i64, duration.subsec_nanos())
464 (duration.as_secs() as i64, duration.subsec_nanos())
465 }
465 }
466 Err(error) => {
466 Err(error) => {
467 let negative = error.duration();
467 let negative = error.duration();
468 (-(negative.as_secs() as i64), negative.subsec_nanos())
468 (-(negative.as_secs() as i64), negative.subsec_nanos())
469 }
469 }
470 };
470 };
471 Timestamp {
471 Timestamp {
472 seconds: secs.into(),
472 seconds: secs.into(),
473 nanoseconds: nanos.into(),
473 nanoseconds: nanos.into(),
474 }
474 }
475 }
475 }
476 }
476 }
477
477
478 impl From<&'_ Timestamp> for SystemTime {
478 impl From<&'_ Timestamp> for SystemTime {
479 fn from(timestamp: &'_ Timestamp) -> Self {
479 fn from(timestamp: &'_ Timestamp) -> Self {
480 let secs = timestamp.seconds.get();
480 let secs = timestamp.seconds.get();
481 let nanos = timestamp.nanoseconds.get();
481 let nanos = timestamp.nanoseconds.get();
482 if secs >= 0 {
482 if secs >= 0 {
483 UNIX_EPOCH + Duration::new(secs as u64, nanos)
483 UNIX_EPOCH + Duration::new(secs as u64, nanos)
484 } else {
484 } else {
485 UNIX_EPOCH - Duration::new((-secs) as u64, nanos)
485 UNIX_EPOCH - Duration::new((-secs) as u64, nanos)
486 }
486 }
487 }
487 }
488 }
488 }
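A small worked example of the sign convention documented on `Timestamp`, using the two `From` impls above (illustrative only): 2.25 seconds before the Unix epoch is stored as `seconds = -2` with `nanoseconds = 250_000_000`.

    let t = Timestamp {
        seconds: (-2i64).into(),
        nanoseconds: 250_000_000u32.into(),
    };
    assert_eq!(SystemTime::from(&t), UNIX_EPOCH - Duration::new(2, 250_000_000));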
489
489
490 fn read_hg_path(
490 fn read_hg_path(
491 on_disk: &[u8],
491 on_disk: &[u8],
492 slice: PathSlice,
492 slice: PathSlice,
493 ) -> Result<&HgPath, DirstateV2ParseError> {
493 ) -> Result<&HgPath, DirstateV2ParseError> {
494 read_slice(on_disk, slice.start, slice.len.get()).map(HgPath::new)
494 read_slice(on_disk, slice.start, slice.len.get()).map(HgPath::new)
495 }
495 }
496
496
497 fn read_nodes(
497 fn read_nodes(
498 on_disk: &[u8],
498 on_disk: &[u8],
499 slice: ChildNodes,
499 slice: ChildNodes,
500 ) -> Result<&[Node], DirstateV2ParseError> {
500 ) -> Result<&[Node], DirstateV2ParseError> {
501 read_slice(on_disk, slice.start, slice.len.get())
501 read_slice(on_disk, slice.start, slice.len.get())
502 }
502 }
503
503
504 fn read_slice<T, Len>(
504 fn read_slice<T, Len>(
505 on_disk: &[u8],
505 on_disk: &[u8],
506 start: Offset,
506 start: Offset,
507 len: Len,
507 len: Len,
508 ) -> Result<&[T], DirstateV2ParseError>
508 ) -> Result<&[T], DirstateV2ParseError>
509 where
509 where
510 T: BytesCast,
510 T: BytesCast,
511 Len: TryInto<usize>,
511 Len: TryInto<usize>,
512 {
512 {
513 // If either conversion overflows, `usize::MAX` results in an "out of bounds" error, since a
513 // If either conversion overflows, `usize::MAX` results in an "out of bounds" error, since a
514 // single `&[u8]` cannot occupy the entire address space.
514 // single `&[u8]` cannot occupy the entire address space.
515 let start = start.get().try_into().unwrap_or(std::usize::MAX);
515 let start = start.get().try_into().unwrap_or(std::usize::MAX);
516 let len = len.try_into().unwrap_or(std::usize::MAX);
516 let len = len.try_into().unwrap_or(std::usize::MAX);
517 on_disk
517 on_disk
518 .get(start..)
518 .get(start..)
519 .and_then(|bytes| T::slice_from_bytes(bytes, len).ok())
519 .and_then(|bytes| T::slice_from_bytes(bytes, len).ok())
520 .map(|(slice, _rest)| slice)
520 .map(|(slice, _rest)| slice)
521 .ok_or_else(|| DirstateV2ParseError)
521 .ok_or_else(|| DirstateV2ParseError)
522 }
522 }
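The `unwrap_or(std::usize::MAX)` fallback works because an offset or length that large can never be in bounds for a real slice, so the lookup simply fails and is reported as `DirstateV2ParseError` instead of panicking. A minimal standalone sketch of that bounds-handling idea, with simplified types (this is not the hg-core function itself):

// Offsets or lengths that do not fit in `usize`, or that point past the end
// of the data, all surface as an error rather than a panic.
fn read_bytes(on_disk: &[u8], start: u32, len: u32) -> Result<&[u8], ()> {
    let start = usize::try_from(start).unwrap_or(usize::MAX);
    let len = usize::try_from(len).unwrap_or(usize::MAX);
    on_disk
        .get(start..)
        .and_then(|bytes| bytes.get(..len))
        .ok_or(())
}

fn main() {
    let data = b"hello dirstate";
    assert_eq!(read_bytes(data, 6, 8), Ok(&b"dirstate"[..]));
    // Out-of-range slices are rejected, not panics.
    assert!(read_bytes(data, 100, 1).is_err());
    assert!(read_bytes(data, 0, u32::MAX).is_err());
}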
523
523
524 pub(crate) fn for_each_tracked_path<'on_disk>(
524 pub(crate) fn for_each_tracked_path<'on_disk>(
525 on_disk: &'on_disk [u8],
525 on_disk: &'on_disk [u8],
526 mut f: impl FnMut(&'on_disk HgPath),
526 mut f: impl FnMut(&'on_disk HgPath),
527 ) -> Result<(), DirstateV2ParseError> {
527 ) -> Result<(), DirstateV2ParseError> {
528 let root = read_root(on_disk)?;
528 let root = read_root(on_disk)?;
529 fn recur<'on_disk>(
529 fn recur<'on_disk>(
530 on_disk: &'on_disk [u8],
530 on_disk: &'on_disk [u8],
531 nodes: ChildNodes,
531 nodes: ChildNodes,
532 f: &mut impl FnMut(&'on_disk HgPath),
532 f: &mut impl FnMut(&'on_disk HgPath),
533 ) -> Result<(), DirstateV2ParseError> {
533 ) -> Result<(), DirstateV2ParseError> {
534 for node in read_nodes(on_disk, nodes)? {
534 for node in read_nodes(on_disk, nodes)? {
535 if let Some(state) = node.state()? {
535 if let Some(state) = node.state()? {
536 if state.is_tracked() {
536 if state.is_tracked() {
537 f(node.full_path(on_disk)?)
537 f(node.full_path(on_disk)?)
538 }
538 }
539 }
539 }
540 recur(on_disk, node.children, f)?
540 recur(on_disk, node.children, f)?
541 }
541 }
542 Ok(())
542 Ok(())
543 }
543 }
544 recur(on_disk, root.root_nodes, &mut f)
544 recur(on_disk, root.root_nodes, &mut f)
545 }
545 }
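As a usage sketch, the callback style above makes it easy to collect every tracked path from a data file. The helper below is hypothetical (it is not part of this change) and assumes it lives inside this module so it can call `for_each_tracked_path` directly:

fn tracked_paths(on_disk: &[u8]) -> Result<Vec<Vec<u8>>, DirstateV2ParseError> {
    let mut paths = Vec::new();
    // Copy each tracked path's bytes out of the memory-mapped data file.
    for_each_tracked_path(on_disk, |path| paths.push(path.as_bytes().to_vec()))?;
    Ok(paths)
}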
546
546
+/// Returns new data together with whether that data should be appended to the
+/// existing data file whose content is at `dirstate_map.on_disk` (true),
+/// instead of written to a new data file (false).
 pub(super) fn write(
     dirstate_map: &mut DirstateMap,
-) -> Result<Vec<u8>, DirstateError> {
-    let root_len = std::mem::size_of::<Root>();
+    can_append: bool,
+) -> Result<(Vec<u8>, bool), DirstateError> {
+    let append = can_append && dirstate_map.write_should_append();
 
     // This ignores the space for paths, and for nodes without an entry.
     // TODO: better estimate? Skip the `Vec` and write to a file directly?
-    let size_guess = root_len
+    let size_guess = std::mem::size_of::<Root>()
         + std::mem::size_of::<Node>()
             * dirstate_map.nodes_with_entry_count as usize;
-    let mut out = Vec::with_capacity(size_guess);
 
-    let root_nodes =
-        write_nodes(dirstate_map, dirstate_map.root.as_ref(), &mut out)?;
+    let mut writer = Writer {
+        dirstate_map,
+        append,
+        out: Vec::with_capacity(size_guess),
+    };
+
+    let root_nodes = writer.write_nodes(dirstate_map.root.as_ref())?;
 
     let root = Root {
         root_nodes,
         nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
         nodes_with_copy_source_count: dirstate_map
             .nodes_with_copy_source_count
             .into(),
         ignore_patterns_hash: dirstate_map.ignore_patterns_hash,
     };
-    out.extend(root.as_bytes());
-    Ok(out)
+    writer.out.extend(root.as_bytes());
+    Ok((writer.out, append))
+}
+
+struct Writer<'dmap, 'on_disk> {
+    dirstate_map: &'dmap DirstateMap<'on_disk>,
+    append: bool,
+    out: Vec<u8>,
 }
 
-fn write_nodes(
-    dirstate_map: &DirstateMap,
-    nodes: dirstate_map::ChildNodesRef,
-    out: &mut Vec<u8>,
-) -> Result<ChildNodes, DirstateError> {
+impl Writer<'_, '_> {
+    fn write_nodes(
+        &mut self,
+        nodes: dirstate_map::ChildNodesRef,
+    ) -> Result<ChildNodes, DirstateError> {
         // `dirstate_map::ChildNodes` is a `HashMap` with undefined iteration
         // order. Sort to enable binary search in the written file.
         let nodes = nodes.sorted();
         let nodes_len = nodes.len();
 
         // First accumulate serialized nodes in a `Vec`
         let mut on_disk_nodes = Vec::with_capacity(nodes_len);
         for node in nodes {
-            let children = write_nodes(
-                dirstate_map,
-                node.children(dirstate_map.on_disk)?,
-                out,
-            )?;
-            let full_path = node.full_path(dirstate_map.on_disk)?;
-            let full_path = write_path(full_path.as_bytes(), out);
-            let copy_source =
-                if let Some(source) = node.copy_source(dirstate_map.on_disk)? {
-                    write_path(source.as_bytes(), out)
+            let children =
+                self.write_nodes(node.children(self.dirstate_map.on_disk)?)?;
+            let full_path = node.full_path(self.dirstate_map.on_disk)?;
+            let full_path = self.write_path(full_path.as_bytes());
+            let copy_source = if let Some(source) =
+                node.copy_source(self.dirstate_map.on_disk)?
+            {
+                self.write_path(source.as_bytes())
             } else {
                 PathSlice {
                     start: 0.into(),
                     len: 0.into(),
                 }
             };
             on_disk_nodes.push(match node {
                 NodeRef::InMemory(path, node) => {
                     let (state, data) = match &node.data {
                         dirstate_map::NodeData::Entry(entry) => (
                             entry.state.into(),
                             Entry {
                                 mode: entry.mode.into(),
                                 mtime: entry.mtime.into(),
                                 size: entry.size.into(),
                             },
                         ),
                         dirstate_map::NodeData::CachedDirectory { mtime } => {
                             (b'd', Entry::from_timestamp(*mtime))
                         }
                         dirstate_map::NodeData::None => (
                             b'\0',
                             Entry {
                                 mode: 0.into(),
                                 mtime: 0.into(),
                                 size: 0.into(),
                             },
                         ),
                     };
                     Node {
                         children,
                         copy_source,
                         full_path,
                         base_name_start: u16::try_from(path.base_name_start())
                             // Could only panic for paths over 64 KiB
                             .expect("dirstate-v2 path length overflow")
                             .into(),
                         descendants_with_entry_count: node
                             .descendants_with_entry_count
                             .into(),
                         tracked_descendants_count: node
                             .tracked_descendants_count
                             .into(),
                         state,
                         data,
                     }
                 }
                 NodeRef::OnDisk(node) => Node {
                     children,
                     copy_source,
                     full_path,
                     ..*node
                 },
             })
         }
-        // … so we can write them contiguously, after writing everything else they
-        // refer to.
-        let start = current_offset(out);
+        // … so we can write them contiguously, after writing everything else
+        // they refer to.
+        let start = self.current_offset();
         let len = u32::try_from(nodes_len)
             // Could only panic with over 4 billion nodes
             .expect("dirstate-v2 path length overflow")
             .into();
-        out.extend(on_disk_nodes.as_bytes());
+        self.out.extend(on_disk_nodes.as_bytes());
         Ok(ChildNodes { start, len })
     }
 
-fn current_offset(out: &Vec<u8>) -> Offset {
-    u32::try_from(out.len())
-        // Could only panic for a dirstate file larger than 4 GiB
-        .expect("dirstate-v2 offset overflow")
-        .into()
-}
+    fn current_offset(&mut self) -> Offset {
+        let mut offset = self.out.len();
+        if self.append {
+            offset += self.dirstate_map.on_disk.len()
+        }
+        u32::try_from(offset)
+            // Could only panic for a dirstate file larger than 4 GiB
+            .expect("dirstate-v2 offset overflow")
+            .into()
+    }
 
-fn write_path(slice: &[u8], out: &mut Vec<u8>) -> PathSlice {
-    let start = current_offset(out);
-    let len = u16::try_from(slice.len())
-        // Could only panic for paths over 64 KiB
-        .expect("dirstate-v2 path length overflow")
-        .into();
-    out.extend(slice.as_bytes());
-    PathSlice { start, len }
-}
+    fn write_path(&mut self, slice: &[u8]) -> PathSlice {
+        let start = self.current_offset();
+        let len = u16::try_from(slice.len())
+            // Could only panic for paths over 64 KiB
+            .expect("dirstate-v2 path length overflow")
+            .into();
+        self.out.extend(slice.as_bytes());
+        PathSlice { start, len }
+    }
+}
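The invariant that makes appending possible lives in `current_offset`: when `append` is true, every offset recorded in the newly serialized data is computed as if the new bytes started right after the existing data file, so a reader sees one contiguous byte range. A standalone sketch of that rule with simplified, hypothetical types (not the hg-core `Writer` itself):

struct SketchWriter {
    on_disk_len: usize, // length of the data file already on disk
    append: bool,
    out: Vec<u8>,
}

impl SketchWriter {
    fn current_offset(&self) -> u32 {
        let mut offset = self.out.len();
        if self.append {
            // Offsets are relative to the start of the *existing* data file,
            // so the new bytes can simply be appended after it.
            offset += self.on_disk_len;
        }
        u32::try_from(offset).expect("dirstate-v2 offset overflow")
    }

    fn write_bytes(&mut self, slice: &[u8]) -> (u32, u32) {
        let start = self.current_offset();
        let len = u32::try_from(slice.len()).expect("slice too long");
        self.out.extend_from_slice(slice);
        (start, len)
    }
}

fn main() {
    // Appending after an existing 100-byte data file: the first path written
    // starts at offset 100, not 0, because readers see one concatenated file.
    let mut writer = SketchWriter { on_disk_len: 100, append: true, out: Vec::new() };
    assert_eq!(writer.write_bytes(b"dir/file"), (100, 8));
    assert_eq!(writer.write_bytes(b"other"), (108, 5));

    // Writing a fresh data file instead: offsets start at 0.
    let mut fresh = SketchWriter { on_disk_len: 100, append: false, out: Vec::new() };
    assert_eq!(fresh.write_bytes(b"dir/file"), (0, 8));
}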
@@ -1,607 +1,614 b''
1 // dirstate_map.rs
1 // dirstate_map.rs
2 //
2 //
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
3 // Copyright 2019 Raphaël Gomès <rgomes@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
8 //! Bindings for the `hg::dirstate::dirstate_map` file provided by the
9 //! `hg-core` package.
9 //! `hg-core` package.
10
10
11 use std::cell::{RefCell, RefMut};
11 use std::cell::{RefCell, RefMut};
12 use std::convert::TryInto;
12 use std::convert::TryInto;
13
13
14 use cpython::{
14 use cpython::{
15 exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
15 exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
16 PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
16 PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
17 UnsafePyLeaked,
17 UnsafePyLeaked,
18 };
18 };
19
19
20 use crate::{
20 use crate::{
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
21 dirstate::copymap::{CopyMap, CopyMapItemsIterator, CopyMapKeysIterator},
22 dirstate::make_directory_item,
22 dirstate::make_directory_item,
23 dirstate::make_dirstate_item,
23 dirstate::make_dirstate_item,
24 dirstate::non_normal_entries::{
24 dirstate::non_normal_entries::{
25 NonNormalEntries, NonNormalEntriesIterator,
25 NonNormalEntries, NonNormalEntriesIterator,
26 },
26 },
27 dirstate::owning::OwningDirstateMap,
27 dirstate::owning::OwningDirstateMap,
28 parsers::dirstate_parents_to_pytuple,
28 parsers::dirstate_parents_to_pytuple,
29 };
29 };
30 use hg::{
30 use hg::{
31 dirstate::parsers::Timestamp,
31 dirstate::parsers::Timestamp,
32 dirstate::MTIME_UNSET,
32 dirstate::MTIME_UNSET,
33 dirstate::SIZE_NON_NORMAL,
33 dirstate::SIZE_NON_NORMAL,
34 dirstate_tree::dispatch::DirstateMapMethods,
34 dirstate_tree::dispatch::DirstateMapMethods,
35 dirstate_tree::on_disk::DirstateV2ParseError,
35 dirstate_tree::on_disk::DirstateV2ParseError,
36 revlog::Node,
36 revlog::Node,
37 utils::files::normalize_case,
37 utils::files::normalize_case,
38 utils::hg_path::{HgPath, HgPathBuf},
38 utils::hg_path::{HgPath, HgPathBuf},
39 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
39 DirstateEntry, DirstateError, DirstateMap as RustDirstateMap,
40 DirstateParents, EntryState, StateMapIter,
40 DirstateParents, EntryState, StateMapIter,
41 };
41 };
42
42
43 // TODO
43 // TODO
44 // This object needs to share references to multiple members of its Rust
44 // This object needs to share references to multiple members of its Rust
45 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
45 // inner struct, namely `copy_map`, `dirs` and `all_dirs`.
46 // Right now `CopyMap` is done, but it needs to have an explicit reference
46 // Right now `CopyMap` is done, but it needs to have an explicit reference
47 // to `RustDirstateMap` which itself needs to have an encapsulation for
47 // to `RustDirstateMap` which itself needs to have an encapsulation for
48 // every method in `CopyMap` (copymapcopy, etc.).
48 // every method in `CopyMap` (copymapcopy, etc.).
49 // This is ugly and hard to maintain.
49 // This is ugly and hard to maintain.
50 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
50 // The same logic applies to `dirs` and `all_dirs`, however the `Dirs`
51 // `py_class!` is already implemented and does not mention
51 // `py_class!` is already implemented and does not mention
52 // `RustDirstateMap`, rightfully so.
52 // `RustDirstateMap`, rightfully so.
53 // All attributes also have to have a separate refcount data attribute for
53 // All attributes also have to have a separate refcount data attribute for
54 // leaks, with all methods that go along for reference sharing.
54 // leaks, with all methods that go along for reference sharing.
55 py_class!(pub class DirstateMap |py| {
55 py_class!(pub class DirstateMap |py| {
56 @shared data inner: Box<dyn DirstateMapMethods + Send>;
56 @shared data inner: Box<dyn DirstateMapMethods + Send>;
57
57
58 /// Returns a `(dirstate_map, parents)` tuple
58 /// Returns a `(dirstate_map, parents)` tuple
59 @staticmethod
59 @staticmethod
60 def new_v1(
60 def new_v1(
61 use_dirstate_tree: bool,
61 use_dirstate_tree: bool,
62 on_disk: PyBytes,
62 on_disk: PyBytes,
63 ) -> PyResult<PyObject> {
63 ) -> PyResult<PyObject> {
64 let dirstate_error = |e: DirstateError| {
64 let dirstate_error = |e: DirstateError| {
65 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
65 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
66 };
66 };
67 let (inner, parents) = if use_dirstate_tree {
67 let (inner, parents) = if use_dirstate_tree {
68 let (map, parents) = OwningDirstateMap::new_v1(py, on_disk)
68 let (map, parents) = OwningDirstateMap::new_v1(py, on_disk)
69 .map_err(dirstate_error)?;
69 .map_err(dirstate_error)?;
70 (Box::new(map) as _, parents)
70 (Box::new(map) as _, parents)
71 } else {
71 } else {
72 let bytes = on_disk.data(py);
72 let bytes = on_disk.data(py);
73 let mut map = RustDirstateMap::default();
73 let mut map = RustDirstateMap::default();
74 let parents = map.read(bytes).map_err(dirstate_error)?;
74 let parents = map.read(bytes).map_err(dirstate_error)?;
75 (Box::new(map) as _, parents)
75 (Box::new(map) as _, parents)
76 };
76 };
77 let map = Self::create_instance(py, inner)?;
77 let map = Self::create_instance(py, inner)?;
78 let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
78 let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
79 Ok((map, parents).to_py_object(py).into_object())
79 Ok((map, parents).to_py_object(py).into_object())
80 }
80 }
81
81
82 /// Returns a DirstateMap
82 /// Returns a DirstateMap
83 @staticmethod
83 @staticmethod
84 def new_v2(
84 def new_v2(
85 on_disk: PyBytes,
85 on_disk: PyBytes,
86 data_size: usize,
86 data_size: usize,
87 ) -> PyResult<PyObject> {
87 ) -> PyResult<PyObject> {
88 let dirstate_error = |e: DirstateError| {
88 let dirstate_error = |e: DirstateError| {
89 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
89 PyErr::new::<exc::OSError, _>(py, format!("Dirstate error: {:?}", e))
90 };
90 };
91 let inner = OwningDirstateMap::new_v2(py, on_disk, data_size)
91 let inner = OwningDirstateMap::new_v2(py, on_disk, data_size)
92 .map_err(dirstate_error)?;
92 .map_err(dirstate_error)?;
93 let map = Self::create_instance(py, Box::new(inner))?;
93 let map = Self::create_instance(py, Box::new(inner))?;
94 Ok(map.into_object())
94 Ok(map.into_object())
95 }
95 }
96
96
97 def clear(&self) -> PyResult<PyObject> {
97 def clear(&self) -> PyResult<PyObject> {
98 self.inner(py).borrow_mut().clear();
98 self.inner(py).borrow_mut().clear();
99 Ok(py.None())
99 Ok(py.None())
100 }
100 }
101
101
102 def get(
102 def get(
103 &self,
103 &self,
104 key: PyObject,
104 key: PyObject,
105 default: Option<PyObject> = None
105 default: Option<PyObject> = None
106 ) -> PyResult<Option<PyObject>> {
106 ) -> PyResult<Option<PyObject>> {
107 let key = key.extract::<PyBytes>(py)?;
107 let key = key.extract::<PyBytes>(py)?;
108 match self
108 match self
109 .inner(py)
109 .inner(py)
110 .borrow()
110 .borrow()
111 .get(HgPath::new(key.data(py)))
111 .get(HgPath::new(key.data(py)))
112 .map_err(|e| v2_error(py, e))?
112 .map_err(|e| v2_error(py, e))?
113 {
113 {
114 Some(entry) => {
114 Some(entry) => {
115 Ok(Some(make_dirstate_item(py, &entry)?))
115 Ok(Some(make_dirstate_item(py, &entry)?))
116 },
116 },
117 None => Ok(default)
117 None => Ok(default)
118 }
118 }
119 }
119 }
120
120
121 def addfile(
121 def addfile(
122 &self,
122 &self,
123 f: PyObject,
123 f: PyObject,
124 mode: PyObject,
124 mode: PyObject,
125 size: PyObject,
125 size: PyObject,
126 mtime: PyObject,
126 mtime: PyObject,
127 added: PyObject,
127 added: PyObject,
128 merged: PyObject,
128 merged: PyObject,
129 from_p2: PyObject,
129 from_p2: PyObject,
130 possibly_dirty: PyObject,
130 possibly_dirty: PyObject,
131 ) -> PyResult<PyObject> {
131 ) -> PyResult<PyObject> {
132 let f = f.extract::<PyBytes>(py)?;
132 let f = f.extract::<PyBytes>(py)?;
133 let filename = HgPath::new(f.data(py));
133 let filename = HgPath::new(f.data(py));
134 let mode = if mode.is_none(py) {
134 let mode = if mode.is_none(py) {
135 // fallback default value
135 // fallback default value
136 0
136 0
137 } else {
137 } else {
138 mode.extract(py)?
138 mode.extract(py)?
139 };
139 };
140 let size = if size.is_none(py) {
140 let size = if size.is_none(py) {
141 // fallback default value
141 // fallback default value
142 SIZE_NON_NORMAL
142 SIZE_NON_NORMAL
143 } else {
143 } else {
144 size.extract(py)?
144 size.extract(py)?
145 };
145 };
146 let mtime = if mtime.is_none(py) {
146 let mtime = if mtime.is_none(py) {
147 // fallback default value
147 // fallback default value
148 MTIME_UNSET
148 MTIME_UNSET
149 } else {
149 } else {
150 mtime.extract(py)?
150 mtime.extract(py)?
151 };
151 };
152 let entry = DirstateEntry {
152 let entry = DirstateEntry {
153 // XXX Arbitrary default value since the value is determined later
153 // XXX Arbitrary default value since the value is determined later
154 state: EntryState::Normal,
154 state: EntryState::Normal,
155 mode: mode,
155 mode: mode,
156 size: size,
156 size: size,
157 mtime: mtime,
157 mtime: mtime,
158 };
158 };
159 let added = added.extract::<PyBool>(py)?.is_true();
159 let added = added.extract::<PyBool>(py)?.is_true();
160 let merged = merged.extract::<PyBool>(py)?.is_true();
160 let merged = merged.extract::<PyBool>(py)?.is_true();
161 let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
161 let from_p2 = from_p2.extract::<PyBool>(py)?.is_true();
162 let possibly_dirty = possibly_dirty.extract::<PyBool>(py)?.is_true();
162 let possibly_dirty = possibly_dirty.extract::<PyBool>(py)?.is_true();
163 self.inner(py).borrow_mut().add_file(
163 self.inner(py).borrow_mut().add_file(
164 filename,
164 filename,
165 entry,
165 entry,
166 added,
166 added,
167 merged,
167 merged,
168 from_p2,
168 from_p2,
169 possibly_dirty
169 possibly_dirty
170 ).and(Ok(py.None())).or_else(|e: DirstateError| {
170 ).and(Ok(py.None())).or_else(|e: DirstateError| {
171 Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
171 Err(PyErr::new::<exc::ValueError, _>(py, e.to_string()))
172 })
172 })
173 }
173 }
174
174
175 def removefile(
175 def removefile(
176 &self,
176 &self,
177 f: PyObject,
177 f: PyObject,
178 in_merge: PyObject
178 in_merge: PyObject
179 ) -> PyResult<PyObject> {
179 ) -> PyResult<PyObject> {
180 self.inner(py).borrow_mut()
180 self.inner(py).borrow_mut()
181 .remove_file(
181 .remove_file(
182 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
182 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
183 in_merge.extract::<PyBool>(py)?.is_true(),
183 in_merge.extract::<PyBool>(py)?.is_true(),
184 )
184 )
185 .or_else(|_| {
185 .or_else(|_| {
186 Err(PyErr::new::<exc::OSError, _>(
186 Err(PyErr::new::<exc::OSError, _>(
187 py,
187 py,
188 "Dirstate error".to_string(),
188 "Dirstate error".to_string(),
189 ))
189 ))
190 })?;
190 })?;
191 Ok(py.None())
191 Ok(py.None())
192 }
192 }
193
193
194 def dropfile(
194 def dropfile(
195 &self,
195 &self,
196 f: PyObject,
196 f: PyObject,
197 ) -> PyResult<PyBool> {
197 ) -> PyResult<PyBool> {
198 self.inner(py).borrow_mut()
198 self.inner(py).borrow_mut()
199 .drop_file(
199 .drop_file(
200 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
200 HgPath::new(f.extract::<PyBytes>(py)?.data(py)),
201 )
201 )
202 .and_then(|b| Ok(b.to_py_object(py)))
202 .and_then(|b| Ok(b.to_py_object(py)))
203 .or_else(|e| {
203 .or_else(|e| {
204 Err(PyErr::new::<exc::OSError, _>(
204 Err(PyErr::new::<exc::OSError, _>(
205 py,
205 py,
206 format!("Dirstate error: {}", e.to_string()),
206 format!("Dirstate error: {}", e.to_string()),
207 ))
207 ))
208 })
208 })
209 }
209 }
210
210
211 def clearambiguoustimes(
211 def clearambiguoustimes(
212 &self,
212 &self,
213 files: PyObject,
213 files: PyObject,
214 now: PyObject
214 now: PyObject
215 ) -> PyResult<PyObject> {
215 ) -> PyResult<PyObject> {
216 let files: PyResult<Vec<HgPathBuf>> = files
216 let files: PyResult<Vec<HgPathBuf>> = files
217 .iter(py)?
217 .iter(py)?
218 .map(|filename| {
218 .map(|filename| {
219 Ok(HgPathBuf::from_bytes(
219 Ok(HgPathBuf::from_bytes(
220 filename?.extract::<PyBytes>(py)?.data(py),
220 filename?.extract::<PyBytes>(py)?.data(py),
221 ))
221 ))
222 })
222 })
223 .collect();
223 .collect();
224 self.inner(py)
224 self.inner(py)
225 .borrow_mut()
225 .borrow_mut()
226 .clear_ambiguous_times(files?, now.extract(py)?)
226 .clear_ambiguous_times(files?, now.extract(py)?)
227 .map_err(|e| v2_error(py, e))?;
227 .map_err(|e| v2_error(py, e))?;
228 Ok(py.None())
228 Ok(py.None())
229 }
229 }
230
230
231 def other_parent_entries(&self) -> PyResult<PyObject> {
231 def other_parent_entries(&self) -> PyResult<PyObject> {
232 let mut inner_shared = self.inner(py).borrow_mut();
232 let mut inner_shared = self.inner(py).borrow_mut();
233 let set = PySet::empty(py)?;
233 let set = PySet::empty(py)?;
234 for path in inner_shared.iter_other_parent_paths() {
234 for path in inner_shared.iter_other_parent_paths() {
235 let path = path.map_err(|e| v2_error(py, e))?;
235 let path = path.map_err(|e| v2_error(py, e))?;
236 set.add(py, PyBytes::new(py, path.as_bytes()))?;
236 set.add(py, PyBytes::new(py, path.as_bytes()))?;
237 }
237 }
238 Ok(set.into_object())
238 Ok(set.into_object())
239 }
239 }
240
240
241 def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
241 def non_normal_entries(&self) -> PyResult<NonNormalEntries> {
242 NonNormalEntries::from_inner(py, self.clone_ref(py))
242 NonNormalEntries::from_inner(py, self.clone_ref(py))
243 }
243 }
244
244
245 def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
245 def non_normal_entries_contains(&self, key: PyObject) -> PyResult<bool> {
246 let key = key.extract::<PyBytes>(py)?;
246 let key = key.extract::<PyBytes>(py)?;
247 self.inner(py)
247 self.inner(py)
248 .borrow_mut()
248 .borrow_mut()
249 .non_normal_entries_contains(HgPath::new(key.data(py)))
249 .non_normal_entries_contains(HgPath::new(key.data(py)))
250 .map_err(|e| v2_error(py, e))
250 .map_err(|e| v2_error(py, e))
251 }
251 }
252
252
253 def non_normal_entries_display(&self) -> PyResult<PyString> {
253 def non_normal_entries_display(&self) -> PyResult<PyString> {
254 let mut inner = self.inner(py).borrow_mut();
254 let mut inner = self.inner(py).borrow_mut();
255 let paths = inner
255 let paths = inner
256 .iter_non_normal_paths()
256 .iter_non_normal_paths()
257 .collect::<Result<Vec<_>, _>>()
257 .collect::<Result<Vec<_>, _>>()
258 .map_err(|e| v2_error(py, e))?;
258 .map_err(|e| v2_error(py, e))?;
259 let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
259 let formatted = format!("NonNormalEntries: {}", hg::utils::join_display(paths, ", "));
260 Ok(PyString::new(py, &formatted))
260 Ok(PyString::new(py, &formatted))
261 }
261 }
262
262
263 def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
263 def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
264 let key = key.extract::<PyBytes>(py)?;
264 let key = key.extract::<PyBytes>(py)?;
265 self
265 self
266 .inner(py)
266 .inner(py)
267 .borrow_mut()
267 .borrow_mut()
268 .non_normal_entries_remove(HgPath::new(key.data(py)));
268 .non_normal_entries_remove(HgPath::new(key.data(py)));
269 Ok(py.None())
269 Ok(py.None())
270 }
270 }
271
271
272 def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
272 def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
273 let mut inner = self.inner(py).borrow_mut();
273 let mut inner = self.inner(py).borrow_mut();
274
274
275 let ret = PyList::new(py, &[]);
275 let ret = PyList::new(py, &[]);
276 for filename in inner.non_normal_or_other_parent_paths() {
276 for filename in inner.non_normal_or_other_parent_paths() {
277 let filename = filename.map_err(|e| v2_error(py, e))?;
277 let filename = filename.map_err(|e| v2_error(py, e))?;
278 let as_pystring = PyBytes::new(py, filename.as_bytes());
278 let as_pystring = PyBytes::new(py, filename.as_bytes());
279 ret.append(py, as_pystring.into_object());
279 ret.append(py, as_pystring.into_object());
280 }
280 }
281 Ok(ret)
281 Ok(ret)
282 }
282 }
283
283
284 def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
284 def non_normal_entries_iter(&self) -> PyResult<NonNormalEntriesIterator> {
285 // Make sure the sets are defined before we no longer have a mutable
285 // Make sure the sets are defined before we no longer have a mutable
286 // reference to the dmap.
286 // reference to the dmap.
287 self.inner(py)
287 self.inner(py)
288 .borrow_mut()
288 .borrow_mut()
289 .set_non_normal_other_parent_entries(false);
289 .set_non_normal_other_parent_entries(false);
290
290
291 let leaked_ref = self.inner(py).leak_immutable();
291 let leaked_ref = self.inner(py).leak_immutable();
292
292
293 NonNormalEntriesIterator::from_inner(py, unsafe {
293 NonNormalEntriesIterator::from_inner(py, unsafe {
294 leaked_ref.map(py, |o| {
294 leaked_ref.map(py, |o| {
295 o.iter_non_normal_paths_panic()
295 o.iter_non_normal_paths_panic()
296 })
296 })
297 })
297 })
298 }
298 }
299
299
300 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
300 def hastrackeddir(&self, d: PyObject) -> PyResult<PyBool> {
301 let d = d.extract::<PyBytes>(py)?;
301 let d = d.extract::<PyBytes>(py)?;
302 Ok(self.inner(py).borrow_mut()
302 Ok(self.inner(py).borrow_mut()
303 .has_tracked_dir(HgPath::new(d.data(py)))
303 .has_tracked_dir(HgPath::new(d.data(py)))
304 .map_err(|e| {
304 .map_err(|e| {
305 PyErr::new::<exc::ValueError, _>(py, e.to_string())
305 PyErr::new::<exc::ValueError, _>(py, e.to_string())
306 })?
306 })?
307 .to_py_object(py))
307 .to_py_object(py))
308 }
308 }
309
309
310 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
310 def hasdir(&self, d: PyObject) -> PyResult<PyBool> {
311 let d = d.extract::<PyBytes>(py)?;
311 let d = d.extract::<PyBytes>(py)?;
312 Ok(self.inner(py).borrow_mut()
312 Ok(self.inner(py).borrow_mut()
313 .has_dir(HgPath::new(d.data(py)))
313 .has_dir(HgPath::new(d.data(py)))
314 .map_err(|e| {
314 .map_err(|e| {
315 PyErr::new::<exc::ValueError, _>(py, e.to_string())
315 PyErr::new::<exc::ValueError, _>(py, e.to_string())
316 })?
316 })?
317 .to_py_object(py))
317 .to_py_object(py))
318 }
318 }
319
319
320 def write_v1(
320 def write_v1(
321 &self,
321 &self,
322 p1: PyObject,
322 p1: PyObject,
323 p2: PyObject,
323 p2: PyObject,
324 now: PyObject
324 now: PyObject
325 ) -> PyResult<PyBytes> {
325 ) -> PyResult<PyBytes> {
326 let now = Timestamp(now.extract(py)?);
326 let now = Timestamp(now.extract(py)?);
327
327
328 let mut inner = self.inner(py).borrow_mut();
328 let mut inner = self.inner(py).borrow_mut();
329 let parents = DirstateParents {
329 let parents = DirstateParents {
330 p1: extract_node_id(py, &p1)?,
330 p1: extract_node_id(py, &p1)?,
331 p2: extract_node_id(py, &p2)?,
331 p2: extract_node_id(py, &p2)?,
332 };
332 };
333 let result = inner.pack_v1(parents, now);
333 let result = inner.pack_v1(parents, now);
334 match result {
334 match result {
335 Ok(packed) => Ok(PyBytes::new(py, &packed)),
335 Ok(packed) => Ok(PyBytes::new(py, &packed)),
336 Err(_) => Err(PyErr::new::<exc::OSError, _>(
336 Err(_) => Err(PyErr::new::<exc::OSError, _>(
337 py,
337 py,
338 "Dirstate error".to_string(),
338 "Dirstate error".to_string(),
339 )),
339 )),
340 }
340 }
341 }
341 }
342
342
+    /// Returns new data together with whether that data should be appended to
+    /// the existing data file whose content is at `self.on_disk` (True),
+    /// instead of written to a new data file (False).
     def write_v2(
         &self,
-        now: PyObject
-    ) -> PyResult<PyBytes> {
+        now: PyObject,
+        can_append: bool,
+    ) -> PyResult<PyObject> {
         let now = Timestamp(now.extract(py)?);
 
         let mut inner = self.inner(py).borrow_mut();
-        let result = inner.pack_v2(now);
+        let result = inner.pack_v2(now, can_append);
         match result {
-            Ok(packed) => Ok(PyBytes::new(py, &packed)),
+            Ok((packed, append)) => {
+                let packed = PyBytes::new(py, &packed);
+                Ok((packed, append).to_py_object(py).into_object())
+            },
             Err(_) => Err(PyErr::new::<exc::OSError, _>(
                 py,
                 "Dirstate error".to_string(),
             )),
         }
     }
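On the Python side the result is therefore a `(data, append_flag)` pair: the caller either appends `data` to the existing data file or writes it out as a fresh file and records that choice in the docket. A rough caller-side sketch under those assumptions (the file names and the direct `std::fs` calls are illustrative only, not Mercurial's actual transaction and docket handling):

use std::fs::OpenOptions;
use std::io::Write;

fn persist_v2(
    data: &[u8],
    append: bool,
    existing_path: &str,
    fresh_path: &str,
) -> std::io::Result<()> {
    if append {
        // Reuse the current data file: the new bytes go after what is
        // already there, which is exactly what the append offsets assume.
        let mut file = OpenOptions::new().append(true).open(existing_path)?;
        file.write_all(data)
    } else {
        // Start over with a fresh data file containing only the new bytes.
        std::fs::write(fresh_path, data)
    }
}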
359
366
360 def filefoldmapasdict(&self) -> PyResult<PyDict> {
367 def filefoldmapasdict(&self) -> PyResult<PyDict> {
361 let dict = PyDict::new(py);
368 let dict = PyDict::new(py);
362 for item in self.inner(py).borrow_mut().iter() {
369 for item in self.inner(py).borrow_mut().iter() {
363 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
370 let (path, entry) = item.map_err(|e| v2_error(py, e))?;
364 if entry.state != EntryState::Removed {
371 if entry.state != EntryState::Removed {
365 let key = normalize_case(path);
372 let key = normalize_case(path);
366 let value = path;
373 let value = path;
367 dict.set_item(
374 dict.set_item(
368 py,
375 py,
369 PyBytes::new(py, key.as_bytes()).into_object(),
376 PyBytes::new(py, key.as_bytes()).into_object(),
370 PyBytes::new(py, value.as_bytes()).into_object(),
377 PyBytes::new(py, value.as_bytes()).into_object(),
371 )?;
378 )?;
372 }
379 }
373 }
380 }
374 Ok(dict)
381 Ok(dict)
375 }
382 }
376
383
377 def __len__(&self) -> PyResult<usize> {
384 def __len__(&self) -> PyResult<usize> {
378 Ok(self.inner(py).borrow().len())
385 Ok(self.inner(py).borrow().len())
379 }
386 }
380
387
381 def __contains__(&self, key: PyObject) -> PyResult<bool> {
388 def __contains__(&self, key: PyObject) -> PyResult<bool> {
382 let key = key.extract::<PyBytes>(py)?;
389 let key = key.extract::<PyBytes>(py)?;
383 self.inner(py)
390 self.inner(py)
384 .borrow()
391 .borrow()
385 .contains_key(HgPath::new(key.data(py)))
392 .contains_key(HgPath::new(key.data(py)))
386 .map_err(|e| v2_error(py, e))
393 .map_err(|e| v2_error(py, e))
387 }
394 }
388
395
389 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
396 def __getitem__(&self, key: PyObject) -> PyResult<PyObject> {
390 let key = key.extract::<PyBytes>(py)?;
397 let key = key.extract::<PyBytes>(py)?;
391 let key = HgPath::new(key.data(py));
398 let key = HgPath::new(key.data(py));
392 match self
399 match self
393 .inner(py)
400 .inner(py)
394 .borrow()
401 .borrow()
395 .get(key)
402 .get(key)
396 .map_err(|e| v2_error(py, e))?
403 .map_err(|e| v2_error(py, e))?
397 {
404 {
398 Some(entry) => {
405 Some(entry) => {
399 Ok(make_dirstate_item(py, &entry)?)
406 Ok(make_dirstate_item(py, &entry)?)
400 },
407 },
401 None => Err(PyErr::new::<exc::KeyError, _>(
408 None => Err(PyErr::new::<exc::KeyError, _>(
402 py,
409 py,
403 String::from_utf8_lossy(key.as_bytes()),
410 String::from_utf8_lossy(key.as_bytes()),
404 )),
411 )),
405 }
412 }
406 }
413 }
407
414
408 def keys(&self) -> PyResult<DirstateMapKeysIterator> {
415 def keys(&self) -> PyResult<DirstateMapKeysIterator> {
409 let leaked_ref = self.inner(py).leak_immutable();
416 let leaked_ref = self.inner(py).leak_immutable();
410 DirstateMapKeysIterator::from_inner(
417 DirstateMapKeysIterator::from_inner(
411 py,
418 py,
412 unsafe { leaked_ref.map(py, |o| o.iter()) },
419 unsafe { leaked_ref.map(py, |o| o.iter()) },
413 )
420 )
414 }
421 }
415
422
416 def items(&self) -> PyResult<DirstateMapItemsIterator> {
423 def items(&self) -> PyResult<DirstateMapItemsIterator> {
417 let leaked_ref = self.inner(py).leak_immutable();
424 let leaked_ref = self.inner(py).leak_immutable();
418 DirstateMapItemsIterator::from_inner(
425 DirstateMapItemsIterator::from_inner(
419 py,
426 py,
420 unsafe { leaked_ref.map(py, |o| o.iter()) },
427 unsafe { leaked_ref.map(py, |o| o.iter()) },
421 )
428 )
422 }
429 }
423
430
424 def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
431 def __iter__(&self) -> PyResult<DirstateMapKeysIterator> {
425 let leaked_ref = self.inner(py).leak_immutable();
432 let leaked_ref = self.inner(py).leak_immutable();
426 DirstateMapKeysIterator::from_inner(
433 DirstateMapKeysIterator::from_inner(
427 py,
434 py,
428 unsafe { leaked_ref.map(py, |o| o.iter()) },
435 unsafe { leaked_ref.map(py, |o| o.iter()) },
429 )
436 )
430 }
437 }
431
438
432 // TODO all copymap* methods, see docstring above
439 // TODO all copymap* methods, see docstring above
433 def copymapcopy(&self) -> PyResult<PyDict> {
440 def copymapcopy(&self) -> PyResult<PyDict> {
434 let dict = PyDict::new(py);
441 let dict = PyDict::new(py);
435 for item in self.inner(py).borrow().copy_map_iter() {
442 for item in self.inner(py).borrow().copy_map_iter() {
436 let (key, value) = item.map_err(|e| v2_error(py, e))?;
443 let (key, value) = item.map_err(|e| v2_error(py, e))?;
437 dict.set_item(
444 dict.set_item(
438 py,
445 py,
439 PyBytes::new(py, key.as_bytes()),
446 PyBytes::new(py, key.as_bytes()),
440 PyBytes::new(py, value.as_bytes()),
447 PyBytes::new(py, value.as_bytes()),
441 )?;
448 )?;
442 }
449 }
443 Ok(dict)
450 Ok(dict)
444 }
451 }
445
452
446 def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
453 def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
447 let key = key.extract::<PyBytes>(py)?;
454 let key = key.extract::<PyBytes>(py)?;
448 match self
455 match self
449 .inner(py)
456 .inner(py)
450 .borrow()
457 .borrow()
451 .copy_map_get(HgPath::new(key.data(py)))
458 .copy_map_get(HgPath::new(key.data(py)))
452 .map_err(|e| v2_error(py, e))?
459 .map_err(|e| v2_error(py, e))?
453 {
460 {
454 Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
461 Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
455 None => Err(PyErr::new::<exc::KeyError, _>(
462 None => Err(PyErr::new::<exc::KeyError, _>(
456 py,
463 py,
457 String::from_utf8_lossy(key.data(py)),
464 String::from_utf8_lossy(key.data(py)),
458 )),
465 )),
459 }
466 }
460 }
467 }
461 def copymap(&self) -> PyResult<CopyMap> {
468 def copymap(&self) -> PyResult<CopyMap> {
462 CopyMap::from_inner(py, self.clone_ref(py))
469 CopyMap::from_inner(py, self.clone_ref(py))
463 }
470 }
464
471
465 def copymaplen(&self) -> PyResult<usize> {
472 def copymaplen(&self) -> PyResult<usize> {
466 Ok(self.inner(py).borrow().copy_map_len())
473 Ok(self.inner(py).borrow().copy_map_len())
467 }
474 }
468 def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
475 def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
469 let key = key.extract::<PyBytes>(py)?;
476 let key = key.extract::<PyBytes>(py)?;
470 self.inner(py)
477 self.inner(py)
471 .borrow()
478 .borrow()
472 .copy_map_contains_key(HgPath::new(key.data(py)))
479 .copy_map_contains_key(HgPath::new(key.data(py)))
473 .map_err(|e| v2_error(py, e))
480 .map_err(|e| v2_error(py, e))
474 }
481 }
475 def copymapget(
482 def copymapget(
476 &self,
483 &self,
477 key: PyObject,
484 key: PyObject,
478 default: Option<PyObject>
485 default: Option<PyObject>
479 ) -> PyResult<Option<PyObject>> {
486 ) -> PyResult<Option<PyObject>> {
480 let key = key.extract::<PyBytes>(py)?;
487 let key = key.extract::<PyBytes>(py)?;
481 match self
488 match self
482 .inner(py)
489 .inner(py)
483 .borrow()
490 .borrow()
484 .copy_map_get(HgPath::new(key.data(py)))
491 .copy_map_get(HgPath::new(key.data(py)))
485 .map_err(|e| v2_error(py, e))?
492 .map_err(|e| v2_error(py, e))?
486 {
493 {
487 Some(copy) => Ok(Some(
494 Some(copy) => Ok(Some(
488 PyBytes::new(py, copy.as_bytes()).into_object(),
495 PyBytes::new(py, copy.as_bytes()).into_object(),
489 )),
496 )),
490 None => Ok(default),
497 None => Ok(default),
491 }
498 }
492 }
499 }
493 def copymapsetitem(
500 def copymapsetitem(
494 &self,
501 &self,
495 key: PyObject,
502 key: PyObject,
496 value: PyObject
503 value: PyObject
497 ) -> PyResult<PyObject> {
504 ) -> PyResult<PyObject> {
498 let key = key.extract::<PyBytes>(py)?;
505 let key = key.extract::<PyBytes>(py)?;
499 let value = value.extract::<PyBytes>(py)?;
506 let value = value.extract::<PyBytes>(py)?;
500 self.inner(py)
507 self.inner(py)
501 .borrow_mut()
508 .borrow_mut()
502 .copy_map_insert(
509 .copy_map_insert(
503 HgPathBuf::from_bytes(key.data(py)),
510 HgPathBuf::from_bytes(key.data(py)),
504 HgPathBuf::from_bytes(value.data(py)),
511 HgPathBuf::from_bytes(value.data(py)),
505 )
512 )
506 .map_err(|e| v2_error(py, e))?;
513 .map_err(|e| v2_error(py, e))?;
507 Ok(py.None())
514 Ok(py.None())
508 }
515 }
509 def copymappop(
516 def copymappop(
510 &self,
517 &self,
511 key: PyObject,
518 key: PyObject,
512 default: Option<PyObject>
519 default: Option<PyObject>
513 ) -> PyResult<Option<PyObject>> {
520 ) -> PyResult<Option<PyObject>> {
514 let key = key.extract::<PyBytes>(py)?;
521 let key = key.extract::<PyBytes>(py)?;
515 match self
522 match self
516 .inner(py)
523 .inner(py)
517 .borrow_mut()
524 .borrow_mut()
518 .copy_map_remove(HgPath::new(key.data(py)))
525 .copy_map_remove(HgPath::new(key.data(py)))
519 .map_err(|e| v2_error(py, e))?
526 .map_err(|e| v2_error(py, e))?
520 {
527 {
521 Some(_) => Ok(None),
528 Some(_) => Ok(None),
522 None => Ok(default),
529 None => Ok(default),
523 }
530 }
524 }
531 }
525
532
526 def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
533 def copymapiter(&self) -> PyResult<CopyMapKeysIterator> {
527 let leaked_ref = self.inner(py).leak_immutable();
534 let leaked_ref = self.inner(py).leak_immutable();
528 CopyMapKeysIterator::from_inner(
535 CopyMapKeysIterator::from_inner(
529 py,
536 py,
530 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
537 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
531 )
538 )
532 }
539 }
533
540
534 def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
541 def copymapitemsiter(&self) -> PyResult<CopyMapItemsIterator> {
535 let leaked_ref = self.inner(py).leak_immutable();
542 let leaked_ref = self.inner(py).leak_immutable();
536 CopyMapItemsIterator::from_inner(
543 CopyMapItemsIterator::from_inner(
537 py,
544 py,
538 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
545 unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
539 )
546 )
540 }
547 }
541
548
542 def directories(&self) -> PyResult<PyList> {
549 def directories(&self) -> PyResult<PyList> {
543 let dirs = PyList::new(py, &[]);
550 let dirs = PyList::new(py, &[]);
544 for item in self.inner(py).borrow().iter_directories() {
551 for item in self.inner(py).borrow().iter_directories() {
545 let (path, mtime) = item.map_err(|e| v2_error(py, e))?;
552 let (path, mtime) = item.map_err(|e| v2_error(py, e))?;
546 let path = PyBytes::new(py, path.as_bytes());
553 let path = PyBytes::new(py, path.as_bytes());
547 let mtime = mtime.map(|t| t.0).unwrap_or(-1);
554 let mtime = mtime.map(|t| t.0).unwrap_or(-1);
548 let item = make_directory_item(py, mtime as i32)?;
555 let item = make_directory_item(py, mtime as i32)?;
549 let tuple = (path, item);
556 let tuple = (path, item);
550 dirs.append(py, tuple.to_py_object(py).into_object())
557 dirs.append(py, tuple.to_py_object(py).into_object())
551 }
558 }
552 Ok(dirs)
559 Ok(dirs)
553 }
560 }
554
561
555 });
562 });
556
563
557 impl DirstateMap {
564 impl DirstateMap {
558 pub fn get_inner_mut<'a>(
565 pub fn get_inner_mut<'a>(
559 &'a self,
566 &'a self,
560 py: Python<'a>,
567 py: Python<'a>,
561 ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
568 ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
562 self.inner(py).borrow_mut()
569 self.inner(py).borrow_mut()
563 }
570 }
564 fn translate_key(
571 fn translate_key(
565 py: Python,
572 py: Python,
566 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
573 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
567 ) -> PyResult<Option<PyBytes>> {
574 ) -> PyResult<Option<PyBytes>> {
568 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
575 let (f, _entry) = res.map_err(|e| v2_error(py, e))?;
569 Ok(Some(PyBytes::new(py, f.as_bytes())))
576 Ok(Some(PyBytes::new(py, f.as_bytes())))
570 }
577 }
571 fn translate_key_value(
578 fn translate_key_value(
572 py: Python,
579 py: Python,
573 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
580 res: Result<(&HgPath, DirstateEntry), DirstateV2ParseError>,
574 ) -> PyResult<Option<(PyBytes, PyObject)>> {
581 ) -> PyResult<Option<(PyBytes, PyObject)>> {
575 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
582 let (f, entry) = res.map_err(|e| v2_error(py, e))?;
576 Ok(Some((
583 Ok(Some((
577 PyBytes::new(py, f.as_bytes()),
584 PyBytes::new(py, f.as_bytes()),
578 make_dirstate_item(py, &entry)?,
585 make_dirstate_item(py, &entry)?,
579 )))
586 )))
580 }
587 }
581 }
588 }
582
589
583 py_shared_iterator!(
590 py_shared_iterator!(
584 DirstateMapKeysIterator,
591 DirstateMapKeysIterator,
585 UnsafePyLeaked<StateMapIter<'static>>,
592 UnsafePyLeaked<StateMapIter<'static>>,
586 DirstateMap::translate_key,
593 DirstateMap::translate_key,
587 Option<PyBytes>
594 Option<PyBytes>
588 );
595 );
589
596
590 py_shared_iterator!(
597 py_shared_iterator!(
591 DirstateMapItemsIterator,
598 DirstateMapItemsIterator,
592 UnsafePyLeaked<StateMapIter<'static>>,
599 UnsafePyLeaked<StateMapIter<'static>>,
593 DirstateMap::translate_key_value,
600 DirstateMap::translate_key_value,
594 Option<(PyBytes, PyObject)>
601 Option<(PyBytes, PyObject)>
595 );
602 );
596
603
597 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
604 fn extract_node_id(py: Python, obj: &PyObject) -> PyResult<Node> {
598 let bytes = obj.extract::<PyBytes>(py)?;
605 let bytes = obj.extract::<PyBytes>(py)?;
599 match bytes.data(py).try_into() {
606 match bytes.data(py).try_into() {
600 Ok(s) => Ok(s),
607 Ok(s) => Ok(s),
601 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
608 Err(e) => Err(PyErr::new::<exc::ValueError, _>(py, e.to_string())),
602 }
609 }
603 }
610 }
604
611
605 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
612 pub(super) fn v2_error(py: Python<'_>, _: DirstateV2ParseError) -> PyErr {
606 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
613 PyErr::new::<exc::ValueError, _>(py, "corrupted dirstate-v2")
607 }
614 }
@@ -1,215 +1,219 b''
1 use crate::dirstate::owning::OwningDirstateMap;
1 use crate::dirstate::owning::OwningDirstateMap;
2 use hg::dirstate::parsers::Timestamp;
2 use hg::dirstate::parsers::Timestamp;
3 use hg::dirstate_tree::dispatch::DirstateMapMethods;
3 use hg::dirstate_tree::dispatch::DirstateMapMethods;
4 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
4 use hg::dirstate_tree::on_disk::DirstateV2ParseError;
5 use hg::matchers::Matcher;
5 use hg::matchers::Matcher;
6 use hg::utils::hg_path::{HgPath, HgPathBuf};
6 use hg::utils::hg_path::{HgPath, HgPathBuf};
7 use hg::CopyMapIter;
7 use hg::CopyMapIter;
8 use hg::DirstateEntry;
8 use hg::DirstateEntry;
9 use hg::DirstateError;
9 use hg::DirstateError;
10 use hg::DirstateParents;
10 use hg::DirstateParents;
11 use hg::DirstateStatus;
11 use hg::DirstateStatus;
12 use hg::PatternFileWarning;
12 use hg::PatternFileWarning;
13 use hg::StateMapIter;
13 use hg::StateMapIter;
14 use hg::StatusError;
14 use hg::StatusError;
15 use hg::StatusOptions;
15 use hg::StatusOptions;
16 use std::path::PathBuf;
16 use std::path::PathBuf;
17
17
18 impl DirstateMapMethods for OwningDirstateMap {
18 impl DirstateMapMethods for OwningDirstateMap {
19 fn clear(&mut self) {
19 fn clear(&mut self) {
20 self.get_mut().clear()
20 self.get_mut().clear()
21 }
21 }
22
22
23 fn add_file(
23 fn add_file(
24 &mut self,
24 &mut self,
25 filename: &HgPath,
25 filename: &HgPath,
26 entry: DirstateEntry,
26 entry: DirstateEntry,
27 added: bool,
27 added: bool,
28 merged: bool,
28 merged: bool,
29 from_p2: bool,
29 from_p2: bool,
30 possibly_dirty: bool,
30 possibly_dirty: bool,
31 ) -> Result<(), DirstateError> {
31 ) -> Result<(), DirstateError> {
32 self.get_mut().add_file(
32 self.get_mut().add_file(
33 filename,
33 filename,
34 entry,
34 entry,
35 added,
35 added,
36 merged,
36 merged,
37 from_p2,
37 from_p2,
38 possibly_dirty,
38 possibly_dirty,
39 )
39 )
40 }
40 }
41
41
42 fn remove_file(
42 fn remove_file(
43 &mut self,
43 &mut self,
44 filename: &HgPath,
44 filename: &HgPath,
45 in_merge: bool,
45 in_merge: bool,
46 ) -> Result<(), DirstateError> {
46 ) -> Result<(), DirstateError> {
47 self.get_mut().remove_file(filename, in_merge)
47 self.get_mut().remove_file(filename, in_merge)
48 }
48 }
49
49
50 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
50 fn drop_file(&mut self, filename: &HgPath) -> Result<bool, DirstateError> {
51 self.get_mut().drop_file(filename)
51 self.get_mut().drop_file(filename)
52 }
52 }
53
53
54 fn clear_ambiguous_times(
54 fn clear_ambiguous_times(
55 &mut self,
55 &mut self,
56 filenames: Vec<HgPathBuf>,
56 filenames: Vec<HgPathBuf>,
57 now: i32,
57 now: i32,
58 ) -> Result<(), DirstateV2ParseError> {
58 ) -> Result<(), DirstateV2ParseError> {
59 self.get_mut().clear_ambiguous_times(filenames, now)
59 self.get_mut().clear_ambiguous_times(filenames, now)
60 }
60 }
61
61
62 fn non_normal_entries_contains(
62 fn non_normal_entries_contains(
63 &mut self,
63 &mut self,
64 key: &HgPath,
64 key: &HgPath,
65 ) -> Result<bool, DirstateV2ParseError> {
65 ) -> Result<bool, DirstateV2ParseError> {
66 self.get_mut().non_normal_entries_contains(key)
66 self.get_mut().non_normal_entries_contains(key)
67 }
67 }
68
68
69 fn non_normal_entries_remove(&mut self, key: &HgPath) {
69 fn non_normal_entries_remove(&mut self, key: &HgPath) {
70 self.get_mut().non_normal_entries_remove(key)
70 self.get_mut().non_normal_entries_remove(key)
71 }
71 }
72
72
73 fn non_normal_or_other_parent_paths(
73 fn non_normal_or_other_parent_paths(
74 &mut self,
74 &mut self,
75 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
75 ) -> Box<dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + '_>
76 {
76 {
77 self.get_mut().non_normal_or_other_parent_paths()
77 self.get_mut().non_normal_or_other_parent_paths()
78 }
78 }
79
79
80 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
80 fn set_non_normal_other_parent_entries(&mut self, force: bool) {
81 self.get_mut().set_non_normal_other_parent_entries(force)
81 self.get_mut().set_non_normal_other_parent_entries(force)
82 }
82 }
83
83
84 fn iter_non_normal_paths(
84 fn iter_non_normal_paths(
85 &mut self,
85 &mut self,
86 ) -> Box<
86 ) -> Box<
87 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
87 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
88 > {
88 > {
89 self.get_mut().iter_non_normal_paths()
89 self.get_mut().iter_non_normal_paths()
90 }
90 }
91
91
92 fn iter_non_normal_paths_panic(
92 fn iter_non_normal_paths_panic(
93 &self,
93 &self,
94 ) -> Box<
94 ) -> Box<
95 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
95 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
96 > {
96 > {
97 self.get().iter_non_normal_paths_panic()
97 self.get().iter_non_normal_paths_panic()
98 }
98 }
99
99
100 fn iter_other_parent_paths(
100 fn iter_other_parent_paths(
101 &mut self,
101 &mut self,
102 ) -> Box<
102 ) -> Box<
103 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
103 dyn Iterator<Item = Result<&HgPath, DirstateV2ParseError>> + Send + '_,
104 > {
104 > {
105 self.get_mut().iter_other_parent_paths()
105 self.get_mut().iter_other_parent_paths()
106 }
106 }
107
107
108 fn has_tracked_dir(
108 fn has_tracked_dir(
109 &mut self,
109 &mut self,
110 directory: &HgPath,
110 directory: &HgPath,
111 ) -> Result<bool, DirstateError> {
111 ) -> Result<bool, DirstateError> {
112 self.get_mut().has_tracked_dir(directory)
112 self.get_mut().has_tracked_dir(directory)
113 }
113 }
114
114
115 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
115 fn has_dir(&mut self, directory: &HgPath) -> Result<bool, DirstateError> {
116 self.get_mut().has_dir(directory)
116 self.get_mut().has_dir(directory)
117 }
117 }
118
118
119 fn pack_v1(
119 fn pack_v1(
120 &mut self,
120 &mut self,
121 parents: DirstateParents,
121 parents: DirstateParents,
122 now: Timestamp,
122 now: Timestamp,
123 ) -> Result<Vec<u8>, DirstateError> {
123 ) -> Result<Vec<u8>, DirstateError> {
124 self.get_mut().pack_v1(parents, now)
124 self.get_mut().pack_v1(parents, now)
125 }
125 }
126
126
-    fn pack_v2(&mut self, now: Timestamp) -> Result<Vec<u8>, DirstateError> {
-        self.get_mut().pack_v2(now)
+    fn pack_v2(
+        &mut self,
+        now: Timestamp,
+        can_append: bool,
+    ) -> Result<(Vec<u8>, bool), DirstateError> {
+        self.get_mut().pack_v2(now, can_append)
     }
130
134
131 fn status<'a>(
135 fn status<'a>(
132 &'a mut self,
136 &'a mut self,
133 matcher: &'a (dyn Matcher + Sync),
137 matcher: &'a (dyn Matcher + Sync),
134 root_dir: PathBuf,
138 root_dir: PathBuf,
135 ignore_files: Vec<PathBuf>,
139 ignore_files: Vec<PathBuf>,
136 options: StatusOptions,
140 options: StatusOptions,
137 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
141 ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
138 {
142 {
139 self.get_mut()
143 self.get_mut()
140 .status(matcher, root_dir, ignore_files, options)
144 .status(matcher, root_dir, ignore_files, options)
141 }
145 }
142
146
143 fn copy_map_len(&self) -> usize {
147 fn copy_map_len(&self) -> usize {
144 self.get().copy_map_len()
148 self.get().copy_map_len()
145 }
149 }
146
150
147 fn copy_map_iter(&self) -> CopyMapIter<'_> {
151 fn copy_map_iter(&self) -> CopyMapIter<'_> {
148 self.get().copy_map_iter()
152 self.get().copy_map_iter()
149 }
153 }
150
154
151 fn copy_map_contains_key(
155 fn copy_map_contains_key(
152 &self,
156 &self,
153 key: &HgPath,
157 key: &HgPath,
154 ) -> Result<bool, DirstateV2ParseError> {
158 ) -> Result<bool, DirstateV2ParseError> {
155 self.get().copy_map_contains_key(key)
159 self.get().copy_map_contains_key(key)
156 }
160 }
157
161
158 fn copy_map_get(
162 fn copy_map_get(
159 &self,
163 &self,
160 key: &HgPath,
164 key: &HgPath,
161 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
165 ) -> Result<Option<&HgPath>, DirstateV2ParseError> {
162 self.get().copy_map_get(key)
166 self.get().copy_map_get(key)
163 }
167 }
164
168
165 fn copy_map_remove(
169 fn copy_map_remove(
166 &mut self,
170 &mut self,
167 key: &HgPath,
171 key: &HgPath,
168 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
172 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
169 self.get_mut().copy_map_remove(key)
173 self.get_mut().copy_map_remove(key)
170 }
174 }
171
175
172 fn copy_map_insert(
176 fn copy_map_insert(
173 &mut self,
177 &mut self,
174 key: HgPathBuf,
178 key: HgPathBuf,
175 value: HgPathBuf,
179 value: HgPathBuf,
176 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
180 ) -> Result<Option<HgPathBuf>, DirstateV2ParseError> {
177 self.get_mut().copy_map_insert(key, value)
181 self.get_mut().copy_map_insert(key, value)
178 }
182 }
179
183
180 fn len(&self) -> usize {
184 fn len(&self) -> usize {
181 self.get().len()
185 self.get().len()
182 }
186 }
183
187
184 fn contains_key(
188 fn contains_key(
185 &self,
189 &self,
186 key: &HgPath,
190 key: &HgPath,
187 ) -> Result<bool, DirstateV2ParseError> {
191 ) -> Result<bool, DirstateV2ParseError> {
188 self.get().contains_key(key)
192 self.get().contains_key(key)
189 }
193 }
190
194
191 fn get(
195 fn get(
192 &self,
196 &self,
193 key: &HgPath,
197 key: &HgPath,
194 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
198 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
195 self.get().get(key)
199 self.get().get(key)
196 }
200 }
197
201
198 fn iter(&self) -> StateMapIter<'_> {
202 fn iter(&self) -> StateMapIter<'_> {
199 self.get().iter()
203 self.get().iter()
200 }
204 }
201
205
202 fn iter_directories(
206 fn iter_directories(
203 &self,
207 &self,
204 ) -> Box<
208 ) -> Box<
205 dyn Iterator<
209 dyn Iterator<
206 Item = Result<
210 Item = Result<
207 (&HgPath, Option<Timestamp>),
211 (&HgPath, Option<Timestamp>),
208 DirstateV2ParseError,
212 DirstateV2ParseError,
209 >,
213 >,
210 > + Send
214 > + Send
211 + '_,
215 + '_,
212 > {
216 > {
213 self.get().iter_directories()
217 self.get().iter_directories()
214 }
218 }
215 }
219 }