##// END OF EJS Templates
dirstate: move parent state handling in the dirstatemap...
marmoute -
r48873:5d68c4ee default
parent child Browse files
Show More
@@ -1,1549 +1,1520
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = dirstatemap.DirstateItem
48 DirstateItem = dirstatemap.DirstateItem
49
49
50
50
51 class repocache(filecache):
51 class repocache(filecache):
52 """filecache for files in .hg/"""
52 """filecache for files in .hg/"""
53
53
54 def join(self, obj, fname):
54 def join(self, obj, fname):
55 return obj._opener.join(fname)
55 return obj._opener.join(fname)
56
56
57
57
58 class rootcache(filecache):
58 class rootcache(filecache):
59 """filecache for files in the repository root"""
59 """filecache for files in the repository root"""
60
60
61 def join(self, obj, fname):
61 def join(self, obj, fname):
62 return obj._join(fname)
62 return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
75 def requires_parents_change(func):
75 def requires_parents_change(func):
76 def wrap(self, *args, **kwargs):
76 def wrap(self, *args, **kwargs):
77 if not self.pendingparentchange():
77 if not self.pendingparentchange():
78 msg = 'calling `%s` outside of a parentchange context'
78 msg = 'calling `%s` outside of a parentchange context'
79 msg %= func.__name__
79 msg %= func.__name__
80 raise error.ProgrammingError(msg)
80 raise error.ProgrammingError(msg)
81 return func(self, *args, **kwargs)
81 return func(self, *args, **kwargs)
82
82
83 return wrap
83 return wrap
84
84
85
85
86 def requires_no_parents_change(func):
86 def requires_no_parents_change(func):
87 def wrap(self, *args, **kwargs):
87 def wrap(self, *args, **kwargs):
88 if self.pendingparentchange():
88 if self.pendingparentchange():
89 msg = 'calling `%s` inside of a parentchange context'
89 msg = 'calling `%s` inside of a parentchange context'
90 msg %= func.__name__
90 msg %= func.__name__
91 raise error.ProgrammingError(msg)
91 raise error.ProgrammingError(msg)
92 return func(self, *args, **kwargs)
92 return func(self, *args, **kwargs)
93
93
94 return wrap
94 return wrap
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._mapcls = dirstatemap.dirstatemap
133 self._mapcls = dirstatemap.dirstatemap
134 # Access and cache cwd early, so we don't access it for the first time
134 # Access and cache cwd early, so we don't access it for the first time
135 # after a working-copy update caused it to not exist (accessing it then
135 # after a working-copy update caused it to not exist (accessing it then
136 # raises an exception).
136 # raises an exception).
137 self._cwd
137 self._cwd
138
138
139 def prefetch_parents(self):
139 def prefetch_parents(self):
140 """make sure the parents are loaded
140 """make sure the parents are loaded
141
141
142 Used to avoid a race condition.
142 Used to avoid a race condition.
143 """
143 """
144 self._pl
144 self._pl
145
145
146 @contextlib.contextmanager
146 @contextlib.contextmanager
147 def parentchange(self):
147 def parentchange(self):
148 """Context manager for handling dirstate parents.
148 """Context manager for handling dirstate parents.
149
149
150 If an exception occurs in the scope of the context manager,
150 If an exception occurs in the scope of the context manager,
151 the incoherent dirstate won't be written when wlock is
151 the incoherent dirstate won't be written when wlock is
152 released.
152 released.
153 """
153 """
154 self._parentwriters += 1
154 self._parentwriters += 1
155 yield
155 yield
156 # Typically we want the "undo" step of a context manager in a
156 # Typically we want the "undo" step of a context manager in a
157 # finally block so it happens even when an exception
157 # finally block so it happens even when an exception
158 # occurs. In this case, however, we only want to decrement
158 # occurs. In this case, however, we only want to decrement
159 # parentwriters if the code in the with statement exits
159 # parentwriters if the code in the with statement exits
160 # normally, so we don't have a try/finally here on purpose.
160 # normally, so we don't have a try/finally here on purpose.
161 self._parentwriters -= 1
161 self._parentwriters -= 1
162
162
163 def pendingparentchange(self):
163 def pendingparentchange(self):
164 """Returns true if the dirstate is in the middle of a set of changes
164 """Returns true if the dirstate is in the middle of a set of changes
165 that modify the dirstate parent.
165 that modify the dirstate parent.
166 """
166 """
167 return self._parentwriters > 0
167 return self._parentwriters > 0
168
168
169 @propertycache
169 @propertycache
170 def _map(self):
170 def _map(self):
171 """Return the dirstate contents (see documentation for dirstatemap)."""
171 """Return the dirstate contents (see documentation for dirstatemap)."""
172 self._map = self._mapcls(
172 self._map = self._mapcls(
173 self._ui,
173 self._ui,
174 self._opener,
174 self._opener,
175 self._root,
175 self._root,
176 self._nodeconstants,
176 self._nodeconstants,
177 self._use_dirstate_v2,
177 self._use_dirstate_v2,
178 )
178 )
179 return self._map
179 return self._map
180
180
181 @property
181 @property
182 def _sparsematcher(self):
182 def _sparsematcher(self):
183 """The matcher for the sparse checkout.
183 """The matcher for the sparse checkout.
184
184
185 The working directory may not include every file from a manifest. The
185 The working directory may not include every file from a manifest. The
186 matcher obtained by this property will match a path if it is to be
186 matcher obtained by this property will match a path if it is to be
187 included in the working directory.
187 included in the working directory.
188 """
188 """
189 # TODO there is potential to cache this property. For now, the matcher
189 # TODO there is potential to cache this property. For now, the matcher
190 # is resolved on every access. (But the called function does use a
190 # is resolved on every access. (But the called function does use a
191 # cache to keep the lookup fast.)
191 # cache to keep the lookup fast.)
192 return self._sparsematchfn()
192 return self._sparsematchfn()
193
193
194 @repocache(b'branch')
194 @repocache(b'branch')
195 def _branch(self):
195 def _branch(self):
196 try:
196 try:
197 return self._opener.read(b"branch").strip() or b"default"
197 return self._opener.read(b"branch").strip() or b"default"
198 except IOError as inst:
198 except IOError as inst:
199 if inst.errno != errno.ENOENT:
199 if inst.errno != errno.ENOENT:
200 raise
200 raise
201 return b"default"
201 return b"default"
202
202
203 @property
203 @property
204 def _pl(self):
204 def _pl(self):
205 return self._map.parents()
205 return self._map.parents()
206
206
207 def hasdir(self, d):
207 def hasdir(self, d):
208 return self._map.hastrackeddir(d)
208 return self._map.hastrackeddir(d)
209
209
210 @rootcache(b'.hgignore')
210 @rootcache(b'.hgignore')
211 def _ignore(self):
211 def _ignore(self):
212 files = self._ignorefiles()
212 files = self._ignorefiles()
213 if not files:
213 if not files:
214 return matchmod.never()
214 return matchmod.never()
215
215
216 pats = [b'include:%s' % f for f in files]
216 pats = [b'include:%s' % f for f in files]
217 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
217 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218
218
219 @propertycache
219 @propertycache
220 def _slash(self):
220 def _slash(self):
221 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
221 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222
222
223 @propertycache
223 @propertycache
224 def _checklink(self):
224 def _checklink(self):
225 return util.checklink(self._root)
225 return util.checklink(self._root)
226
226
227 @propertycache
227 @propertycache
228 def _checkexec(self):
228 def _checkexec(self):
229 return bool(util.checkexec(self._root))
229 return bool(util.checkexec(self._root))
230
230
231 @propertycache
231 @propertycache
232 def _checkcase(self):
232 def _checkcase(self):
233 return not util.fscasesensitive(self._join(b'.hg'))
233 return not util.fscasesensitive(self._join(b'.hg'))
234
234
235 def _join(self, f):
235 def _join(self, f):
236 # much faster than os.path.join()
236 # much faster than os.path.join()
237 # it's safe because f is always a relative path
237 # it's safe because f is always a relative path
238 return self._rootdir + f
238 return self._rootdir + f
239
239
240 def flagfunc(self, buildfallback):
240 def flagfunc(self, buildfallback):
241 if self._checklink and self._checkexec:
241 if self._checklink and self._checkexec:
242
242
243 def f(x):
243 def f(x):
244 try:
244 try:
245 st = os.lstat(self._join(x))
245 st = os.lstat(self._join(x))
246 if util.statislink(st):
246 if util.statislink(st):
247 return b'l'
247 return b'l'
248 if util.statisexec(st):
248 if util.statisexec(st):
249 return b'x'
249 return b'x'
250 except OSError:
250 except OSError:
251 pass
251 pass
252 return b''
252 return b''
253
253
254 return f
254 return f
255
255
256 fallback = buildfallback()
256 fallback = buildfallback()
257 if self._checklink:
257 if self._checklink:
258
258
259 def f(x):
259 def f(x):
260 if os.path.islink(self._join(x)):
260 if os.path.islink(self._join(x)):
261 return b'l'
261 return b'l'
262 if b'x' in fallback(x):
262 if b'x' in fallback(x):
263 return b'x'
263 return b'x'
264 return b''
264 return b''
265
265
266 return f
266 return f
267 if self._checkexec:
267 if self._checkexec:
268
268
269 def f(x):
269 def f(x):
270 if b'l' in fallback(x):
270 if b'l' in fallback(x):
271 return b'l'
271 return b'l'
272 if util.isexec(self._join(x)):
272 if util.isexec(self._join(x)):
273 return b'x'
273 return b'x'
274 return b''
274 return b''
275
275
276 return f
276 return f
277 else:
277 else:
278 return fallback
278 return fallback
279
279
280 @propertycache
280 @propertycache
281 def _cwd(self):
281 def _cwd(self):
282 # internal config: ui.forcecwd
282 # internal config: ui.forcecwd
283 forcecwd = self._ui.config(b'ui', b'forcecwd')
283 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 if forcecwd:
284 if forcecwd:
285 return forcecwd
285 return forcecwd
286 return encoding.getcwd()
286 return encoding.getcwd()
287
287
288 def getcwd(self):
288 def getcwd(self):
289 """Return the path from which a canonical path is calculated.
289 """Return the path from which a canonical path is calculated.
290
290
291 This path should be used to resolve file patterns or to convert
291 This path should be used to resolve file patterns or to convert
292 canonical paths back to file paths for display. It shouldn't be
292 canonical paths back to file paths for display. It shouldn't be
293 used to get real file paths. Use vfs functions instead.
293 used to get real file paths. Use vfs functions instead.
294 """
294 """
295 cwd = self._cwd
295 cwd = self._cwd
296 if cwd == self._root:
296 if cwd == self._root:
297 return b''
297 return b''
298 # self._root ends with a path separator if self._root is '/' or 'C:\'
298 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 rootsep = self._root
299 rootsep = self._root
300 if not util.endswithsep(rootsep):
300 if not util.endswithsep(rootsep):
301 rootsep += pycompat.ossep
301 rootsep += pycompat.ossep
302 if cwd.startswith(rootsep):
302 if cwd.startswith(rootsep):
303 return cwd[len(rootsep) :]
303 return cwd[len(rootsep) :]
304 else:
304 else:
305 # we're outside the repo. return an absolute path.
305 # we're outside the repo. return an absolute path.
306 return cwd
306 return cwd
307
307
308 def pathto(self, f, cwd=None):
308 def pathto(self, f, cwd=None):
309 if cwd is None:
309 if cwd is None:
310 cwd = self.getcwd()
310 cwd = self.getcwd()
311 path = util.pathto(self._root, cwd, f)
311 path = util.pathto(self._root, cwd, f)
312 if self._slash:
312 if self._slash:
313 return util.pconvert(path)
313 return util.pconvert(path)
314 return path
314 return path
315
315
316 def __getitem__(self, key):
316 def __getitem__(self, key):
317 """Return the current state of key (a filename) in the dirstate.
317 """Return the current state of key (a filename) in the dirstate.
318
318
319 States are:
319 States are:
320 n normal
320 n normal
321 m needs merging
321 m needs merging
322 r marked for removal
322 r marked for removal
323 a marked for addition
323 a marked for addition
324 ? not tracked
324 ? not tracked
325
325
326 XXX The "state" is a bit obscure to be in the "public" API. we should
326 XXX The "state" is a bit obscure to be in the "public" API. we should
327 consider migrating all user of this to going through the dirstate entry
327 consider migrating all user of this to going through the dirstate entry
328 instead.
328 instead.
329 """
329 """
330 entry = self._map.get(key)
330 entry = self._map.get(key)
331 if entry is not None:
331 if entry is not None:
332 return entry.state
332 return entry.state
333 return b'?'
333 return b'?'
334
334
335 def __contains__(self, key):
335 def __contains__(self, key):
336 return key in self._map
336 return key in self._map
337
337
338 def __iter__(self):
338 def __iter__(self):
339 return iter(sorted(self._map))
339 return iter(sorted(self._map))
340
340
341 def items(self):
341 def items(self):
342 return pycompat.iteritems(self._map)
342 return pycompat.iteritems(self._map)
343
343
344 iteritems = items
344 iteritems = items
345
345
346 def parents(self):
346 def parents(self):
347 return [self._validate(p) for p in self._pl]
347 return [self._validate(p) for p in self._pl]
348
348
349 def p1(self):
349 def p1(self):
350 return self._validate(self._pl[0])
350 return self._validate(self._pl[0])
351
351
352 def p2(self):
352 def p2(self):
353 return self._validate(self._pl[1])
353 return self._validate(self._pl[1])
354
354
355 @property
355 @property
356 def in_merge(self):
356 def in_merge(self):
357 """True if a merge is in progress"""
357 """True if a merge is in progress"""
358 return self._pl[1] != self._nodeconstants.nullid
358 return self._pl[1] != self._nodeconstants.nullid
359
359
360 def branch(self):
360 def branch(self):
361 return encoding.tolocal(self._branch)
361 return encoding.tolocal(self._branch)
362
362
363 def setparents(self, p1, p2=None):
363 def setparents(self, p1, p2=None):
364 """Set dirstate parents to p1 and p2.
364 """Set dirstate parents to p1 and p2.
365
365
366 When moving from two parents to one, "merged" entries a
366 When moving from two parents to one, "merged" entries a
367 adjusted to normal and previous copy records discarded and
367 adjusted to normal and previous copy records discarded and
368 returned by the call.
368 returned by the call.
369
369
370 See localrepo.setparents()
370 See localrepo.setparents()
371 """
371 """
372 if p2 is None:
372 if p2 is None:
373 p2 = self._nodeconstants.nullid
373 p2 = self._nodeconstants.nullid
374 if self._parentwriters == 0:
374 if self._parentwriters == 0:
375 raise ValueError(
375 raise ValueError(
376 b"cannot set dirstate parent outside of "
376 b"cannot set dirstate parent outside of "
377 b"dirstate.parentchange context manager"
377 b"dirstate.parentchange context manager"
378 )
378 )
379
379
380 self._dirty = True
380 self._dirty = True
381 oldp2 = self._pl[1]
381 oldp2 = self._pl[1]
382 if self._origpl is None:
382 if self._origpl is None:
383 self._origpl = self._pl
383 self._origpl = self._pl
384 self._map.setparents(p1, p2)
385 copies = {}
386 nullid = self._nodeconstants.nullid
384 nullid = self._nodeconstants.nullid
387 if oldp2 != nullid and p2 == nullid:
385 # True if we need to fold p2 related state back to a linear case
388 candidatefiles = self._map.non_normal_or_other_parent_paths()
386 fold_p2 = oldp2 != nullid and p2 == nullid
389
387 return self._map.setparents(p1, p2, fold_p2=fold_p2)
390 for f in candidatefiles:
391 s = self._map.get(f)
392 if s is None:
393 continue
394
395 # Discard "merged" markers when moving away from a merge state
396 if s.merged:
397 source = self._map.copymap.get(f)
398 if source:
399 copies[f] = source
400 self._map.reset_state(
401 f,
402 wc_tracked=True,
403 p1_tracked=True,
404 possibly_dirty=True,
405 )
406 # Also fix up otherparent markers
407 elif s.from_p2:
408 source = self._map.copymap.get(f)
409 if source:
410 copies[f] = source
411 self._map.reset_state(
412 f,
413 p1_tracked=False,
414 wc_tracked=True,
415 )
416 return copies
417
388
418 def setbranch(self, branch):
389 def setbranch(self, branch):
419 self.__class__._branch.set(self, encoding.fromlocal(branch))
390 self.__class__._branch.set(self, encoding.fromlocal(branch))
420 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
391 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
421 try:
392 try:
422 f.write(self._branch + b'\n')
393 f.write(self._branch + b'\n')
423 f.close()
394 f.close()
424
395
425 # make sure filecache has the correct stat info for _branch after
396 # make sure filecache has the correct stat info for _branch after
426 # replacing the underlying file
397 # replacing the underlying file
427 ce = self._filecache[b'_branch']
398 ce = self._filecache[b'_branch']
428 if ce:
399 if ce:
429 ce.refresh()
400 ce.refresh()
430 except: # re-raises
401 except: # re-raises
431 f.discard()
402 f.discard()
432 raise
403 raise
433
404
434 def invalidate(self):
405 def invalidate(self):
435 """Causes the next access to reread the dirstate.
406 """Causes the next access to reread the dirstate.
436
407
437 This is different from localrepo.invalidatedirstate() because it always
408 This is different from localrepo.invalidatedirstate() because it always
438 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
409 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
439 check whether the dirstate has changed before rereading it."""
410 check whether the dirstate has changed before rereading it."""
440
411
441 for a in ("_map", "_branch", "_ignore"):
412 for a in ("_map", "_branch", "_ignore"):
442 if a in self.__dict__:
413 if a in self.__dict__:
443 delattr(self, a)
414 delattr(self, a)
444 self._lastnormaltime = 0
415 self._lastnormaltime = 0
445 self._dirty = False
416 self._dirty = False
446 self._parentwriters = 0
417 self._parentwriters = 0
447 self._origpl = None
418 self._origpl = None
448
419
449 def copy(self, source, dest):
420 def copy(self, source, dest):
450 """Mark dest as a copy of source. Unmark dest if source is None."""
421 """Mark dest as a copy of source. Unmark dest if source is None."""
451 if source == dest:
422 if source == dest:
452 return
423 return
453 self._dirty = True
424 self._dirty = True
454 if source is not None:
425 if source is not None:
455 self._map.copymap[dest] = source
426 self._map.copymap[dest] = source
456 else:
427 else:
457 self._map.copymap.pop(dest, None)
428 self._map.copymap.pop(dest, None)
458
429
459 def copied(self, file):
430 def copied(self, file):
460 return self._map.copymap.get(file, None)
431 return self._map.copymap.get(file, None)
461
432
462 def copies(self):
433 def copies(self):
463 return self._map.copymap
434 return self._map.copymap
464
435
465 @requires_no_parents_change
436 @requires_no_parents_change
466 def set_tracked(self, filename):
437 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
438 """a "public" method for generic code to mark a file as tracked
468
439
469 This function is to be called outside of "update/merge" case. For
440 This function is to be called outside of "update/merge" case. For
470 example by a command like `hg add X`.
441 example by a command like `hg add X`.
471
442
472 return True the file was previously untracked, False otherwise.
443 return True the file was previously untracked, False otherwise.
473 """
444 """
474 self._dirty = True
445 self._dirty = True
475 entry = self._map.get(filename)
446 entry = self._map.get(filename)
476 if entry is None or not entry.tracked:
447 if entry is None or not entry.tracked:
477 self._check_new_tracked_filename(filename)
448 self._check_new_tracked_filename(filename)
478 return self._map.set_tracked(filename)
449 return self._map.set_tracked(filename)
479
450
480 @requires_no_parents_change
451 @requires_no_parents_change
481 def set_untracked(self, filename):
452 def set_untracked(self, filename):
482 """a "public" method for generic code to mark a file as untracked
453 """a "public" method for generic code to mark a file as untracked
483
454
484 This function is to be called outside of "update/merge" case. For
455 This function is to be called outside of "update/merge" case. For
485 example by a command like `hg remove X`.
456 example by a command like `hg remove X`.
486
457
487 return True the file was previously tracked, False otherwise.
458 return True the file was previously tracked, False otherwise.
488 """
459 """
489 ret = self._map.set_untracked(filename)
460 ret = self._map.set_untracked(filename)
490 if ret:
461 if ret:
491 self._dirty = True
462 self._dirty = True
492 return ret
463 return ret
493
464
494 @requires_no_parents_change
465 @requires_no_parents_change
495 def set_clean(self, filename, parentfiledata=None):
466 def set_clean(self, filename, parentfiledata=None):
496 """record that the current state of the file on disk is known to be clean"""
467 """record that the current state of the file on disk is known to be clean"""
497 self._dirty = True
468 self._dirty = True
498 if parentfiledata:
469 if parentfiledata:
499 (mode, size, mtime) = parentfiledata
470 (mode, size, mtime) = parentfiledata
500 else:
471 else:
501 (mode, size, mtime) = self._get_filedata(filename)
472 (mode, size, mtime) = self._get_filedata(filename)
502 if not self._map[filename].tracked:
473 if not self._map[filename].tracked:
503 self._check_new_tracked_filename(filename)
474 self._check_new_tracked_filename(filename)
504 self._map.set_clean(filename, mode, size, mtime)
475 self._map.set_clean(filename, mode, size, mtime)
505 if mtime > self._lastnormaltime:
476 if mtime > self._lastnormaltime:
506 # Remember the most recent modification timeslot for status(),
477 # Remember the most recent modification timeslot for status(),
507 # to make sure we won't miss future size-preserving file content
478 # to make sure we won't miss future size-preserving file content
508 # modifications that happen within the same timeslot.
479 # modifications that happen within the same timeslot.
509 self._lastnormaltime = mtime
480 self._lastnormaltime = mtime
510
481
511 @requires_no_parents_change
482 @requires_no_parents_change
512 def set_possibly_dirty(self, filename):
483 def set_possibly_dirty(self, filename):
513 """record that the current state of the file on disk is unknown"""
484 """record that the current state of the file on disk is unknown"""
514 self._dirty = True
485 self._dirty = True
515 self._map.set_possibly_dirty(filename)
486 self._map.set_possibly_dirty(filename)
516
487
517 @requires_parents_change
488 @requires_parents_change
518 def update_file_p1(
489 def update_file_p1(
519 self,
490 self,
520 filename,
491 filename,
521 p1_tracked,
492 p1_tracked,
522 ):
493 ):
523 """Set a file as tracked in the parent (or not)
494 """Set a file as tracked in the parent (or not)
524
495
525 This is to be called when adjust the dirstate to a new parent after an history
496 This is to be called when adjust the dirstate to a new parent after an history
526 rewriting operation.
497 rewriting operation.
527
498
528 It should not be called during a merge (p2 != nullid) and only within
499 It should not be called during a merge (p2 != nullid) and only within
529 a `with dirstate.parentchange():` context.
500 a `with dirstate.parentchange():` context.
530 """
501 """
531 if self.in_merge:
502 if self.in_merge:
532 msg = b'update_file_reference should not be called when merging'
503 msg = b'update_file_reference should not be called when merging'
533 raise error.ProgrammingError(msg)
504 raise error.ProgrammingError(msg)
534 entry = self._map.get(filename)
505 entry = self._map.get(filename)
535 if entry is None:
506 if entry is None:
536 wc_tracked = False
507 wc_tracked = False
537 else:
508 else:
538 wc_tracked = entry.tracked
509 wc_tracked = entry.tracked
539 possibly_dirty = False
510 possibly_dirty = False
540 if p1_tracked and wc_tracked:
511 if p1_tracked and wc_tracked:
541 # the underlying reference might have changed, we will have to
512 # the underlying reference might have changed, we will have to
542 # check it.
513 # check it.
543 possibly_dirty = True
514 possibly_dirty = True
544 elif not (p1_tracked or wc_tracked):
515 elif not (p1_tracked or wc_tracked):
545 # the file is no longer relevant to anyone
516 # the file is no longer relevant to anyone
546 if self._map.get(filename) is not None:
517 if self._map.get(filename) is not None:
547 self._map.reset_state(filename)
518 self._map.reset_state(filename)
548 self._dirty = True
519 self._dirty = True
549 elif (not p1_tracked) and wc_tracked:
520 elif (not p1_tracked) and wc_tracked:
550 if entry is not None and entry.added:
521 if entry is not None and entry.added:
551 return # avoid dropping copy information (maybe?)
522 return # avoid dropping copy information (maybe?)
552 elif p1_tracked and not wc_tracked:
523 elif p1_tracked and not wc_tracked:
553 pass
524 pass
554 else:
525 else:
555 assert False, 'unreachable'
526 assert False, 'unreachable'
556
527
557 # this mean we are doing call for file we do not really care about the
528 # this mean we are doing call for file we do not really care about the
558 # data (eg: added or removed), however this should be a minor overhead
529 # data (eg: added or removed), however this should be a minor overhead
559 # compared to the overall update process calling this.
530 # compared to the overall update process calling this.
560 parentfiledata = None
531 parentfiledata = None
561 if wc_tracked:
532 if wc_tracked:
562 parentfiledata = self._get_filedata(filename)
533 parentfiledata = self._get_filedata(filename)
563
534
564 self._map.reset_state(
535 self._map.reset_state(
565 filename,
536 filename,
566 wc_tracked,
537 wc_tracked,
567 p1_tracked,
538 p1_tracked,
568 possibly_dirty=possibly_dirty,
539 possibly_dirty=possibly_dirty,
569 parentfiledata=parentfiledata,
540 parentfiledata=parentfiledata,
570 )
541 )
571 if (
542 if (
572 parentfiledata is not None
543 parentfiledata is not None
573 and parentfiledata[2] > self._lastnormaltime
544 and parentfiledata[2] > self._lastnormaltime
574 ):
545 ):
575 # Remember the most recent modification timeslot for status(),
546 # Remember the most recent modification timeslot for status(),
576 # to make sure we won't miss future size-preserving file content
547 # to make sure we won't miss future size-preserving file content
577 # modifications that happen within the same timeslot.
548 # modifications that happen within the same timeslot.
578 self._lastnormaltime = parentfiledata[2]
549 self._lastnormaltime = parentfiledata[2]
579
550
@requires_parents_change
def update_file(
    self,
    filename,
    wc_tracked,
    p1_tracked,
    p2_tracked=False,
    merged=False,
    clean_p1=False,
    clean_p2=False,
    possibly_dirty=False,
    parentfiledata=None,
):
    """Record the dirstate status of `filename` after a parent change.

    Must run inside a `dirstate.parentchange` context: it reconciles the
    working-copy/tracked state of the file with the (new) parents.

    note: this API is still at an early stage and may be reshaped once it
    is clearer which information is relevant to other processing.
    """
    if merged and (clean_p1 or clean_p2):
        msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
        raise error.ProgrammingError(msg)

    # note: no need to double-check for a name clash here, we are in an
    # update/merge situation that already took care of it (the test
    # suite agrees).

    self._dirty = True

    # Stat data is only needed when the file is clean in p1 and tracked in
    # the working copy; for added/removed/merged entries we skip the stat,
    # a minor overhead compared to the overall update calling us.
    need_parent_file_data = (
        not (possibly_dirty or clean_p2 or merged)
        and wc_tracked
        and p1_tracked
    )
    if need_parent_file_data:
        if parentfiledata is None:
            parentfiledata = self._get_filedata(filename)
        mtime = parentfiledata[2]
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for
            # status(), to make sure we won't miss future
            # size-preserving file content modifications that happen
            # within the same timeslot.
            self._lastnormaltime = mtime

    self._map.reset_state(
        filename,
        wc_tracked,
        p1_tracked,
        p2_tracked=p2_tracked,
        merged=merged,
        clean_p1=clean_p1,
        clean_p2=clean_p2,
        possibly_dirty=possibly_dirty,
        parentfiledata=parentfiledata,
    )
    if (
        parentfiledata is not None
        and parentfiledata[2] > self._lastnormaltime
    ):
        # Same timeslot bookkeeping as above, for callers that supplied
        # their own `parentfiledata`.
        self._lastnormaltime = parentfiledata[2]
654
625
def _check_new_tracked_filename(self, filename):
    """Validate `filename` before it becomes a tracked file.

    Aborts when the name itself is invalid, when a tracked directory
    already bears that name, or when an ancestor directory of `filename`
    is itself recorded as a (non-removed) file in the dirstate.
    """
    scmutil.checkfilename(filename)
    if self._map.hastrackeddir(filename):
        msg = _(b'directory %r already in dirstate')
        msg %= pycompat.bytestr(filename)
        raise error.Abort(msg)
    # an ancestor of `filename` must not shadow an existing file entry
    for ancestor in pathutil.finddirs(filename):
        if self._map.hastrackeddir(ancestor):
            break
        entry = self._map.get(ancestor)
        if entry is not None and not entry.removed:
            msg = _(b'file %r in dirstate clashes with %r')
            msg %= (pycompat.bytestr(ancestor), pycompat.bytestr(filename))
            raise error.Abort(msg)
670
641
def _get_filedata(self, filename):
    """Return a (mode, size, mtime) triple for `filename`.

    Uses lstat so symlinks are described rather than followed.
    """
    st = os.lstat(self._join(filename))
    return (st.st_mode, st.st_size, st[stat.ST_MTIME])
678
649
def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
    """Discover the case-folded form of `path` and cache it.

    `normed` is the case-normalized form of `path`; the result is stored
    into `storemap[normed]` so later lookups skip the filesystem probing.
    """
    if exists is None:
        exists = os.path.lexists(os.path.join(self._root, path))
    if not exists:
        # Maybe a path component exists
        if not ignoremissing and b'/' in path:
            dirpart, filepart = path.rsplit(b'/', 1)
            dirpart = self._normalize(dirpart, False, ignoremissing, None)
            folded = dirpart + b"/" + filepart
        else:
            # No path components, preserve original case
            folded = path
    else:
        # recursively normalize leading directory components
        # against dirstate
        if b'/' in normed:
            dirpart, filepart = normed.rsplit(b'/', 1)
            dirpart = self._normalize(dirpart, False, ignoremissing, True)
            fullpath = self._root + b"/" + dirpart
            folded = dirpart + b"/" + util.fspath(filepart, fullpath)
        else:
            folded = util.fspath(normed, self._root)
    storemap[normed] = folded

    return folded
704
675
def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
    """Case-normalize `path` against known *files* only.

    Consults the file fold map first; on a miss, either trusts the
    caller (`isknown`) or probes the filesystem via `_discoverpath`.
    """
    normed = util.normcase(path)
    folded = self._map.filefoldmap.get(normed, None)
    if folded is not None:
        return folded
    if isknown:
        return path
    return self._discoverpath(
        path, normed, ignoremissing, exists, self._map.filefoldmap
    )
716
687
def _normalize(self, path, isknown, ignoremissing=False, exists=None):
    """Case-normalize `path` against known files, then known directories."""
    normed = util.normcase(path)
    folded = self._map.filefoldmap.get(normed, None)
    if folded is None:
        folded = self._map.dirfoldmap.get(normed, None)
    if folded is not None:
        return folded
    if isknown:
        return path
    # store discovered result in dirfoldmap so that future
    # normalizefile calls don't start matching directories
    return self._discoverpath(
        path, normed, ignoremissing, exists, self._map.dirfoldmap
    )
732
703
def normalize(self, path, isknown=False, ignoremissing=False):
    """
    normalize the case of a pathname when on a casefolding filesystem

    isknown specifies whether the filename came from walking the
    disk, to avoid extra filesystem access.

    If ignoremissing is True, missing path are returned
    unchanged. Otherwise, we try harder to normalize possibly
    existing path components.

    The normalized case is determined based on the following precedence:

    - version of name already stored in the dirstate
    - version of name stored on disk
    - version provided via command arguments
    """
    # case-preserving filesystems need no folding at all
    if not self._checkcase:
        return path
    return self._normalize(path, isknown, ignoremissing)
754
725
def clear(self):
    """Drop every entry from the dirstate map and mark the state dirty."""
    self._map.clear()
    self._lastnormaltime = 0
    self._dirty = True
759
730
def rebuild(self, parent, allfiles, changedfiles=None):
    """Rebuild the dirstate on top of `parent`.

    With `changedfiles` is None the whole dirstate is reconstructed from
    `allfiles`; otherwise only the named files are re-examined: the ones
    present in `allfiles` are marked possibly-dirty, the others dropped.
    """
    if changedfiles is None:
        # Rebuild entire dirstate
        to_lookup = allfiles
        to_drop = []
        # clear() resets _lastnormaltime; preserve it across the wipe
        lastnormaltime = self._lastnormaltime
        self.clear()
        self._lastnormaltime = lastnormaltime
    elif len(changedfiles) < 10:
        # Avoid turning allfiles into a set, which can be expensive if it's
        # large.
        to_lookup = [f for f in changedfiles if f in allfiles]
        to_drop = [f for f in changedfiles if f not in allfiles]
    else:
        changedfilesset = set(changedfiles)
        to_lookup = changedfilesset & set(allfiles)
        to_drop = changedfilesset - to_lookup

    if self._origpl is None:
        self._origpl = self._pl
    self._map.setparents(parent, self._nodeconstants.nullid)

    for f in to_lookup:
        if self.in_merge:
            self.set_tracked(f)
        else:
            self._map.reset_state(
                f,
                wc_tracked=True,
                p1_tracked=True,
                possibly_dirty=True,
            )
    for f in to_drop:
        self._map.reset_state(f)

    self._dirty = True
802
773
def identity(self):
    """Return an identity token of the dirstate storage itself.

    When a previously recorded identity compares equal to this one, the
    underlying storage has not changed, so writing out changes computed
    from the former dirstate preserves consistency.
    """
    return self._map.identity
810
781
def write(self, tr):
    """Flush pending dirstate changes, immediately or via transaction `tr`.

    No-op when nothing is dirty. With a transaction, writing is delayed
    through a file generator so ambiguous mtimes can be dropped at the
    right moment; otherwise the file is written synchronously.
    """
    if not self._dirty:
        return

    filename = self._filename
    if not tr:
        # synchronous path: write through an atomic, ambiguity-checked file
        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)
        return

    # 'dirstate.write()' is not only for writing in-memory
    # changes out, but also for dropping ambiguous timestamp.
    # delayed writing re-raise "ambiguous timestamp issue".
    # See also the wiki page below for detail:
    # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

    # record when mtime start to be ambiguous
    now = _getfsnow(self._opener)

    # delay writing in-memory changes out
    tr.addfilegenerator(
        b'dirstate',
        (self._filename,),
        lambda f: self._writedirstate(tr, f, now=now),
        location=b'plain',
    )
837
808
def addparentchangecallback(self, category, callback):
    """Register `callback` to run when the working-dir parents change.

    The callback is invoked as:
        callback(dirstate, (oldp1, oldp2), (newp1, newp2))

    `category` is a unique key; registering again under the same
    category replaces the previously stored callback.
    """
    self._plchangecallbacks[category] = callback
848
819
def _writedirstate(self, tr, st, now=None):
    """Serialize the dirstate map into file object `st`.

    Fires registered parent-change callbacks first, then possibly sleeps
    (debug.dirstate.delaywrite) so no entry keeps an ambiguous mtime,
    and finally writes the map and resets the dirty flag.
    """
    # notify callbacks about parents change
    if self._origpl is not None and self._origpl != self._pl:
        for c, callback in sorted(
            pycompat.iteritems(self._plchangecallbacks)
        ):
            callback(self, self._origpl, self._pl)
        self._origpl = None

    if now is None:
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

    # enough 'delaywrite' prevents 'pack_dirstate' from dropping
    # timestamp of each entries in dirstate, because of 'now > mtime'
    delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
    if delaywrite > 0:
        # do we have any files to delay for?
        for f, e in pycompat.iteritems(self._map):
            if e.need_delay(now):
                import time  # to avoid useless import

                # rather than sleep n seconds, sleep until the next
                # multiple of n seconds
                clock = time.time()
                start = int(clock) - (int(clock) % delaywrite)
                end = start + delaywrite
                time.sleep(end - clock)
                now = end  # trust our estimate that the end is near now
                break

    self._map.write(tr, st, now)
    self._lastnormaltime = 0
    self._dirty = False
884
855
def _dirignore(self, f):
    """True when `f` or any of its ancestor directories is ignored."""
    if self._ignore(f):
        return True
    return any(self._ignore(p) for p in pathutil.finddirs(f))
892
863
def _ignorefiles(self):
    """Return the list of ignore-file paths applying to this repository.

    Covers the repository's own `.hgignore` (when present) plus every
    `ui.ignore` / `ui.ignore.*` configuration entry.
    """
    files = []
    hgignore = self._join(b'.hgignore')
    if os.path.exists(hgignore):
        files.append(hgignore)
    for name, path in self._ui.configitems(b"ui"):
        if name == b'ignore' or name.startswith(b'ignore.'):
            # we need to use os.path.join here rather than self._join
            # because path is arbitrary and user-specified
            files.append(os.path.join(self._rootdir, util.expandpath(path)))
    return files
903
874
def _ignorefileandline(self, f):
    """Return (ignorefile, lineno, line) for the rule that ignores `f`.

    Walks all ignore files breadth-first, following `subinclude`
    directives (each file visited at most once); returns
    (None, -1, b"") when no pattern matches.
    """
    pending = collections.deque(self._ignorefiles())
    visited = set()
    while pending:
        ignorefile = pending.popleft()
        patterns = matchmod.readpatternfile(
            ignorefile, self._ui.warn, sourceinfo=True
        )
        for pattern, lineno, line in patterns:
            kind, p = matchmod._patsplit(pattern, b'glob')
            if kind == b"subinclude":
                if p not in visited:
                    pending.append(p)
                continue
            m = matchmod.match(
                self._root, b'', [], [pattern], warn=self._ui.warn
            )
            if m(f):
                return (ignorefile, lineno, line)
        visited.add(ignorefile)
    return (None, -1, b"")
925
896
def _walkexplicit(self, match, subrepos):
    """Get stat data about the files explicitly specified by match.

    Return a triple (results, dirsfound, dirsnotfound).
    - results is a mapping from filename to stat result. It also contains
      listings mapping subrepos and .hg to None.
    - dirsfound is a list of files found to be directories.
    - dirsnotfound is a list of files that the dirstate thinks are
      directories and that were not found."""

    def badtype(mode):
        # human-readable description for file types we cannot track
        kind = _(b'unknown')
        if stat.S_ISCHR(mode):
            kind = _(b'character device')
        elif stat.S_ISBLK(mode):
            kind = _(b'block device')
        elif stat.S_ISFIFO(mode):
            kind = _(b'fifo')
        elif stat.S_ISSOCK(mode):
            kind = _(b'socket')
        elif stat.S_ISDIR(mode):
            kind = _(b'directory')
        return _(b'unsupported file type (type is %s)') % kind

    # bind hot lookups to locals for the per-file loop below
    badfn = match.bad
    dmap = self._map
    lstat = os.lstat
    getkind = stat.S_IFMT
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self._join
    dirsfound = []
    foundadd = dirsfound.append
    dirsnotfound = []
    notfoundadd = dirsnotfound.append

    if not match.isexact() and self._checkcase:
        normalize = self._normalize
    else:
        normalize = None

    # drop any file that lives inside a subrepo: the subrepo owns it
    files = sorted(match.files())
    subrepos.sort()
    i, j = 0, 0
    while i < len(files) and j < len(subrepos):
        subpath = subrepos[j] + b"/"
        if files[i] < subpath:
            i += 1
            continue
        while i < len(files) and files[i].startswith(subpath):
            del files[i]
        j += 1

    if not files or b'' in files:
        files = [b'']
        # constructing the foldmap is expensive, so don't do it for the
        # common case where files is ['']
        normalize = None
    results = dict.fromkeys(subrepos)
    results[b'.hg'] = None

    for ff in files:
        if normalize:
            nf = normalize(ff, False, True)
        else:
            nf = ff
        if nf in results:
            continue

        try:
            st = lstat(join(nf))
            kind = getkind(st.st_mode)
            if kind == dirkind:
                if nf in dmap:
                    # file replaced by dir on disk but still in dirstate
                    results[nf] = None
                foundadd((nf, ff))
            elif kind == regkind or kind == lnkkind:
                results[nf] = st
            else:
                badfn(ff, badtype(kind))
                if nf in dmap:
                    results[nf] = None
        except OSError as err:  # nf not found on disk - it is dirstate only
            if nf in dmap:  # does it exactly match a missing file?
                results[nf] = None
            else:  # does it match a missing directory?
                if self._map.hasdir(nf):
                    notfoundadd(nf)
                else:
                    badfn(ff, encoding.strtolocal(err.strerror))

    # match.files() may contain explicitly-specified paths that shouldn't
    # be taken; drop them from the list of files found. dirsfound/notfound
    # aren't filtered here because they will be tested later.
    if match.anypats():
        for f in list(results):
            if f == b'.hg' or f in subrepos:
                # keep sentinel to disable further out-of-repo walks
                continue
            if not match(f):
                del results[f]

    # Case insensitive filesystems cannot rely on lstat() failing to detect
    # a case-only rename. Prune the stat object for any file that does not
    # match the case in the filesystem, if there are multiple files that
    # normalize to the same path.
    if match.isexact() and self._checkcase:
        normed = {}

        # group result paths by their case-normalized form
        for f, st in pycompat.iteritems(results):
            if st is None:
                continue
            normed.setdefault(util.normcase(f), set()).add(f)

        for norm, paths in pycompat.iteritems(normed):
            if len(paths) > 1:
                for path in paths:
                    folded = self._discoverpath(
                        path, norm, True, None, self._map.dirfoldmap
                    )
                    if path != folded:
                        results[path] = None

    return results, dirsfound, dirsnotfound
1060
1031
1061 def walk(self, match, subrepos, unknown, ignored, full=True):
1032 def walk(self, match, subrepos, unknown, ignored, full=True):
1062 """
1033 """
1063 Walk recursively through the directory tree, finding all files
1034 Walk recursively through the directory tree, finding all files
1064 matched by match.
1035 matched by match.
1065
1036
1066 If full is False, maybe skip some known-clean files.
1037 If full is False, maybe skip some known-clean files.
1067
1038
1068 Return a dict mapping filename to stat-like object (either
1039 Return a dict mapping filename to stat-like object (either
1069 mercurial.osutil.stat instance or return value of os.stat()).
1040 mercurial.osutil.stat instance or return value of os.stat()).
1070
1041
1071 """
1042 """
1072 # full is a flag that extensions that hook into walk can use -- this
1043 # full is a flag that extensions that hook into walk can use -- this
1073 # implementation doesn't use it at all. This satisfies the contract
1044 # implementation doesn't use it at all. This satisfies the contract
1074 # because we only guarantee a "maybe".
1045 # because we only guarantee a "maybe".
1075
1046
1076 if ignored:
1047 if ignored:
1077 ignore = util.never
1048 ignore = util.never
1078 dirignore = util.never
1049 dirignore = util.never
1079 elif unknown:
1050 elif unknown:
1080 ignore = self._ignore
1051 ignore = self._ignore
1081 dirignore = self._dirignore
1052 dirignore = self._dirignore
1082 else:
1053 else:
1083 # if not unknown and not ignored, drop dir recursion and step 2
1054 # if not unknown and not ignored, drop dir recursion and step 2
1084 ignore = util.always
1055 ignore = util.always
1085 dirignore = util.always
1056 dirignore = util.always
1086
1057
1087 matchfn = match.matchfn
1058 matchfn = match.matchfn
1088 matchalways = match.always()
1059 matchalways = match.always()
1089 matchtdir = match.traversedir
1060 matchtdir = match.traversedir
1090 dmap = self._map
1061 dmap = self._map
1091 listdir = util.listdir
1062 listdir = util.listdir
1092 lstat = os.lstat
1063 lstat = os.lstat
1093 dirkind = stat.S_IFDIR
1064 dirkind = stat.S_IFDIR
1094 regkind = stat.S_IFREG
1065 regkind = stat.S_IFREG
1095 lnkkind = stat.S_IFLNK
1066 lnkkind = stat.S_IFLNK
1096 join = self._join
1067 join = self._join
1097
1068
1098 exact = skipstep3 = False
1069 exact = skipstep3 = False
1099 if match.isexact(): # match.exact
1070 if match.isexact(): # match.exact
1100 exact = True
1071 exact = True
1101 dirignore = util.always # skip step 2
1072 dirignore = util.always # skip step 2
1102 elif match.prefix(): # match.match, no patterns
1073 elif match.prefix(): # match.match, no patterns
1103 skipstep3 = True
1074 skipstep3 = True
1104
1075
1105 if not exact and self._checkcase:
1076 if not exact and self._checkcase:
1106 normalize = self._normalize
1077 normalize = self._normalize
1107 normalizefile = self._normalizefile
1078 normalizefile = self._normalizefile
1108 skipstep3 = False
1079 skipstep3 = False
1109 else:
1080 else:
1110 normalize = self._normalize
1081 normalize = self._normalize
1111 normalizefile = None
1082 normalizefile = None
1112
1083
1113 # step 1: find all explicit files
1084 # step 1: find all explicit files
1114 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1085 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1115 if matchtdir:
1086 if matchtdir:
1116 for d in work:
1087 for d in work:
1117 matchtdir(d[0])
1088 matchtdir(d[0])
1118 for d in dirsnotfound:
1089 for d in dirsnotfound:
1119 matchtdir(d)
1090 matchtdir(d)
1120
1091
1121 skipstep3 = skipstep3 and not (work or dirsnotfound)
1092 skipstep3 = skipstep3 and not (work or dirsnotfound)
1122 work = [d for d in work if not dirignore(d[0])]
1093 work = [d for d in work if not dirignore(d[0])]
1123
1094
1124 # step 2: visit subdirectories
1095 # step 2: visit subdirectories
1125 def traverse(work, alreadynormed):
1096 def traverse(work, alreadynormed):
1126 wadd = work.append
1097 wadd = work.append
1127 while work:
1098 while work:
1128 tracing.counter('dirstate.walk work', len(work))
1099 tracing.counter('dirstate.walk work', len(work))
1129 nd = work.pop()
1100 nd = work.pop()
1130 visitentries = match.visitchildrenset(nd)
1101 visitentries = match.visitchildrenset(nd)
1131 if not visitentries:
1102 if not visitentries:
1132 continue
1103 continue
1133 if visitentries == b'this' or visitentries == b'all':
1104 if visitentries == b'this' or visitentries == b'all':
1134 visitentries = None
1105 visitentries = None
1135 skip = None
1106 skip = None
1136 if nd != b'':
1107 if nd != b'':
1137 skip = b'.hg'
1108 skip = b'.hg'
1138 try:
1109 try:
1139 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1110 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1140 entries = listdir(join(nd), stat=True, skip=skip)
1111 entries = listdir(join(nd), stat=True, skip=skip)
1141 except OSError as inst:
1112 except OSError as inst:
1142 if inst.errno in (errno.EACCES, errno.ENOENT):
1113 if inst.errno in (errno.EACCES, errno.ENOENT):
1143 match.bad(
1114 match.bad(
1144 self.pathto(nd), encoding.strtolocal(inst.strerror)
1115 self.pathto(nd), encoding.strtolocal(inst.strerror)
1145 )
1116 )
1146 continue
1117 continue
1147 raise
1118 raise
1148 for f, kind, st in entries:
1119 for f, kind, st in entries:
1149 # Some matchers may return files in the visitentries set,
1120 # Some matchers may return files in the visitentries set,
1150 # instead of 'this', if the matcher explicitly mentions them
1121 # instead of 'this', if the matcher explicitly mentions them
1151 # and is not an exactmatcher. This is acceptable; we do not
1122 # and is not an exactmatcher. This is acceptable; we do not
1152 # make any hard assumptions about file-or-directory below
1123 # make any hard assumptions about file-or-directory below
1153 # based on the presence of `f` in visitentries. If
1124 # based on the presence of `f` in visitentries. If
1154 # visitchildrenset returned a set, we can always skip the
1125 # visitchildrenset returned a set, we can always skip the
1155 # entries *not* in the set it provided regardless of whether
1126 # entries *not* in the set it provided regardless of whether
1156 # they're actually a file or a directory.
1127 # they're actually a file or a directory.
1157 if visitentries and f not in visitentries:
1128 if visitentries and f not in visitentries:
1158 continue
1129 continue
1159 if normalizefile:
1130 if normalizefile:
1160 # even though f might be a directory, we're only
1131 # even though f might be a directory, we're only
1161 # interested in comparing it to files currently in the
1132 # interested in comparing it to files currently in the
1162 # dmap -- therefore normalizefile is enough
1133 # dmap -- therefore normalizefile is enough
1163 nf = normalizefile(
1134 nf = normalizefile(
1164 nd and (nd + b"/" + f) or f, True, True
1135 nd and (nd + b"/" + f) or f, True, True
1165 )
1136 )
1166 else:
1137 else:
1167 nf = nd and (nd + b"/" + f) or f
1138 nf = nd and (nd + b"/" + f) or f
1168 if nf not in results:
1139 if nf not in results:
1169 if kind == dirkind:
1140 if kind == dirkind:
1170 if not ignore(nf):
1141 if not ignore(nf):
1171 if matchtdir:
1142 if matchtdir:
1172 matchtdir(nf)
1143 matchtdir(nf)
1173 wadd(nf)
1144 wadd(nf)
1174 if nf in dmap and (matchalways or matchfn(nf)):
1145 if nf in dmap and (matchalways or matchfn(nf)):
1175 results[nf] = None
1146 results[nf] = None
1176 elif kind == regkind or kind == lnkkind:
1147 elif kind == regkind or kind == lnkkind:
1177 if nf in dmap:
1148 if nf in dmap:
1178 if matchalways or matchfn(nf):
1149 if matchalways or matchfn(nf):
1179 results[nf] = st
1150 results[nf] = st
1180 elif (matchalways or matchfn(nf)) and not ignore(
1151 elif (matchalways or matchfn(nf)) and not ignore(
1181 nf
1152 nf
1182 ):
1153 ):
1183 # unknown file -- normalize if necessary
1154 # unknown file -- normalize if necessary
1184 if not alreadynormed:
1155 if not alreadynormed:
1185 nf = normalize(nf, False, True)
1156 nf = normalize(nf, False, True)
1186 results[nf] = st
1157 results[nf] = st
1187 elif nf in dmap and (matchalways or matchfn(nf)):
1158 elif nf in dmap and (matchalways or matchfn(nf)):
1188 results[nf] = None
1159 results[nf] = None
1189
1160
1190 for nd, d in work:
1161 for nd, d in work:
1191 # alreadynormed means that processwork doesn't have to do any
1162 # alreadynormed means that processwork doesn't have to do any
1192 # expensive directory normalization
1163 # expensive directory normalization
1193 alreadynormed = not normalize or nd == d
1164 alreadynormed = not normalize or nd == d
1194 traverse([d], alreadynormed)
1165 traverse([d], alreadynormed)
1195
1166
1196 for s in subrepos:
1167 for s in subrepos:
1197 del results[s]
1168 del results[s]
1198 del results[b'.hg']
1169 del results[b'.hg']
1199
1170
1200 # step 3: visit remaining files from dmap
1171 # step 3: visit remaining files from dmap
1201 if not skipstep3 and not exact:
1172 if not skipstep3 and not exact:
1202 # If a dmap file is not in results yet, it was either
1173 # If a dmap file is not in results yet, it was either
1203 # a) not matching matchfn b) ignored, c) missing, or d) under a
1174 # a) not matching matchfn b) ignored, c) missing, or d) under a
1204 # symlink directory.
1175 # symlink directory.
1205 if not results and matchalways:
1176 if not results and matchalways:
1206 visit = [f for f in dmap]
1177 visit = [f for f in dmap]
1207 else:
1178 else:
1208 visit = [f for f in dmap if f not in results and matchfn(f)]
1179 visit = [f for f in dmap if f not in results and matchfn(f)]
1209 visit.sort()
1180 visit.sort()
1210
1181
1211 if unknown:
1182 if unknown:
1212 # unknown == True means we walked all dirs under the roots
1183 # unknown == True means we walked all dirs under the roots
1213 # that wasn't ignored, and everything that matched was stat'ed
1184 # that wasn't ignored, and everything that matched was stat'ed
1214 # and is already in results.
1185 # and is already in results.
1215 # The rest must thus be ignored or under a symlink.
1186 # The rest must thus be ignored or under a symlink.
1216 audit_path = pathutil.pathauditor(self._root, cached=True)
1187 audit_path = pathutil.pathauditor(self._root, cached=True)
1217
1188
1218 for nf in iter(visit):
1189 for nf in iter(visit):
1219 # If a stat for the same file was already added with a
1190 # If a stat for the same file was already added with a
1220 # different case, don't add one for this, since that would
1191 # different case, don't add one for this, since that would
1221 # make it appear as if the file exists under both names
1192 # make it appear as if the file exists under both names
1222 # on disk.
1193 # on disk.
1223 if (
1194 if (
1224 normalizefile
1195 normalizefile
1225 and normalizefile(nf, True, True) in results
1196 and normalizefile(nf, True, True) in results
1226 ):
1197 ):
1227 results[nf] = None
1198 results[nf] = None
1228 # Report ignored items in the dmap as long as they are not
1199 # Report ignored items in the dmap as long as they are not
1229 # under a symlink directory.
1200 # under a symlink directory.
1230 elif audit_path.check(nf):
1201 elif audit_path.check(nf):
1231 try:
1202 try:
1232 results[nf] = lstat(join(nf))
1203 results[nf] = lstat(join(nf))
1233 # file was just ignored, no links, and exists
1204 # file was just ignored, no links, and exists
1234 except OSError:
1205 except OSError:
1235 # file doesn't exist
1206 # file doesn't exist
1236 results[nf] = None
1207 results[nf] = None
1237 else:
1208 else:
1238 # It's either missing or under a symlink directory
1209 # It's either missing or under a symlink directory
1239 # which we in this case report as missing
1210 # which we in this case report as missing
1240 results[nf] = None
1211 results[nf] = None
1241 else:
1212 else:
1242 # We may not have walked the full directory tree above,
1213 # We may not have walked the full directory tree above,
1243 # so stat and check everything we missed.
1214 # so stat and check everything we missed.
1244 iv = iter(visit)
1215 iv = iter(visit)
1245 for st in util.statfiles([join(i) for i in visit]):
1216 for st in util.statfiles([join(i) for i in visit]):
1246 results[next(iv)] = st
1217 results[next(iv)] = st
1247 return results
1218 return results
1248
1219
1249 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1220 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1250 # Force Rayon (Rust parallelism library) to respect the number of
1221 # Force Rayon (Rust parallelism library) to respect the number of
1251 # workers. This is a temporary workaround until Rust code knows
1222 # workers. This is a temporary workaround until Rust code knows
1252 # how to read the config file.
1223 # how to read the config file.
1253 numcpus = self._ui.configint(b"worker", b"numcpus")
1224 numcpus = self._ui.configint(b"worker", b"numcpus")
1254 if numcpus is not None:
1225 if numcpus is not None:
1255 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1226 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1256
1227
1257 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1228 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1258 if not workers_enabled:
1229 if not workers_enabled:
1259 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1230 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1260
1231
1261 (
1232 (
1262 lookup,
1233 lookup,
1263 modified,
1234 modified,
1264 added,
1235 added,
1265 removed,
1236 removed,
1266 deleted,
1237 deleted,
1267 clean,
1238 clean,
1268 ignored,
1239 ignored,
1269 unknown,
1240 unknown,
1270 warnings,
1241 warnings,
1271 bad,
1242 bad,
1272 traversed,
1243 traversed,
1273 dirty,
1244 dirty,
1274 ) = rustmod.status(
1245 ) = rustmod.status(
1275 self._map._rustmap,
1246 self._map._rustmap,
1276 matcher,
1247 matcher,
1277 self._rootdir,
1248 self._rootdir,
1278 self._ignorefiles(),
1249 self._ignorefiles(),
1279 self._checkexec,
1250 self._checkexec,
1280 self._lastnormaltime,
1251 self._lastnormaltime,
1281 bool(list_clean),
1252 bool(list_clean),
1282 bool(list_ignored),
1253 bool(list_ignored),
1283 bool(list_unknown),
1254 bool(list_unknown),
1284 bool(matcher.traversedir),
1255 bool(matcher.traversedir),
1285 )
1256 )
1286
1257
1287 self._dirty |= dirty
1258 self._dirty |= dirty
1288
1259
1289 if matcher.traversedir:
1260 if matcher.traversedir:
1290 for dir in traversed:
1261 for dir in traversed:
1291 matcher.traversedir(dir)
1262 matcher.traversedir(dir)
1292
1263
1293 if self._ui.warn:
1264 if self._ui.warn:
1294 for item in warnings:
1265 for item in warnings:
1295 if isinstance(item, tuple):
1266 if isinstance(item, tuple):
1296 file_path, syntax = item
1267 file_path, syntax = item
1297 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1268 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1298 file_path,
1269 file_path,
1299 syntax,
1270 syntax,
1300 )
1271 )
1301 self._ui.warn(msg)
1272 self._ui.warn(msg)
1302 else:
1273 else:
1303 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1274 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1304 self._ui.warn(
1275 self._ui.warn(
1305 msg
1276 msg
1306 % (
1277 % (
1307 pathutil.canonpath(
1278 pathutil.canonpath(
1308 self._rootdir, self._rootdir, item
1279 self._rootdir, self._rootdir, item
1309 ),
1280 ),
1310 b"No such file or directory",
1281 b"No such file or directory",
1311 )
1282 )
1312 )
1283 )
1313
1284
1314 for (fn, message) in bad:
1285 for (fn, message) in bad:
1315 matcher.bad(fn, encoding.strtolocal(message))
1286 matcher.bad(fn, encoding.strtolocal(message))
1316
1287
1317 status = scmutil.status(
1288 status = scmutil.status(
1318 modified=modified,
1289 modified=modified,
1319 added=added,
1290 added=added,
1320 removed=removed,
1291 removed=removed,
1321 deleted=deleted,
1292 deleted=deleted,
1322 unknown=unknown,
1293 unknown=unknown,
1323 ignored=ignored,
1294 ignored=ignored,
1324 clean=clean,
1295 clean=clean,
1325 )
1296 )
1326 return (lookup, status)
1297 return (lookup, status)
1327
1298
1328 def status(self, match, subrepos, ignored, clean, unknown):
1299 def status(self, match, subrepos, ignored, clean, unknown):
1329 """Determine the status of the working copy relative to the
1300 """Determine the status of the working copy relative to the
1330 dirstate and return a pair of (unsure, status), where status is of type
1301 dirstate and return a pair of (unsure, status), where status is of type
1331 scmutil.status and:
1302 scmutil.status and:
1332
1303
1333 unsure:
1304 unsure:
1334 files that might have been modified since the dirstate was
1305 files that might have been modified since the dirstate was
1335 written, but need to be read to be sure (size is the same
1306 written, but need to be read to be sure (size is the same
1336 but mtime differs)
1307 but mtime differs)
1337 status.modified:
1308 status.modified:
1338 files that have definitely been modified since the dirstate
1309 files that have definitely been modified since the dirstate
1339 was written (different size or mode)
1310 was written (different size or mode)
1340 status.clean:
1311 status.clean:
1341 files that have definitely not been modified since the
1312 files that have definitely not been modified since the
1342 dirstate was written
1313 dirstate was written
1343 """
1314 """
1344 listignored, listclean, listunknown = ignored, clean, unknown
1315 listignored, listclean, listunknown = ignored, clean, unknown
1345 lookup, modified, added, unknown, ignored = [], [], [], [], []
1316 lookup, modified, added, unknown, ignored = [], [], [], [], []
1346 removed, deleted, clean = [], [], []
1317 removed, deleted, clean = [], [], []
1347
1318
1348 dmap = self._map
1319 dmap = self._map
1349 dmap.preload()
1320 dmap.preload()
1350
1321
1351 use_rust = True
1322 use_rust = True
1352
1323
1353 allowed_matchers = (
1324 allowed_matchers = (
1354 matchmod.alwaysmatcher,
1325 matchmod.alwaysmatcher,
1355 matchmod.exactmatcher,
1326 matchmod.exactmatcher,
1356 matchmod.includematcher,
1327 matchmod.includematcher,
1357 )
1328 )
1358
1329
1359 if rustmod is None:
1330 if rustmod is None:
1360 use_rust = False
1331 use_rust = False
1361 elif self._checkcase:
1332 elif self._checkcase:
1362 # Case-insensitive filesystems are not handled yet
1333 # Case-insensitive filesystems are not handled yet
1363 use_rust = False
1334 use_rust = False
1364 elif subrepos:
1335 elif subrepos:
1365 use_rust = False
1336 use_rust = False
1366 elif sparse.enabled:
1337 elif sparse.enabled:
1367 use_rust = False
1338 use_rust = False
1368 elif not isinstance(match, allowed_matchers):
1339 elif not isinstance(match, allowed_matchers):
1369 # Some matchers have yet to be implemented
1340 # Some matchers have yet to be implemented
1370 use_rust = False
1341 use_rust = False
1371
1342
1372 if use_rust:
1343 if use_rust:
1373 try:
1344 try:
1374 return self._rust_status(
1345 return self._rust_status(
1375 match, listclean, listignored, listunknown
1346 match, listclean, listignored, listunknown
1376 )
1347 )
1377 except rustmod.FallbackError:
1348 except rustmod.FallbackError:
1378 pass
1349 pass
1379
1350
1380 def noop(f):
1351 def noop(f):
1381 pass
1352 pass
1382
1353
1383 dcontains = dmap.__contains__
1354 dcontains = dmap.__contains__
1384 dget = dmap.__getitem__
1355 dget = dmap.__getitem__
1385 ladd = lookup.append # aka "unsure"
1356 ladd = lookup.append # aka "unsure"
1386 madd = modified.append
1357 madd = modified.append
1387 aadd = added.append
1358 aadd = added.append
1388 uadd = unknown.append if listunknown else noop
1359 uadd = unknown.append if listunknown else noop
1389 iadd = ignored.append if listignored else noop
1360 iadd = ignored.append if listignored else noop
1390 radd = removed.append
1361 radd = removed.append
1391 dadd = deleted.append
1362 dadd = deleted.append
1392 cadd = clean.append if listclean else noop
1363 cadd = clean.append if listclean else noop
1393 mexact = match.exact
1364 mexact = match.exact
1394 dirignore = self._dirignore
1365 dirignore = self._dirignore
1395 checkexec = self._checkexec
1366 checkexec = self._checkexec
1396 copymap = self._map.copymap
1367 copymap = self._map.copymap
1397 lastnormaltime = self._lastnormaltime
1368 lastnormaltime = self._lastnormaltime
1398
1369
1399 # We need to do full walks when either
1370 # We need to do full walks when either
1400 # - we're listing all clean files, or
1371 # - we're listing all clean files, or
1401 # - match.traversedir does something, because match.traversedir should
1372 # - match.traversedir does something, because match.traversedir should
1402 # be called for every dir in the working dir
1373 # be called for every dir in the working dir
1403 full = listclean or match.traversedir is not None
1374 full = listclean or match.traversedir is not None
1404 for fn, st in pycompat.iteritems(
1375 for fn, st in pycompat.iteritems(
1405 self.walk(match, subrepos, listunknown, listignored, full=full)
1376 self.walk(match, subrepos, listunknown, listignored, full=full)
1406 ):
1377 ):
1407 if not dcontains(fn):
1378 if not dcontains(fn):
1408 if (listignored or mexact(fn)) and dirignore(fn):
1379 if (listignored or mexact(fn)) and dirignore(fn):
1409 if listignored:
1380 if listignored:
1410 iadd(fn)
1381 iadd(fn)
1411 else:
1382 else:
1412 uadd(fn)
1383 uadd(fn)
1413 continue
1384 continue
1414
1385
1415 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1386 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1416 # written like that for performance reasons. dmap[fn] is not a
1387 # written like that for performance reasons. dmap[fn] is not a
1417 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1388 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1418 # opcode has fast paths when the value to be unpacked is a tuple or
1389 # opcode has fast paths when the value to be unpacked is a tuple or
1419 # a list, but falls back to creating a full-fledged iterator in
1390 # a list, but falls back to creating a full-fledged iterator in
1420 # general. That is much slower than simply accessing and storing the
1391 # general. That is much slower than simply accessing and storing the
1421 # tuple members one by one.
1392 # tuple members one by one.
1422 t = dget(fn)
1393 t = dget(fn)
1423 mode = t.mode
1394 mode = t.mode
1424 size = t.size
1395 size = t.size
1425 time = t.mtime
1396 time = t.mtime
1426
1397
1427 if not st and t.tracked:
1398 if not st and t.tracked:
1428 dadd(fn)
1399 dadd(fn)
1429 elif t.merged:
1400 elif t.merged:
1430 madd(fn)
1401 madd(fn)
1431 elif t.added:
1402 elif t.added:
1432 aadd(fn)
1403 aadd(fn)
1433 elif t.removed:
1404 elif t.removed:
1434 radd(fn)
1405 radd(fn)
1435 elif t.tracked:
1406 elif t.tracked:
1436 if (
1407 if (
1437 size >= 0
1408 size >= 0
1438 and (
1409 and (
1439 (size != st.st_size and size != st.st_size & _rangemask)
1410 (size != st.st_size and size != st.st_size & _rangemask)
1440 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1411 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1441 )
1412 )
1442 or t.from_p2
1413 or t.from_p2
1443 or fn in copymap
1414 or fn in copymap
1444 ):
1415 ):
1445 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1416 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1446 # issue6456: Size returned may be longer due to
1417 # issue6456: Size returned may be longer due to
1447 # encryption on EXT-4 fscrypt, undecided.
1418 # encryption on EXT-4 fscrypt, undecided.
1448 ladd(fn)
1419 ladd(fn)
1449 else:
1420 else:
1450 madd(fn)
1421 madd(fn)
1451 elif (
1422 elif (
1452 time != st[stat.ST_MTIME]
1423 time != st[stat.ST_MTIME]
1453 and time != st[stat.ST_MTIME] & _rangemask
1424 and time != st[stat.ST_MTIME] & _rangemask
1454 ):
1425 ):
1455 ladd(fn)
1426 ladd(fn)
1456 elif st[stat.ST_MTIME] == lastnormaltime:
1427 elif st[stat.ST_MTIME] == lastnormaltime:
1457 # fn may have just been marked as normal and it may have
1428 # fn may have just been marked as normal and it may have
1458 # changed in the same second without changing its size.
1429 # changed in the same second without changing its size.
1459 # This can happen if we quickly do multiple commits.
1430 # This can happen if we quickly do multiple commits.
1460 # Force lookup, so we don't miss such a racy file change.
1431 # Force lookup, so we don't miss such a racy file change.
1461 ladd(fn)
1432 ladd(fn)
1462 elif listclean:
1433 elif listclean:
1463 cadd(fn)
1434 cadd(fn)
1464 status = scmutil.status(
1435 status = scmutil.status(
1465 modified, added, removed, deleted, unknown, ignored, clean
1436 modified, added, removed, deleted, unknown, ignored, clean
1466 )
1437 )
1467 return (lookup, status)
1438 return (lookup, status)
1468
1439
1469 def matches(self, match):
1440 def matches(self, match):
1470 """
1441 """
1471 return files in the dirstate (in whatever state) filtered by match
1442 return files in the dirstate (in whatever state) filtered by match
1472 """
1443 """
1473 dmap = self._map
1444 dmap = self._map
1474 if rustmod is not None:
1445 if rustmod is not None:
1475 dmap = self._map._rustmap
1446 dmap = self._map._rustmap
1476
1447
1477 if match.always():
1448 if match.always():
1478 return dmap.keys()
1449 return dmap.keys()
1479 files = match.files()
1450 files = match.files()
1480 if match.isexact():
1451 if match.isexact():
1481 # fast path -- filter the other way around, since typically files is
1452 # fast path -- filter the other way around, since typically files is
1482 # much smaller than dmap
1453 # much smaller than dmap
1483 return [f for f in files if f in dmap]
1454 return [f for f in files if f in dmap]
1484 if match.prefix() and all(fn in dmap for fn in files):
1455 if match.prefix() and all(fn in dmap for fn in files):
1485 # fast path -- all the values are known to be files, so just return
1456 # fast path -- all the values are known to be files, so just return
1486 # that
1457 # that
1487 return list(files)
1458 return list(files)
1488 return [f for f in dmap if match(f)]
1459 return [f for f in dmap if match(f)]
1489
1460
1490 def _actualfilename(self, tr):
1461 def _actualfilename(self, tr):
1491 if tr:
1462 if tr:
1492 return self._pendingfilename
1463 return self._pendingfilename
1493 else:
1464 else:
1494 return self._filename
1465 return self._filename
1495
1466
1496 def savebackup(self, tr, backupname):
1467 def savebackup(self, tr, backupname):
1497 '''Save current dirstate into backup file'''
1468 '''Save current dirstate into backup file'''
1498 filename = self._actualfilename(tr)
1469 filename = self._actualfilename(tr)
1499 assert backupname != filename
1470 assert backupname != filename
1500
1471
1501 # use '_writedirstate' instead of 'write' to write changes certainly,
1472 # use '_writedirstate' instead of 'write' to write changes certainly,
1502 # because the latter omits writing out if transaction is running.
1473 # because the latter omits writing out if transaction is running.
1503 # output file will be used to create backup of dirstate at this point.
1474 # output file will be used to create backup of dirstate at this point.
1504 if self._dirty or not self._opener.exists(filename):
1475 if self._dirty or not self._opener.exists(filename):
1505 self._writedirstate(
1476 self._writedirstate(
1506 tr,
1477 tr,
1507 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1478 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1508 )
1479 )
1509
1480
1510 if tr:
1481 if tr:
1511 # ensure that subsequent tr.writepending returns True for
1482 # ensure that subsequent tr.writepending returns True for
1512 # changes written out above, even if dirstate is never
1483 # changes written out above, even if dirstate is never
1513 # changed after this
1484 # changed after this
1514 tr.addfilegenerator(
1485 tr.addfilegenerator(
1515 b'dirstate',
1486 b'dirstate',
1516 (self._filename,),
1487 (self._filename,),
1517 lambda f: self._writedirstate(tr, f),
1488 lambda f: self._writedirstate(tr, f),
1518 location=b'plain',
1489 location=b'plain',
1519 )
1490 )
1520
1491
1521 # ensure that pending file written above is unlinked at
1492 # ensure that pending file written above is unlinked at
1522 # failure, even if tr.writepending isn't invoked until the
1493 # failure, even if tr.writepending isn't invoked until the
1523 # end of this transaction
1494 # end of this transaction
1524 tr.registertmp(filename, location=b'plain')
1495 tr.registertmp(filename, location=b'plain')
1525
1496
1526 self._opener.tryunlink(backupname)
1497 self._opener.tryunlink(backupname)
1527 # hardlink backup is okay because _writedirstate is always called
1498 # hardlink backup is okay because _writedirstate is always called
1528 # with an "atomictemp=True" file.
1499 # with an "atomictemp=True" file.
1529 util.copyfile(
1500 util.copyfile(
1530 self._opener.join(filename),
1501 self._opener.join(filename),
1531 self._opener.join(backupname),
1502 self._opener.join(backupname),
1532 hardlink=True,
1503 hardlink=True,
1533 )
1504 )
1534
1505
1535 def restorebackup(self, tr, backupname):
1506 def restorebackup(self, tr, backupname):
1536 '''Restore dirstate by backup file'''
1507 '''Restore dirstate by backup file'''
1537 # this "invalidate()" prevents "wlock.release()" from writing
1508 # this "invalidate()" prevents "wlock.release()" from writing
1538 # changes of dirstate out after restoring from backup file
1509 # changes of dirstate out after restoring from backup file
1539 self.invalidate()
1510 self.invalidate()
1540 filename = self._actualfilename(tr)
1511 filename = self._actualfilename(tr)
1541 o = self._opener
1512 o = self._opener
1542 if util.samefile(o.join(backupname), o.join(filename)):
1513 if util.samefile(o.join(backupname), o.join(filename)):
1543 o.unlink(backupname)
1514 o.unlink(backupname)
1544 else:
1515 else:
1545 o.rename(backupname, filename, checkambig=True)
1516 o.rename(backupname, filename, checkambig=True)
1546
1517
1547 def clearbackup(self, tr, backupname):
1518 def clearbackup(self, tr, backupname):
1548 '''Clear backup file'''
1519 '''Clear backup file'''
1549 self._opener.unlink(backupname)
1520 self._opener.unlink(backupname)
@@ -1,959 +1,1021
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 from .dirstateutils import (
21 from .dirstateutils import (
22 docket as docketmod,
22 docket as docketmod,
23 )
23 )
24
24
25 parsers = policy.importmod('parsers')
25 parsers = policy.importmod('parsers')
26 rustmod = policy.importrust('dirstate')
26 rustmod = policy.importrust('dirstate')
27
27
28 propertycache = util.propertycache
28 propertycache = util.propertycache
29
29
30 if rustmod is None:
30 if rustmod is None:
31 DirstateItem = parsers.DirstateItem
31 DirstateItem = parsers.DirstateItem
32 else:
32 else:
33 DirstateItem = rustmod.DirstateItem
33 DirstateItem = rustmod.DirstateItem
34
34
35 rangemask = 0x7FFFFFFF
35 rangemask = 0x7FFFFFFF
36
36
37
37
class dirstatemap(object):
    """Map encapsulating the dirstate's contents.

    The dirstate contains the following state:

    - `identity` is the identity of the dirstate file, which can be used to
      detect when changes have occurred to the dirstate file.

    - `parents` is a pair containing the parents of the working copy. The
      parents are updated by calling `setparents`.

    - the state map maps filenames to tuples of (state, mode, size, mtime),
      where state is a single character representing 'normal', 'added',
      'removed', or 'merged'. It is read by treating the dirstate as a
      dict. File state is updated by calling various methods (see each
      documentation for details):

      - `reset_state`,
      - `set_tracked`
      - `set_untracked`
      - `set_clean`
      - `set_possibly_dirty`

    - `copymap` maps destination filenames to their source filename.

    The dirstate also provides the following views onto the state:

    - `nonnormalset` is a set of the filenames that have state other
      than 'normal', or are normal but have an mtime of -1 ('normallookup').

    - `otherparentset` is a set of the filenames that are marked as coming
      from the second parent when the dirstate is currently being merged.

    - `filefoldmap` is a dict mapping normalized filenames to the denormalized
      form that they appear as in the dirstate.

    - `dirfoldmap` is a dict mapping normalized directory names to the
      denormalized form that they appear as in the dirstate.
    """

    def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
        self._ui = ui
        self._opener = opener
        self._root = root
        self._filename = b'dirstate'
        self._nodelen = 20
        self._nodeconstants = nodeconstants
        # this pure-Python class only supports the v1 on-disk format
        assert (
            not use_dirstate_v2
        ), "should have detected unsupported requirement"

        self._parents = None
        self._dirtyparents = False

        # for consistent view between _pl() and _read() invocations
        self._pendingmode = None

    @propertycache
    def _map(self):
        # lazily read the dirstate file the first time the map is needed;
        # read() repopulates self._map in place
        self._map = {}
        self.read()
        return self._map

    @propertycache
    def copymap(self):
        # forcing self._map triggers read(), which fills the copy map
        self.copymap = {}
        self._map
        return self.copymap

    def clear(self):
        """Reset the map to an empty state with null parents."""
        self._map.clear()
        self.copymap.clear()
        self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
        # drop every derived, lazily-computed view of the state
        for cached in (
            b"_dirs",
            b"_alldirs",
            b"filefoldmap",
            b"dirfoldmap",
            b"nonnormalset",
            b"otherparentset",
        ):
            util.clearcachedproperty(self, cached)

    def items(self):
        return pycompat.iteritems(self._map)

    # forward for python2,3 compat
    iteritems = items

    def debug_iter(self, all):
        """
        Return an iterator of (filename, state, mode, size, mtime) tuples

        `all` is unused when Rust is not enabled
        """
        for (filename, entry) in self.items():
            yield (filename, entry.state, entry.mode, entry.size, entry.mtime)

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def get(self, key, default=None):
        return self._map.get(key, default)

    def __contains__(self, key):
        return key in self._map

    def __getitem__(self, key):
        return self._map[key]

    def keys(self):
        return self._map.keys()

    def preload(self):
        """Loads the underlying data, if it's not already loaded"""
        self._map

    def _dirs_incr(self, filename, old_entry=None):
        """increment the dirstate counters if applicable"""
        # `_dirs` only counts tracked files, `_alldirs` counts every entry;
        # each cache is updated only when it has already been materialized
        was_tracked = old_entry is not None and not old_entry.removed
        if not was_tracked and "_dirs" in self.__dict__:
            self._dirs.addpath(filename)
        if old_entry is None and "_alldirs" in self.__dict__:
            self._alldirs.addpath(filename)

    def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
        """decrement the dirstate counters if applicable"""
        if old_entry is not None:
            if "_dirs" in self.__dict__ and not old_entry.removed:
                self._dirs.delpath(filename)
            if "_alldirs" in self.__dict__ and not remove_variant:
                self._alldirs.delpath(filename)
        elif remove_variant and "_alldirs" in self.__dict__:
            # file becomes a "removed" entry: it still counts in _alldirs
            self._alldirs.addpath(filename)
        if "filefoldmap" in self.__dict__:
            self.filefoldmap.pop(util.normcase(filename), None)

    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self[filename].set_possibly_dirty()

    def set_clean(self, filename, mode, size, mtime):
        """mark a file as back to a clean state"""
        entry = self[filename]
        # clamp size/mtime into the 31-bit range the v1 format can store
        mtime = mtime & rangemask
        size = size & rangemask
        entry.set_clean(mode, size, mtime)
        self.copymap.pop(filename, None)
        self.nonnormalset.discard(filename)

    def reset_state(
        self,
        filename,
        wc_tracked=False,
        p1_tracked=False,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """Set a entry to a given state, diregarding all previous state

        This is to be used by the part of the dirstate API dedicated to
        adjusting the dirstate after a update/merge.

        note: calling this might result to no entry existing at all if the
        dirstate map does not see any point at having one for this file
        anymore.
        """
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)
        # copy information are now outdated
        # (maybe new information should be in directly passed to this function)
        self.copymap.pop(filename, None)

        if not (p1_tracked or p2_tracked or wc_tracked):
            # file is tracked nowhere: drop the entry entirely
            old_entry = self._map.pop(filename, None)
            self._dirs_decr(filename, old_entry=old_entry)
            self.nonnormalset.discard(filename)
            self.copymap.pop(filename, None)
            return
        elif merged:
            # XXX might be merged and removed ?
            entry = self.get(filename)
            if entry is None or not entry.tracked:
                # XXX mostly replicate dirstate.other parent. We should get
                # the higher layer to pass us more reliable data where `merged`
                # actually mean merged. Dropping this clause will show failure
                # in `test-graft.t`
                merged = False
                clean_p2 = True
        elif not (p1_tracked or p2_tracked) and wc_tracked:
            pass  # file is added, nothing special to adjust
        elif (p1_tracked or p2_tracked) and not wc_tracked:
            pass
        elif clean_p2 and wc_tracked:
            if p1_tracked or self.get(filename) is not None:
                # XXX the `self.get` call is catching some case in
                # `test-merge-remove.t` where the file is tracked in p1, the
                # p1_tracked argument is False.
                #
                # In addition, this seems to be a case where the file is marked
                # as merged without actually being the result of a merge
                # action. So thing are not ideal here.
                merged = True
                clean_p2 = False
        elif not p1_tracked and p2_tracked and wc_tracked:
            clean_p2 = True
        elif possibly_dirty:
            pass
        elif wc_tracked:
            # this is a "normal" file
            if parentfiledata is None:
                msg = b'failed to pass parentfiledata for a normal file: %s'
                msg %= filename
                raise error.ProgrammingError(msg)
        else:
            assert False, 'unreachable'

        old_entry = self._map.get(filename)
        self._dirs_incr(filename, old_entry)
        entry = DirstateItem(
            wc_tracked=wc_tracked,
            p1_tracked=p1_tracked,
            p2_tracked=p2_tracked,
            merged=merged,
            clean_p1=clean_p1,
            clean_p2=clean_p2,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        # keep the derived sets in sync with the new entry
        if entry.dm_nonnormal:
            self.nonnormalset.add(filename)
        else:
            self.nonnormalset.discard(filename)
        if entry.dm_otherparent:
            self.otherparentset.add(filename)
        else:
            self.otherparentset.discard(filename)
        self._map[filename] = entry

    def set_tracked(self, filename):
        """Mark a file as tracked; return True if it was not tracked before."""
        new = False
        entry = self.get(filename)
        if entry is None:
            # brand new entry, tracked only in the working copy
            self._dirs_incr(filename)
            entry = DirstateItem(
                p1_tracked=False,
                p2_tracked=False,
                wc_tracked=True,
                merged=False,
                clean_p1=False,
                clean_p2=False,
                possibly_dirty=False,
                parentfiledata=None,
            )
            self._map[filename] = entry
            if entry.dm_nonnormal:
                self.nonnormalset.add(filename)
            new = True
        elif not entry.tracked:
            self._dirs_incr(filename, entry)
            entry.set_tracked()
            new = True
        else:
            # XXX This is probably overkill for more case, but we need this to
            # fully replace the `normallookup` call with `set_tracked` one.
            # Consider smoothing this in the future.
            self.set_possibly_dirty(filename)
        return new

    def set_untracked(self, f):
        """Mark a file as no longer tracked in the dirstate map"""
        entry = self.get(f)
        if entry is None:
            return False
        self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
        if not entry.merged:
            self.copymap.pop(f, None)
        if entry.added:
            # added-then-untracked: the entry disappears entirely
            self.nonnormalset.discard(f)
            self._map.pop(f, None)
        else:
            self.nonnormalset.add(f)
            if entry.from_p2:
                self.otherparentset.add(f)
            entry.set_untracked()
        return True

    def nonnormalentries(self):
        '''Compute the nonnormal dirstate entries from the dmap'''
        try:
            # prefer the C fast path when the parsers module provides it
            return parsers.nonnormalotherparententries(self._map)
        except AttributeError:
            nonnorm = set()
            otherparent = set()
            for fname, entry in pycompat.iteritems(self._map):
                if entry.dm_nonnormal:
                    nonnorm.add(fname)
                if entry.from_p2:
                    otherparent.add(fname)
            return nonnorm, otherparent

    @propertycache
    def filefoldmap(self):
        """Returns a dictionary mapping normalized case paths to their
        non-normalized versions.
        """
        try:
            makefilefoldmap = parsers.make_file_foldmap
        except AttributeError:
            pass
        else:
            return makefilefoldmap(
                self._map, util.normcasespec, util.normcasefallback
            )

        normcase = util.normcase
        foldmap = {
            normcase(name): name
            for name, entry in pycompat.iteritems(self._map)
            if not entry.removed
        }
        foldmap[b'.'] = b'.'  # prevents useless util.fspath() invocation
        return foldmap

    def hastrackeddir(self, d):
        """
        Returns True if the dirstate contains a tracked (not removed) file
        in this directory.
        """
        return d in self._dirs

    def hasdir(self, d):
        """
        Returns True if the dirstate contains a file (tracked or removed)
        in this directory.
        """
        return d in self._alldirs

    @propertycache
    def _dirs(self):
        return pathutil.dirs(self._map, only_tracked=True)

    @propertycache
    def _alldirs(self):
        return pathutil.dirs(self._map)

    def _opendirstatefile(self):
        fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
        if self._pendingmode is not None and self._pendingmode != mode:
            # mixing pending and non-pending reads would give an
            # inconsistent view of the dirstate
            fp.close()
            raise error.Abort(
                _(b'working directory state may be changed parallelly')
            )
        self._pendingmode = mode
        return fp

    def parents(self):
        if not self._parents:
            try:
                fp = self._opendirstatefile()
                st = fp.read(2 * self._nodelen)
                fp.close()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # File doesn't exist, so the current state is empty
                st = b''

            if len(st) == 2 * self._nodelen:
                # the first two nodes of the file are the working copy parents
                self._parents = (
                    st[: self._nodelen],
                    st[self._nodelen : 2 * self._nodelen],
                )
            elif not st:
                self._parents = (
                    self._nodeconstants.nullid,
                    self._nodeconstants.nullid,
                )
            else:
                raise error.Abort(
                    _(b'working directory state appears damaged!')
                )

        return self._parents

    def setparents(self, p1, p2, fold_p2=False):
        """Record new working-copy parents.

        When `fold_p2` is True, entries that carried merge/otherparent
        markers are rewritten as plain p1-based entries; the copy sources
        that were dropped in the process are returned as a dict.
        """
        self._parents = (p1, p2)
        self._dirtyparents = True
        copies = {}
        if fold_p2:
            candidatefiles = self.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.reset_state(
                        f,
                        wc_tracked=True,
                        p1_tracked=True,
                        possibly_dirty=True,
                    )
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.reset_state(
                        f,
                        p1_tracked=False,
                        wc_tracked=True,
                    )
        return copies

    def read(self):
        # ignore HG_PENDING because identity is used only for writing
        self.identity = util.filestat.frompath(
            self._opener.join(self._filename)
        )

        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, b'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. This trades wasting some memory for avoiding costly
            # resizes. Each entry have a prefix of 17 bytes followed by one or
            # two path names. Studies on various large-scale real-world repositories
            # found 54 bytes a reasonable upper limit for the average path names.
            # Copy entries are ignored for the sake of this estimate.
            self._map = parsers.dict_new_presized(len(st) // 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self.copymap, st)
        if not self._dirtyparents:
            self.setparents(*p)

        # Avoid excess attribute lookups by fast pathing certain checks
        self.__contains__ = self._map.__contains__
        self.__getitem__ = self._map.__getitem__
        self.get = self._map.get

    def write(self, _tr, st, now):
        """Serialize the dirstate to the open file object `st`."""
        st.write(
            parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
        )
        st.close()
        self._dirtyparents = False
        self.nonnormalset, self.otherparentset = self.nonnormalentries()

    @propertycache
    def nonnormalset(self):
        nonnorm, otherparents = self.nonnormalentries()
        # both sets are computed together; cache the sibling too
        self.otherparentset = otherparents
        return nonnorm

    @propertycache
    def otherparentset(self):
        nonnorm, otherparents = self.nonnormalentries()
        # both sets are computed together; cache the sibling too
        self.nonnormalset = nonnorm
        return otherparents

    def non_normal_or_other_parent_paths(self):
        return self.nonnormalset.union(self.otherparentset)

    @propertycache
    def identity(self):
        # forcing self._map runs read(), which records the file identity
        self._map
        return self.identity

    @propertycache
    def dirfoldmap(self):
        normcase = util.normcase
        return {normcase(name): name for name in self._dirs}
520
551
521
552
if rustmod is not None:
    # The Rust extension is available: this dirstatemap is a thin wrapper
    # that keeps the same API as the pure-Python class above while forwarding
    # most operations to the Rust implementation (``self._rustmap``).

    class dirstatemap(object):
        def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
            self._use_dirstate_v2 = use_dirstate_v2
            self._nodeconstants = nodeconstants
            self._ui = ui
            self._opener = opener
            self._root = root
            self._filename = b'dirstate'
            self._nodelen = 20  # Also update Rust code when changing this!
            self._parents = None
            self._dirtyparents = False
            self._docket = None

            # for consistent view between _pl() and _read() invocations
            self._pendingmode = None

            self._use_dirstate_tree = self._ui.configbool(
                b"experimental",
                b"dirstate-tree.in-memory",
                False,
            )

        def addfile(
            self,
            f,
            mode=0,
            size=None,
            mtime=None,
            added=False,
            merged=False,
            from_p2=False,
            possibly_dirty=False,
        ):
            """Insert or update the entry for ``f``.

            The mutually exclusive ``added``/``merged``/``from_p2``/
            ``possibly_dirty`` flags select which kind of DirstateItem is
            created; a plain clean entry requires explicit ``size`` and
            ``mtime`` values.
            """
            if added:
                assert not possibly_dirty
                assert not from_p2
                item = DirstateItem.new_added()
            elif merged:
                assert not possibly_dirty
                assert not from_p2
                item = DirstateItem.new_merged()
            elif from_p2:
                assert not possibly_dirty
                item = DirstateItem.new_from_p2()
            elif possibly_dirty:
                item = DirstateItem.new_possibly_dirty()
            else:
                assert size is not None
                assert mtime is not None
                # keep size/mtime within the range the on-disk format can
                # represent
                size = size & rangemask
                mtime = mtime & rangemask
                item = DirstateItem.new_normal(mode, size, mtime)
            self._rustmap.addfile(f, item)
            if added:
                # a freshly added file cannot carry copy information
                self.copymap.pop(f, None)

        def reset_state(
            self,
            filename,
            wc_tracked=False,
            p1_tracked=False,
            p2_tracked=False,
            merged=False,
            clean_p1=False,
            clean_p2=False,
            possibly_dirty=False,
            parentfiledata=None,
        ):
            """Set an entry to a given state, disregarding all previous state

            This is to be used by the part of the dirstate API dedicated to
            adjusting the dirstate after an update/merge.

            note: calling this might result in no entry existing at all if the
            dirstate map does not see any point in having one for this file
            anymore.
            """
            if merged and (clean_p1 or clean_p2):
                msg = (
                    b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
                )
                raise error.ProgrammingError(msg)
            # copy information is now outdated
            # (maybe new information should be directly passed to this function)
            self.copymap.pop(filename, None)

            if not (p1_tracked or p2_tracked or wc_tracked):
                self._rustmap.drop_item_and_copy_source(filename)
            elif merged:
                # XXX might be merged and removed ?
                entry = self.get(filename)
                if entry is not None and entry.tracked:
                    # XXX mostly replicate dirstate.other parent. We should get
                    # the higher layer to pass us more reliable data where
                    # `merged` actually means merged. Dropping the else clause
                    # will show failure in `test-graft.t`
                    self.addfile(filename, merged=True)
                else:
                    self.addfile(filename, from_p2=True)
            elif not (p1_tracked or p2_tracked) and wc_tracked:
                self.addfile(
                    filename, added=True, possibly_dirty=possibly_dirty
                )
            elif (p1_tracked or p2_tracked) and not wc_tracked:
                # XXX might be merged and removed ?
                self[filename] = DirstateItem.from_v1_data(b'r', 0, 0, 0)
                self.nonnormalset.add(filename)
            elif clean_p2 and wc_tracked:
                if p1_tracked or self.get(filename) is not None:
                    # XXX the `self.get` call is catching some case in
                    # `test-merge-remove.t` where the file is tracked in p1, the
                    # p1_tracked argument is False.
                    #
                    # In addition, this seems to be a case where the file is
                    # marked as merged without actually being the result of a
                    # merge action. So things are not ideal here.
                    self.addfile(filename, merged=True)
                else:
                    self.addfile(filename, from_p2=True)
            elif not p1_tracked and p2_tracked and wc_tracked:
                self.addfile(
                    filename, from_p2=True, possibly_dirty=possibly_dirty
                )
            elif possibly_dirty:
                self.addfile(filename, possibly_dirty=possibly_dirty)
            elif wc_tracked:
                # this is a "normal" file
                if parentfiledata is None:
                    msg = b'failed to pass parentfiledata for a normal file: %s'
                    msg %= filename
                    raise error.ProgrammingError(msg)
                mode, size, mtime = parentfiledata
                self.addfile(filename, mode=mode, size=size, mtime=mtime)
                self.nonnormalset.discard(filename)
            else:
                assert False, 'unreachable'

        def set_tracked(self, filename):
            """Mark ``filename`` as tracked; return True if it was not before."""
            new = False
            entry = self.get(filename)
            if entry is None:
                self.addfile(filename, added=True)
                new = True
            elif not entry.tracked:
                entry.set_tracked()
                self._rustmap.set_dirstate_item(filename, entry)
                new = True
            else:
                # XXX This is probably overkill for most cases, but we need
                # this to fully replace the `normallookup` call with a
                # `set_tracked` one. Consider smoothing this in the future.
                self.set_possibly_dirty(filename)
            return new

        def set_untracked(self, f):
            """Mark a file as no longer tracked in the dirstate map"""
            # passing ``in_merge=True`` only triggers more logic, so it is
            # fine to always pass it.
            #
            # the inner rust dirstate map code needs to be adjusted once the
            # API for dirstate/dirstatemap/DirstateItem is a bit more settled
            entry = self.get(f)
            if entry is None:
                return False
            else:
                if entry.added:
                    # an entry that was only ever added can simply be dropped
                    self._rustmap.drop_item_and_copy_source(f)
                else:
                    self._rustmap.removefile(f, in_merge=True)
                return True

        def removefile(self, *args, **kwargs):
            # direct forward to the Rust implementation
            return self._rustmap.removefile(*args, **kwargs)

        def nonnormalentries(self):
            """Return the (non-normal, other-parent) filename sets."""
            return self._rustmap.nonnormalentries()

        def get(self, *args, **kwargs):
            # direct forward to the Rust implementation
            return self._rustmap.get(*args, **kwargs)

        @property
        def copymap(self):
            # NOTE: a fresh proxy object from the Rust map on every access
            return self._rustmap.copymap()

        def debug_iter(self, all):
            """
            Return an iterator of (filename, state, mode, size, mtime) tuples

            `all`: also include with `state == b' '` dirstate tree nodes that
            don't have an associated `DirstateItem`.

            """
            return self._rustmap.debug_iter(all)

        def preload(self):
            # touch the propertycache to force reading the dirstate from disk
            self._rustmap

        def clear(self):
            """Empty the map and reset parents to the null revision."""
            self._rustmap.clear()
            self.setparents(
                self._nodeconstants.nullid, self._nodeconstants.nullid
            )
            # invalidate caches derived from the (now empty) map
            util.clearcachedproperty(self, b"_dirs")
            util.clearcachedproperty(self, b"_alldirs")
            util.clearcachedproperty(self, b"dirfoldmap")

        def items(self):
            return self._rustmap.items()

        def keys(self):
            return iter(self._rustmap)

        def __contains__(self, key):
            return key in self._rustmap

        def __getitem__(self, item):
            return self._rustmap[item]

        def __len__(self):
            return len(self._rustmap)

        def __iter__(self):
            return iter(self._rustmap)

        # forward for python2,3 compat
        iteritems = items

        def _opendirstatefile(self):
            # honor a pending (transaction) dirstate file if present, but
            # refuse to mix pending and non-pending reads in one lifetime
            fp, mode = txnutil.trypending(
                self._root, self._opener, self._filename
            )
            if self._pendingmode is not None and self._pendingmode != mode:
                fp.close()
                raise error.Abort(
                    _(b'working directory state may be changed parallelly')
                )
            self._pendingmode = mode
            return fp

        def _readdirstatefile(self, size=-1):
            """Return up to ``size`` bytes of the dirstate file (b'' if absent)."""
            try:
                with self._opendirstatefile() as fp:
                    return fp.read(size)
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # File doesn't exist, so the current state is empty
                return b''

        def setparents(self, p1, p2, fold_p2=False):
            """Record new dirstate parents.

            When ``fold_p2`` is True, entries referring to the second parent
            (merged / other-parent markers) are rewritten as plain
            first-parent entries; the copy sources encountered while doing so
            are returned as a {filename: source} dict.
            """
            self._parents = (p1, p2)
            self._dirtyparents = True
            copies = {}
            if fold_p2:
                candidatefiles = self.non_normal_or_other_parent_paths()

                for f in candidatefiles:
                    s = self.get(f)
                    if s is None:
                        continue

                    # Discard "merged" markers when moving away from a merge
                    # state
                    if s.merged:
                        source = self.copymap.get(f)
                        if source:
                            copies[f] = source
                        self.reset_state(
                            f,
                            wc_tracked=True,
                            p1_tracked=True,
                            possibly_dirty=True,
                        )
                    # Also fix up otherparent markers
                    elif s.from_p2:
                        source = self.copymap.get(f)
                        if source:
                            copies[f] = source
                        self.reset_state(
                            f,
                            p1_tracked=False,
                            wc_tracked=True,
                        )
            return copies

        def parents(self):
            """Return the (p1, p2) nodeids, lazily read from disk if needed."""
            if not self._parents:
                if self._use_dirstate_v2:
                    self._parents = self.docket.parents
                else:
                    read_len = self._nodelen * 2
                    st = self._readdirstatefile(read_len)
                    l = len(st)
                    if l == read_len:
                        self._parents = (
                            st[: self._nodelen],
                            st[self._nodelen : 2 * self._nodelen],
                        )
                    elif l == 0:
                        # missing or empty file: both parents are null
                        self._parents = (
                            self._nodeconstants.nullid,
                            self._nodeconstants.nullid,
                        )
                    else:
                        # truncated header: refuse to guess
                        raise error.Abort(
                            _(b'working directory state appears damaged!')
                        )

            return self._parents

        @property
        def docket(self):
            # the docket (small pointer file) only exists in dirstate-v2
            if not self._docket:
                if not self._use_dirstate_v2:
                    raise error.ProgrammingError(
                        b'dirstate only has a docket in v2 format'
                    )
                self._docket = docketmod.DirstateDocket.parse(
                    self._readdirstatefile(), self._nodeconstants
                )
            return self._docket

        @propertycache
        def _rustmap(self):
            """
            Fills the Dirstatemap when called.
            """
            # ignore HG_PENDING because identity is used only for writing
            self.identity = util.filestat.frompath(
                self._opener.join(self._filename)
            )

            if self._use_dirstate_v2:
                if self.docket.uuid:
                    # TODO: use mmap when possible
                    data = self._opener.read(self.docket.data_filename())
                else:
                    data = b''
                self._rustmap = rustmod.DirstateMap.new_v2(
                    data, self.docket.data_size, self.docket.tree_metadata
                )
                parents = self.docket.parents
            else:
                self._rustmap, parents = rustmod.DirstateMap.new_v1(
                    self._use_dirstate_tree, self._readdirstatefile()
                )

            if parents and not self._dirtyparents:
                self.setparents(*parents)

            # bypass the Python forwarding layer for the hottest lookups
            self.__contains__ = self._rustmap.__contains__
            self.__getitem__ = self._rustmap.__getitem__
            self.get = self._rustmap.get
            return self._rustmap

        def write(self, tr, st, now):
            """Serialize the dirstate to the open file ``st``.

            ``tr`` is the active transaction (may be None) and ``now`` the
            timestamp used to invalidate racy mtimes. v1 rewrites the whole
            file; v2 either appends to the existing data file or writes a
            fresh one and updates the docket to point at it.
            """
            if not self._use_dirstate_v2:
                p1, p2 = self.parents()
                packed = self._rustmap.write_v1(p1, p2, now)
                st.write(packed)
                st.close()
                self._dirtyparents = False
                return

            # We can only append to an existing data file if there is one
            can_append = self.docket.uuid is not None
            packed, meta, append = self._rustmap.write_v2(now, can_append)
            if append:
                docket = self.docket
                data_filename = docket.data_filename()
                if tr:
                    tr.add(data_filename, docket.data_size)
                with self._opener(data_filename, b'r+b') as fp:
                    fp.seek(docket.data_size)
                    assert fp.tell() == docket.data_size
                    written = fp.write(packed)
                    if written is not None:  # py2 may return None
                        assert written == len(packed), (written, len(packed))
                docket.data_size += len(packed)
                docket.parents = self.parents()
                docket.tree_metadata = meta
                st.write(docket.serialize())
                st.close()
            else:
                old_docket = self.docket
                new_docket = docketmod.DirstateDocket.with_new_uuid(
                    self.parents(), len(packed), meta
                )
                data_filename = new_docket.data_filename()
                if tr:
                    tr.add(data_filename, 0)
                self._opener.write(data_filename, packed)
                # Write the new docket after the new data file has been
                # written. Because `st` was opened with `atomictemp=True`,
                # the actual `.hg/dirstate` file is only affected on close.
                st.write(new_docket.serialize())
                st.close()
                # Remove the old data file after the new docket pointing to
                # the new data file was written.
                if old_docket.uuid:
                    data_filename = old_docket.data_filename()
                    unlink = lambda _tr=None: self._opener.unlink(data_filename)
                    if tr:
                        category = b"dirstate-v2-clean-" + old_docket.uuid
                        tr.addpostclose(category, unlink)
                    else:
                        unlink()
                self._docket = new_docket
                # Reload from the newly-written file
                util.clearcachedproperty(self, b"_rustmap")
            self._dirtyparents = False

        @propertycache
        def filefoldmap(self):
            """Returns a dictionary mapping normalized case paths to their
            non-normalized versions.
            """
            return self._rustmap.filefoldmapasdict()

        def hastrackeddir(self, d):
            return self._rustmap.hastrackeddir(d)

        def hasdir(self, d):
            return self._rustmap.hasdir(d)

        @propertycache
        def identity(self):
            # loading the map records the dirstate file's stat in
            # ``self.identity`` (replacing this propertycache), so force the
            # load and return the freshly-set attribute
            self._rustmap
            return self.identity

        @property
        def nonnormalset(self):
            nonnorm = self._rustmap.non_normal_entries()
            return nonnorm

        @propertycache
        def otherparentset(self):
            otherparents = self._rustmap.other_parent_entries()
            return otherparents

        def non_normal_or_other_parent_paths(self):
            return self._rustmap.non_normal_or_other_parent_paths()

        @propertycache
        def dirfoldmap(self):
            # map normalized-case directory names to their on-disk spelling
            f = {}
            normcase = util.normcase
            for name in self._rustmap.tracked_dirs():
                f[normcase(name)] = name
            return f

        def set_possibly_dirty(self, filename):
            """record that the current state of the file on disk is unknown"""
            entry = self[filename]
            entry.set_possibly_dirty()
            self._rustmap.set_dirstate_item(filename, entry)

        def set_clean(self, filename, mode, size, mtime):
            """mark a file as back to a clean state"""
            entry = self[filename]
            # keep size/mtime within the range the on-disk format can
            # represent
            mtime = mtime & rangemask
            size = size & rangemask
            entry.set_clean(mode, size, mtime)
            self._rustmap.set_dirstate_item(filename, entry)
            # a clean file cannot carry copy information
            self._rustmap.copymap().pop(filename, None)

        def __setitem__(self, key, value):
            assert isinstance(value, DirstateItem)
            self._rustmap.set_dirstate_item(key, value)
General Comments 0
You need to be logged in to leave comments. Login now