##// END OF EJS Templates
dirstate: add a `set_possibly_dirty` method...
marmoute -
r48520:b0314d8d default
parent child Browse files
Show More
@@ -1,1703 +1,1710 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
# Accelerated modules: C parsers and the optional Rust dirstate implementation.
parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

# dirstate-v2 requires the Rust extensions; without them only v1 is available.
SUPPORTS_DIRSTATE_V2 = rustmod is not None

# Short local aliases for frequently used helpers.
propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem
49
49
50
50
class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        # resolve fname relative to the repository's .hg/ directory
        return obj._opener.join(fname)
56
56
57
57
class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        # resolve fname relative to the working directory root
        return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
def requires_parents_change(func):
    """Decorator asserting `func` runs inside a `parentchange` context.

    Raises error.ProgrammingError when the dirstate is not currently in
    the middle of a parent change.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` outside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
84
84
85
85
def requires_no_parents_change(func):
    """Decorator asserting `func` runs outside any `parentchange` context.

    Raises error.ProgrammingError when a parent change is in progress.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            return func(self, *args, **kwargs)
        msg = 'calling `%s` inside of a parentchange context' % func.__name__
        raise error.ProgrammingError(msg)

    return wrap
95
95
96
96
@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True when the in-memory state differs from what is on disk
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        # depth of nested parentchange() contexts currently open
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd
139
139
    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        # touching the property is enough to force the load
        self._pl
146
146
    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1
163
163
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0
169
169
    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        # Assigning the attribute replaces this propertycache entry, so the
        # map is constructed only once even if accessed during construction.
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map
181
181
    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()
194
194
    @repocache(b'branch')
    def _branch(self):
        # the current branch name is stored in .hg/branch; a missing file
        # means the default branch
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b"default"
203
203
    @property
    def _pl(self):
        # current dirstate parents as a (p1, p2) pair of nodes
        return self._map.parents()
207
207
    def hasdir(self, d):
        # delegate to the dirstate map's tracked-directory check
        return self._map.hastrackeddir(d)
210
210
    @rootcache(b'.hgignore')
    def _ignore(self):
        """Matcher built from every configured ignore file."""
        files = self._ignorefiles()
        if not files:
            # nothing configured: a matcher that never matches
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
    @propertycache
    def _slash(self):
        # True when paths should be displayed with '/' even though the OS
        # separator differs (ui.slash configuration)
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        # whether the filesystem at the repository root supports symlinks
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        # whether the filesystem at the repository root honors the exec bit
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        # True on case-insensitive filesystems (probed against '.hg')
        return not util.fscasesensitive(self._join(b'.hg'))
235
235
    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f
240
240
    def flagfunc(self, buildfallback):
        """Return a function mapping a tracked path to its flags.

        The returned callable yields b'l' for symlinks, b'x' for
        executable files and b'' otherwise.  When the filesystem cannot
        express symlinks and/or the exec bit, the missing half of the
        answer is taken from the fallback produced by ``buildfallback()``.
        """
        if self._checklink and self._checkexec:
            # filesystem fully capable: a single lstat answers both questions

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:
            # symlinks supported, exec bit is not: ask the fallback for 'x'

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:
            # exec bit supported, symlinks are not: ask the fallback for 'l'

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            # neither supported: the fallback is the whole answer
            return fallback
280
280
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()
288
288
    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            # inside the repo: return the path relative to the root
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd
308
308
    def pathto(self, f, cwd=None):
        """Return `f` expressed relative to `cwd` for display."""
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            # honor ui.slash: always display forward slashes
            return util.pconvert(path)
        return path
316
316
    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. we should
        consider migrating all user of this to going through the dirstate entry
        instead.
        """
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        # unknown files report b'?'
        return b'?'
335
335
    def __contains__(self, key):
        """True if `key` (a filename) has an entry in the dirstate."""
        return key in self._map

    def __iter__(self):
        """Iterate over tracked filenames in sorted order."""
        return iter(sorted(self._map))

    def items(self):
        """Iterate over (filename, DirstateItem) pairs."""
        return pycompat.iteritems(self._map)

    # Python 2 spelling kept for compatibility with existing callers.
    iteritems = items

    def directories(self):
        # expose the map's directory entries (dirstate-v2)
        return self._map.directories()
349
349
    def parents(self):
        """Return both parents, run through the node validator."""
        return [self._validate(p) for p in self._pl]

    def p1(self):
        """Return the first parent of the working directory."""
        return self._validate(self._pl[0])

    def p2(self):
        """Return the second parent (nullid when no merge is in progress)."""
        return self._validate(self._pl[1])
358
358
    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid
363
363
    def branch(self):
        # branch names are stored UTF-8 encoded; convert for local display
        return encoding.tolocal(self._branch)
366
366
    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            # leaving a merge state: clean up merge/otherparent markers
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies
414
414
    def setbranch(self, branch):
        """Set the current branch and persist it to .hg/branch."""
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise
430
430
    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        # drop the cached properties so they are rebuilt lazily
        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None
446
446
    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            # unmark: drop any existing copy record for dest
            self._updatedfiles.add(dest)
458
458
    def copied(self, file):
        """Return the copy source of `file`, or None if it is not a copy."""
        return self._map.copymap.get(file, None)

    def copies(self):
        """Return the full {dest: source} copy map."""
        return self._map.copymap
464
464
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True if the file was previously untracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            self._add(filename)
            return True
        elif not entry.tracked:
            self.normallookup(filename)
            return True
        return False
482
482
    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True if the file was previously tracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            return False
        elif entry.added:
            # an "added" file only exists in the working copy, so it can be
            # dropped outright instead of being marked removed
            self._drop(filename)
            return True
        else:
            self._remove(filename)
            return True
501
501
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        self._updatedfiles.add(filename)
        self._normal(filename, parentfiledata=parentfiledata)
508
508
    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._updatedfiles.add(filename)
        # the map entry is marked so a later status has to re-examine the file
        self._map.set_possibly_dirty(filename)
    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after an history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        possibly_dirty = False
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            possibly_dirty = True
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)
        elif p1_tracked and not wc_tracked:
            pass
        else:
            assert False, 'unreachable'

        # this mean we are doing call for file we do not really care about the
        # data (eg: added or removed), however this should be a minor overhead
        # compared to the overall update process calling this.
        parentfiledata = None
        if wc_tracked:
            parentfiledata = self._get_filedata(filename)

        self._updatedfiles.add(filename)
        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]
577
571 @requires_parents_change
578 @requires_parents_change
572 def update_file(
579 def update_file(
573 self,
580 self,
574 filename,
581 filename,
575 wc_tracked,
582 wc_tracked,
576 p1_tracked,
583 p1_tracked,
577 p2_tracked=False,
584 p2_tracked=False,
578 merged=False,
585 merged=False,
579 clean_p1=False,
586 clean_p1=False,
580 clean_p2=False,
587 clean_p2=False,
581 possibly_dirty=False,
588 possibly_dirty=False,
582 parentfiledata=None,
589 parentfiledata=None,
583 ):
590 ):
584 """update the information about a file in the dirstate
591 """update the information about a file in the dirstate
585
592
586 This is to be called when the direstates parent changes to keep track
593 This is to be called when the direstates parent changes to keep track
587 of what is the file situation in regards to the working copy and its parent.
594 of what is the file situation in regards to the working copy and its parent.
588
595
589 This function must be called within a `dirstate.parentchange` context.
596 This function must be called within a `dirstate.parentchange` context.
590
597
591 note: the API is at an early stage and we might need to ajust it
598 note: the API is at an early stage and we might need to ajust it
592 depending of what information ends up being relevant and useful to
599 depending of what information ends up being relevant and useful to
593 other processing.
600 other processing.
594 """
601 """
595 if merged and (clean_p1 or clean_p2):
602 if merged and (clean_p1 or clean_p2):
596 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
603 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
597 raise error.ProgrammingError(msg)
604 raise error.ProgrammingError(msg)
598
605
599 # note: I do not think we need to double check name clash here since we
606 # note: I do not think we need to double check name clash here since we
600 # are in a update/merge case that should already have taken care of
607 # are in a update/merge case that should already have taken care of
601 # this. The test agrees
608 # this. The test agrees
602
609
603 self._dirty = True
610 self._dirty = True
604 self._updatedfiles.add(filename)
611 self._updatedfiles.add(filename)
605
612
606 need_parent_file_data = (
613 need_parent_file_data = (
607 not (possibly_dirty or clean_p2 or merged)
614 not (possibly_dirty or clean_p2 or merged)
608 and wc_tracked
615 and wc_tracked
609 and p1_tracked
616 and p1_tracked
610 )
617 )
611
618
612 # this mean we are doing call for file we do not really care about the
619 # this mean we are doing call for file we do not really care about the
613 # data (eg: added or removed), however this should be a minor overhead
620 # data (eg: added or removed), however this should be a minor overhead
614 # compared to the overall update process calling this.
621 # compared to the overall update process calling this.
615 if need_parent_file_data:
622 if need_parent_file_data:
616 if parentfiledata is None:
623 if parentfiledata is None:
617 parentfiledata = self._get_filedata(filename)
624 parentfiledata = self._get_filedata(filename)
618 mtime = parentfiledata[2]
625 mtime = parentfiledata[2]
619
626
620 if mtime > self._lastnormaltime:
627 if mtime > self._lastnormaltime:
621 # Remember the most recent modification timeslot for
628 # Remember the most recent modification timeslot for
622 # status(), to make sure we won't miss future
629 # status(), to make sure we won't miss future
623 # size-preserving file content modifications that happen
630 # size-preserving file content modifications that happen
624 # within the same timeslot.
631 # within the same timeslot.
625 self._lastnormaltime = mtime
632 self._lastnormaltime = mtime
626
633
627 self._map.reset_state(
634 self._map.reset_state(
628 filename,
635 filename,
629 wc_tracked,
636 wc_tracked,
630 p1_tracked,
637 p1_tracked,
631 p2_tracked=p2_tracked,
638 p2_tracked=p2_tracked,
632 merged=merged,
639 merged=merged,
633 clean_p1=clean_p1,
640 clean_p1=clean_p1,
634 clean_p2=clean_p2,
641 clean_p2=clean_p2,
635 possibly_dirty=possibly_dirty,
642 possibly_dirty=possibly_dirty,
636 parentfiledata=parentfiledata,
643 parentfiledata=parentfiledata,
637 )
644 )
638 if (
645 if (
639 parentfiledata is not None
646 parentfiledata is not None
640 and parentfiledata[2] > self._lastnormaltime
647 and parentfiledata[2] > self._lastnormaltime
641 ):
648 ):
642 # Remember the most recent modification timeslot for status(),
649 # Remember the most recent modification timeslot for status(),
643 # to make sure we won't miss future size-preserving file content
650 # to make sure we won't miss future size-preserving file content
644 # modifications that happen within the same timeslot.
651 # modifications that happen within the same timeslot.
645 self._lastnormaltime = parentfiledata[2]
652 self._lastnormaltime = parentfiledata[2]
646
653
647 def _addpath(
654 def _addpath(
648 self,
655 self,
649 f,
656 f,
650 mode=0,
657 mode=0,
651 size=None,
658 size=None,
652 mtime=None,
659 mtime=None,
653 added=False,
660 added=False,
654 merged=False,
661 merged=False,
655 from_p2=False,
662 from_p2=False,
656 possibly_dirty=False,
663 possibly_dirty=False,
657 ):
664 ):
658 entry = self._map.get(f)
665 entry = self._map.get(f)
659 if added or entry is not None and entry.removed:
666 if added or entry is not None and entry.removed:
660 scmutil.checkfilename(f)
667 scmutil.checkfilename(f)
661 if self._map.hastrackeddir(f):
668 if self._map.hastrackeddir(f):
662 msg = _(b'directory %r already in dirstate')
669 msg = _(b'directory %r already in dirstate')
663 msg %= pycompat.bytestr(f)
670 msg %= pycompat.bytestr(f)
664 raise error.Abort(msg)
671 raise error.Abort(msg)
665 # shadows
672 # shadows
666 for d in pathutil.finddirs(f):
673 for d in pathutil.finddirs(f):
667 if self._map.hastrackeddir(d):
674 if self._map.hastrackeddir(d):
668 break
675 break
669 entry = self._map.get(d)
676 entry = self._map.get(d)
670 if entry is not None and not entry.removed:
677 if entry is not None and not entry.removed:
671 msg = _(b'file %r in dirstate clashes with %r')
678 msg = _(b'file %r in dirstate clashes with %r')
672 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
679 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
673 raise error.Abort(msg)
680 raise error.Abort(msg)
674 self._dirty = True
681 self._dirty = True
675 self._updatedfiles.add(f)
682 self._updatedfiles.add(f)
676 self._map.addfile(
683 self._map.addfile(
677 f,
684 f,
678 mode=mode,
685 mode=mode,
679 size=size,
686 size=size,
680 mtime=mtime,
687 mtime=mtime,
681 added=added,
688 added=added,
682 merged=merged,
689 merged=merged,
683 from_p2=from_p2,
690 from_p2=from_p2,
684 possibly_dirty=possibly_dirty,
691 possibly_dirty=possibly_dirty,
685 )
692 )
686
693
687 def _get_filedata(self, filename):
694 def _get_filedata(self, filename):
688 """returns"""
695 """returns"""
689 s = os.lstat(self._join(filename))
696 s = os.lstat(self._join(filename))
690 mode = s.st_mode
697 mode = s.st_mode
691 size = s.st_size
698 size = s.st_size
692 mtime = s[stat.ST_MTIME]
699 mtime = s[stat.ST_MTIME]
693 return (mode, size, mtime)
700 return (mode, size, mtime)
694
701
695 def normal(self, f, parentfiledata=None):
702 def normal(self, f, parentfiledata=None):
696 """Mark a file normal and clean.
703 """Mark a file normal and clean.
697
704
698 parentfiledata: (mode, size, mtime) of the clean file
705 parentfiledata: (mode, size, mtime) of the clean file
699
706
700 parentfiledata should be computed from memory (for mode,
707 parentfiledata should be computed from memory (for mode,
701 size), as or close as possible from the point where we
708 size), as or close as possible from the point where we
702 determined the file was clean, to limit the risk of the
709 determined the file was clean, to limit the risk of the
703 file having been changed by an external process between the
710 file having been changed by an external process between the
704 moment where the file was determined to be clean and now."""
711 moment where the file was determined to be clean and now."""
705 if self.pendingparentchange():
712 if self.pendingparentchange():
706 util.nouideprecwarn(
713 util.nouideprecwarn(
707 b"do not use `normal` inside of update/merge context."
714 b"do not use `normal` inside of update/merge context."
708 b" Use `update_file` or `update_file_p1`",
715 b" Use `update_file` or `update_file_p1`",
709 b'6.0',
716 b'6.0',
710 stacklevel=2,
717 stacklevel=2,
711 )
718 )
712 else:
719 else:
713 util.nouideprecwarn(
720 util.nouideprecwarn(
714 b"do not use `normal` outside of update/merge context."
721 b"do not use `normal` outside of update/merge context."
715 b" Use `set_tracked`",
722 b" Use `set_tracked`",
716 b'6.0',
723 b'6.0',
717 stacklevel=2,
724 stacklevel=2,
718 )
725 )
719 self._normal(f, parentfiledata=parentfiledata)
726 self._normal(f, parentfiledata=parentfiledata)
720
727
721 def _normal(self, f, parentfiledata=None):
728 def _normal(self, f, parentfiledata=None):
722 if parentfiledata:
729 if parentfiledata:
723 (mode, size, mtime) = parentfiledata
730 (mode, size, mtime) = parentfiledata
724 else:
731 else:
725 (mode, size, mtime) = self._get_filedata(f)
732 (mode, size, mtime) = self._get_filedata(f)
726 self._addpath(f, mode=mode, size=size, mtime=mtime)
733 self._addpath(f, mode=mode, size=size, mtime=mtime)
727 self._map.copymap.pop(f, None)
734 self._map.copymap.pop(f, None)
728 if f in self._map.nonnormalset:
735 if f in self._map.nonnormalset:
729 self._map.nonnormalset.remove(f)
736 self._map.nonnormalset.remove(f)
730 if mtime > self._lastnormaltime:
737 if mtime > self._lastnormaltime:
731 # Remember the most recent modification timeslot for status(),
738 # Remember the most recent modification timeslot for status(),
732 # to make sure we won't miss future size-preserving file content
739 # to make sure we won't miss future size-preserving file content
733 # modifications that happen within the same timeslot.
740 # modifications that happen within the same timeslot.
734 self._lastnormaltime = mtime
741 self._lastnormaltime = mtime
735
742
736 def normallookup(self, f):
743 def normallookup(self, f):
737 '''Mark a file normal, but possibly dirty.'''
744 '''Mark a file normal, but possibly dirty.'''
738 if self.in_merge:
745 if self.in_merge:
739 # if there is a merge going on and the file was either
746 # if there is a merge going on and the file was either
740 # "merged" or coming from other parent (-2) before
747 # "merged" or coming from other parent (-2) before
741 # being removed, restore that state.
748 # being removed, restore that state.
742 entry = self._map.get(f)
749 entry = self._map.get(f)
743 if entry is not None:
750 if entry is not None:
744 # XXX this should probably be dealt with a a lower level
751 # XXX this should probably be dealt with a a lower level
745 # (see `merged_removed` and `from_p2_removed`)
752 # (see `merged_removed` and `from_p2_removed`)
746 if entry.merged_removed or entry.from_p2_removed:
753 if entry.merged_removed or entry.from_p2_removed:
747 source = self._map.copymap.get(f)
754 source = self._map.copymap.get(f)
748 if entry.merged_removed:
755 if entry.merged_removed:
749 self.merge(f)
756 self.merge(f)
750 elif entry.from_p2_removed:
757 elif entry.from_p2_removed:
751 self.otherparent(f)
758 self.otherparent(f)
752 if source is not None:
759 if source is not None:
753 self.copy(source, f)
760 self.copy(source, f)
754 return
761 return
755 elif entry.merged or entry.from_p2:
762 elif entry.merged or entry.from_p2:
756 return
763 return
757 self._addpath(f, possibly_dirty=True)
764 self._addpath(f, possibly_dirty=True)
758 self._map.copymap.pop(f, None)
765 self._map.copymap.pop(f, None)
759
766
760 def otherparent(self, f):
767 def otherparent(self, f):
761 '''Mark as coming from the other parent, always dirty.'''
768 '''Mark as coming from the other parent, always dirty.'''
762 if not self.in_merge:
769 if not self.in_merge:
763 msg = _(b"setting %r to other parent only allowed in merges") % f
770 msg = _(b"setting %r to other parent only allowed in merges") % f
764 raise error.Abort(msg)
771 raise error.Abort(msg)
765 entry = self._map.get(f)
772 entry = self._map.get(f)
766 if entry is not None and entry.tracked:
773 if entry is not None and entry.tracked:
767 # merge-like
774 # merge-like
768 self._addpath(f, merged=True)
775 self._addpath(f, merged=True)
769 else:
776 else:
770 # add-like
777 # add-like
771 self._addpath(f, from_p2=True)
778 self._addpath(f, from_p2=True)
772 self._map.copymap.pop(f, None)
779 self._map.copymap.pop(f, None)
773
780
774 def add(self, f):
781 def add(self, f):
775 '''Mark a file added.'''
782 '''Mark a file added.'''
776 if not self.pendingparentchange():
783 if not self.pendingparentchange():
777 util.nouideprecwarn(
784 util.nouideprecwarn(
778 b"do not use `add` outside of update/merge context."
785 b"do not use `add` outside of update/merge context."
779 b" Use `set_tracked`",
786 b" Use `set_tracked`",
780 b'6.0',
787 b'6.0',
781 stacklevel=2,
788 stacklevel=2,
782 )
789 )
783 self._add(f)
790 self._add(f)
784
791
785 def _add(self, filename):
792 def _add(self, filename):
786 """internal function to mark a file as added"""
793 """internal function to mark a file as added"""
787 self._addpath(filename, added=True)
794 self._addpath(filename, added=True)
788 self._map.copymap.pop(filename, None)
795 self._map.copymap.pop(filename, None)
789
796
790 def remove(self, f):
797 def remove(self, f):
791 '''Mark a file removed'''
798 '''Mark a file removed'''
792 if self.pendingparentchange():
799 if self.pendingparentchange():
793 util.nouideprecwarn(
800 util.nouideprecwarn(
794 b"do not use `remove` insde of update/merge context."
801 b"do not use `remove` insde of update/merge context."
795 b" Use `update_file` or `update_file_p1`",
802 b" Use `update_file` or `update_file_p1`",
796 b'6.0',
803 b'6.0',
797 stacklevel=2,
804 stacklevel=2,
798 )
805 )
799 else:
806 else:
800 util.nouideprecwarn(
807 util.nouideprecwarn(
801 b"do not use `remove` outside of update/merge context."
808 b"do not use `remove` outside of update/merge context."
802 b" Use `set_untracked`",
809 b" Use `set_untracked`",
803 b'6.0',
810 b'6.0',
804 stacklevel=2,
811 stacklevel=2,
805 )
812 )
806 self._remove(f)
813 self._remove(f)
807
814
808 def _remove(self, filename):
815 def _remove(self, filename):
809 """internal function to mark a file removed"""
816 """internal function to mark a file removed"""
810 self._dirty = True
817 self._dirty = True
811 self._updatedfiles.add(filename)
818 self._updatedfiles.add(filename)
812 self._map.removefile(filename, in_merge=self.in_merge)
819 self._map.removefile(filename, in_merge=self.in_merge)
813
820
814 def merge(self, f):
821 def merge(self, f):
815 '''Mark a file merged.'''
822 '''Mark a file merged.'''
816 if not self.in_merge:
823 if not self.in_merge:
817 return self.normallookup(f)
824 return self.normallookup(f)
818 return self.otherparent(f)
825 return self.otherparent(f)
819
826
820 def drop(self, f):
827 def drop(self, f):
821 '''Drop a file from the dirstate'''
828 '''Drop a file from the dirstate'''
822 if not self.pendingparentchange():
829 if not self.pendingparentchange():
823 util.nouideprecwarn(
830 util.nouideprecwarn(
824 b"do not use `drop` outside of update/merge context."
831 b"do not use `drop` outside of update/merge context."
825 b" Use `set_untracked`",
832 b" Use `set_untracked`",
826 b'6.0',
833 b'6.0',
827 stacklevel=2,
834 stacklevel=2,
828 )
835 )
829 self._drop(f)
836 self._drop(f)
830
837
831 def _drop(self, filename):
838 def _drop(self, filename):
832 """internal function to drop a file from the dirstate"""
839 """internal function to drop a file from the dirstate"""
833 if self._map.dropfile(filename):
840 if self._map.dropfile(filename):
834 self._dirty = True
841 self._dirty = True
835 self._updatedfiles.add(filename)
842 self._updatedfiles.add(filename)
836 self._map.copymap.pop(filename, None)
843 self._map.copymap.pop(filename, None)
837
844
838 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
845 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
839 if exists is None:
846 if exists is None:
840 exists = os.path.lexists(os.path.join(self._root, path))
847 exists = os.path.lexists(os.path.join(self._root, path))
841 if not exists:
848 if not exists:
842 # Maybe a path component exists
849 # Maybe a path component exists
843 if not ignoremissing and b'/' in path:
850 if not ignoremissing and b'/' in path:
844 d, f = path.rsplit(b'/', 1)
851 d, f = path.rsplit(b'/', 1)
845 d = self._normalize(d, False, ignoremissing, None)
852 d = self._normalize(d, False, ignoremissing, None)
846 folded = d + b"/" + f
853 folded = d + b"/" + f
847 else:
854 else:
848 # No path components, preserve original case
855 # No path components, preserve original case
849 folded = path
856 folded = path
850 else:
857 else:
851 # recursively normalize leading directory components
858 # recursively normalize leading directory components
852 # against dirstate
859 # against dirstate
853 if b'/' in normed:
860 if b'/' in normed:
854 d, f = normed.rsplit(b'/', 1)
861 d, f = normed.rsplit(b'/', 1)
855 d = self._normalize(d, False, ignoremissing, True)
862 d = self._normalize(d, False, ignoremissing, True)
856 r = self._root + b"/" + d
863 r = self._root + b"/" + d
857 folded = d + b"/" + util.fspath(f, r)
864 folded = d + b"/" + util.fspath(f, r)
858 else:
865 else:
859 folded = util.fspath(normed, self._root)
866 folded = util.fspath(normed, self._root)
860 storemap[normed] = folded
867 storemap[normed] = folded
861
868
862 return folded
869 return folded
863
870
864 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
871 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
865 normed = util.normcase(path)
872 normed = util.normcase(path)
866 folded = self._map.filefoldmap.get(normed, None)
873 folded = self._map.filefoldmap.get(normed, None)
867 if folded is None:
874 if folded is None:
868 if isknown:
875 if isknown:
869 folded = path
876 folded = path
870 else:
877 else:
871 folded = self._discoverpath(
878 folded = self._discoverpath(
872 path, normed, ignoremissing, exists, self._map.filefoldmap
879 path, normed, ignoremissing, exists, self._map.filefoldmap
873 )
880 )
874 return folded
881 return folded
875
882
876 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
883 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
877 normed = util.normcase(path)
884 normed = util.normcase(path)
878 folded = self._map.filefoldmap.get(normed, None)
885 folded = self._map.filefoldmap.get(normed, None)
879 if folded is None:
886 if folded is None:
880 folded = self._map.dirfoldmap.get(normed, None)
887 folded = self._map.dirfoldmap.get(normed, None)
881 if folded is None:
888 if folded is None:
882 if isknown:
889 if isknown:
883 folded = path
890 folded = path
884 else:
891 else:
885 # store discovered result in dirfoldmap so that future
892 # store discovered result in dirfoldmap so that future
886 # normalizefile calls don't start matching directories
893 # normalizefile calls don't start matching directories
887 folded = self._discoverpath(
894 folded = self._discoverpath(
888 path, normed, ignoremissing, exists, self._map.dirfoldmap
895 path, normed, ignoremissing, exists, self._map.dirfoldmap
889 )
896 )
890 return folded
897 return folded
891
898
892 def normalize(self, path, isknown=False, ignoremissing=False):
899 def normalize(self, path, isknown=False, ignoremissing=False):
893 """
900 """
894 normalize the case of a pathname when on a casefolding filesystem
901 normalize the case of a pathname when on a casefolding filesystem
895
902
896 isknown specifies whether the filename came from walking the
903 isknown specifies whether the filename came from walking the
897 disk, to avoid extra filesystem access.
904 disk, to avoid extra filesystem access.
898
905
899 If ignoremissing is True, missing path are returned
906 If ignoremissing is True, missing path are returned
900 unchanged. Otherwise, we try harder to normalize possibly
907 unchanged. Otherwise, we try harder to normalize possibly
901 existing path components.
908 existing path components.
902
909
903 The normalized case is determined based on the following precedence:
910 The normalized case is determined based on the following precedence:
904
911
905 - version of name already stored in the dirstate
912 - version of name already stored in the dirstate
906 - version of name stored on disk
913 - version of name stored on disk
907 - version provided via command arguments
914 - version provided via command arguments
908 """
915 """
909
916
910 if self._checkcase:
917 if self._checkcase:
911 return self._normalize(path, isknown, ignoremissing)
918 return self._normalize(path, isknown, ignoremissing)
912 return path
919 return path
913
920
914 def clear(self):
921 def clear(self):
915 self._map.clear()
922 self._map.clear()
916 self._lastnormaltime = 0
923 self._lastnormaltime = 0
917 self._updatedfiles.clear()
924 self._updatedfiles.clear()
918 self._dirty = True
925 self._dirty = True
919
926
920 def rebuild(self, parent, allfiles, changedfiles=None):
927 def rebuild(self, parent, allfiles, changedfiles=None):
921 if changedfiles is None:
928 if changedfiles is None:
922 # Rebuild entire dirstate
929 # Rebuild entire dirstate
923 to_lookup = allfiles
930 to_lookup = allfiles
924 to_drop = []
931 to_drop = []
925 lastnormaltime = self._lastnormaltime
932 lastnormaltime = self._lastnormaltime
926 self.clear()
933 self.clear()
927 self._lastnormaltime = lastnormaltime
934 self._lastnormaltime = lastnormaltime
928 elif len(changedfiles) < 10:
935 elif len(changedfiles) < 10:
929 # Avoid turning allfiles into a set, which can be expensive if it's
936 # Avoid turning allfiles into a set, which can be expensive if it's
930 # large.
937 # large.
931 to_lookup = []
938 to_lookup = []
932 to_drop = []
939 to_drop = []
933 for f in changedfiles:
940 for f in changedfiles:
934 if f in allfiles:
941 if f in allfiles:
935 to_lookup.append(f)
942 to_lookup.append(f)
936 else:
943 else:
937 to_drop.append(f)
944 to_drop.append(f)
938 else:
945 else:
939 changedfilesset = set(changedfiles)
946 changedfilesset = set(changedfiles)
940 to_lookup = changedfilesset & set(allfiles)
947 to_lookup = changedfilesset & set(allfiles)
941 to_drop = changedfilesset - to_lookup
948 to_drop = changedfilesset - to_lookup
942
949
943 if self._origpl is None:
950 if self._origpl is None:
944 self._origpl = self._pl
951 self._origpl = self._pl
945 self._map.setparents(parent, self._nodeconstants.nullid)
952 self._map.setparents(parent, self._nodeconstants.nullid)
946
953
947 for f in to_lookup:
954 for f in to_lookup:
948 self.normallookup(f)
955 self.normallookup(f)
949 for f in to_drop:
956 for f in to_drop:
950 self._drop(f)
957 self._drop(f)
951
958
952 self._dirty = True
959 self._dirty = True
953
960
954 def identity(self):
961 def identity(self):
955 """Return identity of dirstate itself to detect changing in storage
962 """Return identity of dirstate itself to detect changing in storage
956
963
957 If identity of previous dirstate is equal to this, writing
964 If identity of previous dirstate is equal to this, writing
958 changes based on the former dirstate out can keep consistency.
965 changes based on the former dirstate out can keep consistency.
959 """
966 """
960 return self._map.identity
967 return self._map.identity
961
968
962 def write(self, tr):
969 def write(self, tr):
963 if not self._dirty:
970 if not self._dirty:
964 return
971 return
965
972
966 filename = self._filename
973 filename = self._filename
967 if tr:
974 if tr:
968 # 'dirstate.write()' is not only for writing in-memory
975 # 'dirstate.write()' is not only for writing in-memory
969 # changes out, but also for dropping ambiguous timestamp.
976 # changes out, but also for dropping ambiguous timestamp.
970 # delayed writing re-raise "ambiguous timestamp issue".
977 # delayed writing re-raise "ambiguous timestamp issue".
971 # See also the wiki page below for detail:
978 # See also the wiki page below for detail:
972 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
979 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
973
980
974 # emulate dropping timestamp in 'parsers.pack_dirstate'
981 # emulate dropping timestamp in 'parsers.pack_dirstate'
975 now = _getfsnow(self._opener)
982 now = _getfsnow(self._opener)
976 self._map.clearambiguoustimes(self._updatedfiles, now)
983 self._map.clearambiguoustimes(self._updatedfiles, now)
977
984
978 # emulate that all 'dirstate.normal' results are written out
985 # emulate that all 'dirstate.normal' results are written out
979 self._lastnormaltime = 0
986 self._lastnormaltime = 0
980 self._updatedfiles.clear()
987 self._updatedfiles.clear()
981
988
982 # delay writing in-memory changes out
989 # delay writing in-memory changes out
983 tr.addfilegenerator(
990 tr.addfilegenerator(
984 b'dirstate',
991 b'dirstate',
985 (self._filename,),
992 (self._filename,),
986 lambda f: self._writedirstate(tr, f),
993 lambda f: self._writedirstate(tr, f),
987 location=b'plain',
994 location=b'plain',
988 )
995 )
989 return
996 return
990
997
991 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
998 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
992 self._writedirstate(tr, st)
999 self._writedirstate(tr, st)
993
1000
994 def addparentchangecallback(self, category, callback):
1001 def addparentchangecallback(self, category, callback):
995 """add a callback to be called when the wd parents are changed
1002 """add a callback to be called when the wd parents are changed
996
1003
997 Callback will be called with the following arguments:
1004 Callback will be called with the following arguments:
998 dirstate, (oldp1, oldp2), (newp1, newp2)
1005 dirstate, (oldp1, oldp2), (newp1, newp2)
999
1006
1000 Category is a unique identifier to allow overwriting an old callback
1007 Category is a unique identifier to allow overwriting an old callback
1001 with a newer callback.
1008 with a newer callback.
1002 """
1009 """
1003 self._plchangecallbacks[category] = callback
1010 self._plchangecallbacks[category] = callback
1004
1011
1005 def _writedirstate(self, tr, st):
1012 def _writedirstate(self, tr, st):
1006 # notify callbacks about parents change
1013 # notify callbacks about parents change
1007 if self._origpl is not None and self._origpl != self._pl:
1014 if self._origpl is not None and self._origpl != self._pl:
1008 for c, callback in sorted(
1015 for c, callback in sorted(
1009 pycompat.iteritems(self._plchangecallbacks)
1016 pycompat.iteritems(self._plchangecallbacks)
1010 ):
1017 ):
1011 callback(self, self._origpl, self._pl)
1018 callback(self, self._origpl, self._pl)
1012 self._origpl = None
1019 self._origpl = None
1013 # use the modification time of the newly created temporary file as the
1020 # use the modification time of the newly created temporary file as the
1014 # filesystem's notion of 'now'
1021 # filesystem's notion of 'now'
1015 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
1022 now = util.fstat(st)[stat.ST_MTIME] & _rangemask
1016
1023
1017 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
1024 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
1018 # timestamp of each entries in dirstate, because of 'now > mtime'
1025 # timestamp of each entries in dirstate, because of 'now > mtime'
1019 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
1026 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
1020 if delaywrite > 0:
1027 if delaywrite > 0:
1021 # do we have any files to delay for?
1028 # do we have any files to delay for?
1022 for f, e in pycompat.iteritems(self._map):
1029 for f, e in pycompat.iteritems(self._map):
1023 if e.need_delay(now):
1030 if e.need_delay(now):
1024 import time # to avoid useless import
1031 import time # to avoid useless import
1025
1032
1026 # rather than sleep n seconds, sleep until the next
1033 # rather than sleep n seconds, sleep until the next
1027 # multiple of n seconds
1034 # multiple of n seconds
1028 clock = time.time()
1035 clock = time.time()
1029 start = int(clock) - (int(clock) % delaywrite)
1036 start = int(clock) - (int(clock) % delaywrite)
1030 end = start + delaywrite
1037 end = start + delaywrite
1031 time.sleep(end - clock)
1038 time.sleep(end - clock)
1032 now = end # trust our estimate that the end is near now
1039 now = end # trust our estimate that the end is near now
1033 break
1040 break
1034
1041
1035 self._map.write(tr, st, now)
1042 self._map.write(tr, st, now)
1036 self._lastnormaltime = 0
1043 self._lastnormaltime = 0
1037 self._dirty = False
1044 self._dirty = False
1038
1045
1039 def _dirignore(self, f):
1046 def _dirignore(self, f):
1040 if self._ignore(f):
1047 if self._ignore(f):
1041 return True
1048 return True
1042 for p in pathutil.finddirs(f):
1049 for p in pathutil.finddirs(f):
1043 if self._ignore(p):
1050 if self._ignore(p):
1044 return True
1051 return True
1045 return False
1052 return False
1046
1053
1047 def _ignorefiles(self):
1054 def _ignorefiles(self):
1048 files = []
1055 files = []
1049 if os.path.exists(self._join(b'.hgignore')):
1056 if os.path.exists(self._join(b'.hgignore')):
1050 files.append(self._join(b'.hgignore'))
1057 files.append(self._join(b'.hgignore'))
1051 for name, path in self._ui.configitems(b"ui"):
1058 for name, path in self._ui.configitems(b"ui"):
1052 if name == b'ignore' or name.startswith(b'ignore.'):
1059 if name == b'ignore' or name.startswith(b'ignore.'):
1053 # we need to use os.path.join here rather than self._join
1060 # we need to use os.path.join here rather than self._join
1054 # because path is arbitrary and user-specified
1061 # because path is arbitrary and user-specified
1055 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1062 files.append(os.path.join(self._rootdir, util.expandpath(path)))
1056 return files
1063 return files
1057
1064
1058 def _ignorefileandline(self, f):
1065 def _ignorefileandline(self, f):
1059 files = collections.deque(self._ignorefiles())
1066 files = collections.deque(self._ignorefiles())
1060 visited = set()
1067 visited = set()
1061 while files:
1068 while files:
1062 i = files.popleft()
1069 i = files.popleft()
1063 patterns = matchmod.readpatternfile(
1070 patterns = matchmod.readpatternfile(
1064 i, self._ui.warn, sourceinfo=True
1071 i, self._ui.warn, sourceinfo=True
1065 )
1072 )
1066 for pattern, lineno, line in patterns:
1073 for pattern, lineno, line in patterns:
1067 kind, p = matchmod._patsplit(pattern, b'glob')
1074 kind, p = matchmod._patsplit(pattern, b'glob')
1068 if kind == b"subinclude":
1075 if kind == b"subinclude":
1069 if p not in visited:
1076 if p not in visited:
1070 files.append(p)
1077 files.append(p)
1071 continue
1078 continue
1072 m = matchmod.match(
1079 m = matchmod.match(
1073 self._root, b'', [], [pattern], warn=self._ui.warn
1080 self._root, b'', [], [pattern], warn=self._ui.warn
1074 )
1081 )
1075 if m(f):
1082 if m(f):
1076 return (i, lineno, line)
1083 return (i, lineno, line)
1077 visited.add(i)
1084 visited.add(i)
1078 return (None, -1, b"")
1085 return (None, -1, b"")
1079
1086
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            # Map an unsupported stat mode to a human-readable message for
            # match.bad(); regular files, symlinks and dirs never reach here.
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        # Bind frequently-used attributes/functions to locals for speed in
        # the per-file loop below.
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        # Walk files and subrepos in lockstep (both sorted) and drop any
        # explicit file that lies inside a subrepo path prefix.
        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        # Sentinel None entries for subrepos and .hg keep later walks from
        # descending into them.
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            # Group result paths by their case-normalized form.
            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            # For ambiguous groups, keep only the on-disk casing.
            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound
1214
1221
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        # Select the ignore predicates based on what the caller wants listed.
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        # Bind hot attributes to locals for the traversal loops below.
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            # Iterative depth-first traversal; mutates `results` in the
            # enclosing scope and uses `work` as an explicit stack.
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    # Report unreadable/vanished directories via match.bad
                    # and keep walking; re-raise anything unexpected.
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        # Drop the sentinel entries installed by _walkexplicit.
        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that wasn't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
1402
1409
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        """Compute status via the Rust implementation (``rustmod.status``).

        Returns the same ``(lookup, scmutil.status)`` pair as ``status()``.
        Warnings and bad-file reports coming back from Rust are forwarded to
        ``self._ui.warn`` and ``matcher.bad`` respectively.  May raise
        ``rustmod.FallbackError`` (caught by the caller) when the Rust path
        cannot handle the request.
        """
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        # The Rust call may have mutated the map; remember that it needs
        # writing out.
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    # (file_path, syntax) pair: invalid syntax in an ignore file
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    # bare path: the pattern file could not be read
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)
1481
1488
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        # Keep the list_* flags under new names; the parameter names are
        # reused below as the result accumulators.
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        # Decide whether the Rust fast path can handle this request.
        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                # Rust could not handle it after all; fall back to Python.
                pass

        def noop(f):
            # placeholder "append" used when a category is not requested
            pass

        # Local bindings for the hot loop below.
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                # File exists on disk but is not tracked: ignored or unknown.
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
1622
1629
1623 def matches(self, match):
1630 def matches(self, match):
1624 """
1631 """
1625 return files in the dirstate (in whatever state) filtered by match
1632 return files in the dirstate (in whatever state) filtered by match
1626 """
1633 """
1627 dmap = self._map
1634 dmap = self._map
1628 if rustmod is not None:
1635 if rustmod is not None:
1629 dmap = self._map._rustmap
1636 dmap = self._map._rustmap
1630
1637
1631 if match.always():
1638 if match.always():
1632 return dmap.keys()
1639 return dmap.keys()
1633 files = match.files()
1640 files = match.files()
1634 if match.isexact():
1641 if match.isexact():
1635 # fast path -- filter the other way around, since typically files is
1642 # fast path -- filter the other way around, since typically files is
1636 # much smaller than dmap
1643 # much smaller than dmap
1637 return [f for f in files if f in dmap]
1644 return [f for f in files if f in dmap]
1638 if match.prefix() and all(fn in dmap for fn in files):
1645 if match.prefix() and all(fn in dmap for fn in files):
1639 # fast path -- all the values are known to be files, so just return
1646 # fast path -- all the values are known to be files, so just return
1640 # that
1647 # that
1641 return list(files)
1648 return list(files)
1642 return [f for f in dmap if match(f)]
1649 return [f for f in dmap if match(f)]
1643
1650
1644 def _actualfilename(self, tr):
1651 def _actualfilename(self, tr):
1645 if tr:
1652 if tr:
1646 return self._pendingfilename
1653 return self._pendingfilename
1647 else:
1654 else:
1648 return self._filename
1655 return self._filename
1649
1656
1650 def savebackup(self, tr, backupname):
1657 def savebackup(self, tr, backupname):
1651 '''Save current dirstate into backup file'''
1658 '''Save current dirstate into backup file'''
1652 filename = self._actualfilename(tr)
1659 filename = self._actualfilename(tr)
1653 assert backupname != filename
1660 assert backupname != filename
1654
1661
1655 # use '_writedirstate' instead of 'write' to write changes certainly,
1662 # use '_writedirstate' instead of 'write' to write changes certainly,
1656 # because the latter omits writing out if transaction is running.
1663 # because the latter omits writing out if transaction is running.
1657 # output file will be used to create backup of dirstate at this point.
1664 # output file will be used to create backup of dirstate at this point.
1658 if self._dirty or not self._opener.exists(filename):
1665 if self._dirty or not self._opener.exists(filename):
1659 self._writedirstate(
1666 self._writedirstate(
1660 tr,
1667 tr,
1661 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1668 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1662 )
1669 )
1663
1670
1664 if tr:
1671 if tr:
1665 # ensure that subsequent tr.writepending returns True for
1672 # ensure that subsequent tr.writepending returns True for
1666 # changes written out above, even if dirstate is never
1673 # changes written out above, even if dirstate is never
1667 # changed after this
1674 # changed after this
1668 tr.addfilegenerator(
1675 tr.addfilegenerator(
1669 b'dirstate',
1676 b'dirstate',
1670 (self._filename,),
1677 (self._filename,),
1671 lambda f: self._writedirstate(tr, f),
1678 lambda f: self._writedirstate(tr, f),
1672 location=b'plain',
1679 location=b'plain',
1673 )
1680 )
1674
1681
1675 # ensure that pending file written above is unlinked at
1682 # ensure that pending file written above is unlinked at
1676 # failure, even if tr.writepending isn't invoked until the
1683 # failure, even if tr.writepending isn't invoked until the
1677 # end of this transaction
1684 # end of this transaction
1678 tr.registertmp(filename, location=b'plain')
1685 tr.registertmp(filename, location=b'plain')
1679
1686
1680 self._opener.tryunlink(backupname)
1687 self._opener.tryunlink(backupname)
1681 # hardlink backup is okay because _writedirstate is always called
1688 # hardlink backup is okay because _writedirstate is always called
1682 # with an "atomictemp=True" file.
1689 # with an "atomictemp=True" file.
1683 util.copyfile(
1690 util.copyfile(
1684 self._opener.join(filename),
1691 self._opener.join(filename),
1685 self._opener.join(backupname),
1692 self._opener.join(backupname),
1686 hardlink=True,
1693 hardlink=True,
1687 )
1694 )
1688
1695
1689 def restorebackup(self, tr, backupname):
1696 def restorebackup(self, tr, backupname):
1690 '''Restore dirstate by backup file'''
1697 '''Restore dirstate by backup file'''
1691 # this "invalidate()" prevents "wlock.release()" from writing
1698 # this "invalidate()" prevents "wlock.release()" from writing
1692 # changes of dirstate out after restoring from backup file
1699 # changes of dirstate out after restoring from backup file
1693 self.invalidate()
1700 self.invalidate()
1694 filename = self._actualfilename(tr)
1701 filename = self._actualfilename(tr)
1695 o = self._opener
1702 o = self._opener
1696 if util.samefile(o.join(backupname), o.join(filename)):
1703 if util.samefile(o.join(backupname), o.join(filename)):
1697 o.unlink(backupname)
1704 o.unlink(backupname)
1698 else:
1705 else:
1699 o.rename(backupname, filename, checkambig=True)
1706 o.rename(backupname, filename, checkambig=True)
1700
1707
1701 def clearbackup(self, tr, backupname):
1708 def clearbackup(self, tr, backupname):
1702 '''Clear backup file'''
1709 '''Clear backup file'''
1703 self._opener.unlink(backupname)
1710 self._opener.unlink(backupname)
@@ -1,912 +1,922 b''
1 # dirstatemap.py
1 # dirstatemap.py
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import errno
8 import errno
9
9
10 from .i18n import _
10 from .i18n import _
11
11
12 from . import (
12 from . import (
13 error,
13 error,
14 pathutil,
14 pathutil,
15 policy,
15 policy,
16 pycompat,
16 pycompat,
17 txnutil,
17 txnutil,
18 util,
18 util,
19 )
19 )
20
20
21 from .dirstateutils import (
21 from .dirstateutils import (
22 docket as docketmod,
22 docket as docketmod,
23 )
23 )
24
24
25 parsers = policy.importmod('parsers')
25 parsers = policy.importmod('parsers')
26 rustmod = policy.importrust('dirstate')
26 rustmod = policy.importrust('dirstate')
27
27
28 propertycache = util.propertycache
28 propertycache = util.propertycache
29
29
30 DirstateItem = parsers.DirstateItem
30 DirstateItem = parsers.DirstateItem
31
31
32
32
33 # a special value used internally for `size` if the file come from the other parent
33 # a special value used internally for `size` if the file come from the other parent
34 FROM_P2 = -2
34 FROM_P2 = -2
35
35
36 # a special value used internally for `size` if the file is modified/merged/added
36 # a special value used internally for `size` if the file is modified/merged/added
37 NONNORMAL = -1
37 NONNORMAL = -1
38
38
39 # a special value used internally for `time` if the time is ambigeous
39 # a special value used internally for `time` if the time is ambigeous
40 AMBIGUOUS_TIME = -1
40 AMBIGUOUS_TIME = -1
41
41
42 rangemask = 0x7FFFFFFF
42 rangemask = 0x7FFFFFFF
43
43
44
44
45 class dirstatemap(object):
45 class dirstatemap(object):
46 """Map encapsulating the dirstate's contents.
46 """Map encapsulating the dirstate's contents.
47
47
48 The dirstate contains the following state:
48 The dirstate contains the following state:
49
49
50 - `identity` is the identity of the dirstate file, which can be used to
50 - `identity` is the identity of the dirstate file, which can be used to
51 detect when changes have occurred to the dirstate file.
51 detect when changes have occurred to the dirstate file.
52
52
53 - `parents` is a pair containing the parents of the working copy. The
53 - `parents` is a pair containing the parents of the working copy. The
54 parents are updated by calling `setparents`.
54 parents are updated by calling `setparents`.
55
55
56 - the state map maps filenames to tuples of (state, mode, size, mtime),
56 - the state map maps filenames to tuples of (state, mode, size, mtime),
57 where state is a single character representing 'normal', 'added',
57 where state is a single character representing 'normal', 'added',
58 'removed', or 'merged'. It is read by treating the dirstate as a
58 'removed', or 'merged'. It is read by treating the dirstate as a
59 dict. File state is updated by calling the `addfile`, `removefile` and
59 dict. File state is updated by calling the `addfile`, `removefile` and
60 `dropfile` methods.
60 `dropfile` methods.
61
61
62 - `copymap` maps destination filenames to their source filename.
62 - `copymap` maps destination filenames to their source filename.
63
63
64 The dirstate also provides the following views onto the state:
64 The dirstate also provides the following views onto the state:
65
65
66 - `nonnormalset` is a set of the filenames that have state other
66 - `nonnormalset` is a set of the filenames that have state other
67 than 'normal', or are normal but have an mtime of -1 ('normallookup').
67 than 'normal', or are normal but have an mtime of -1 ('normallookup').
68
68
69 - `otherparentset` is a set of the filenames that are marked as coming
69 - `otherparentset` is a set of the filenames that are marked as coming
70 from the second parent when the dirstate is currently being merged.
70 from the second parent when the dirstate is currently being merged.
71
71
72 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
72 - `filefoldmap` is a dict mapping normalized filenames to the denormalized
73 form that they appear as in the dirstate.
73 form that they appear as in the dirstate.
74
74
75 - `dirfoldmap` is a dict mapping normalized directory names to the
75 - `dirfoldmap` is a dict mapping normalized directory names to the
76 denormalized form that they appear as in the dirstate.
76 denormalized form that they appear as in the dirstate.
77 """
77 """
78
78
79 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
79 def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
80 self._ui = ui
80 self._ui = ui
81 self._opener = opener
81 self._opener = opener
82 self._root = root
82 self._root = root
83 self._filename = b'dirstate'
83 self._filename = b'dirstate'
84 self._nodelen = 20
84 self._nodelen = 20
85 self._nodeconstants = nodeconstants
85 self._nodeconstants = nodeconstants
86 assert (
86 assert (
87 not use_dirstate_v2
87 not use_dirstate_v2
88 ), "should have detected unsupported requirement"
88 ), "should have detected unsupported requirement"
89
89
90 self._parents = None
90 self._parents = None
91 self._dirtyparents = False
91 self._dirtyparents = False
92
92
93 # for consistent view between _pl() and _read() invocations
93 # for consistent view between _pl() and _read() invocations
94 self._pendingmode = None
94 self._pendingmode = None
95
95
96 @propertycache
96 @propertycache
97 def _map(self):
97 def _map(self):
98 self._map = {}
98 self._map = {}
99 self.read()
99 self.read()
100 return self._map
100 return self._map
101
101
102 @propertycache
102 @propertycache
103 def copymap(self):
103 def copymap(self):
104 self.copymap = {}
104 self.copymap = {}
105 self._map
105 self._map
106 return self.copymap
106 return self.copymap
107
107
108 def clear(self):
108 def clear(self):
109 self._map.clear()
109 self._map.clear()
110 self.copymap.clear()
110 self.copymap.clear()
111 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
111 self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
112 util.clearcachedproperty(self, b"_dirs")
112 util.clearcachedproperty(self, b"_dirs")
113 util.clearcachedproperty(self, b"_alldirs")
113 util.clearcachedproperty(self, b"_alldirs")
114 util.clearcachedproperty(self, b"filefoldmap")
114 util.clearcachedproperty(self, b"filefoldmap")
115 util.clearcachedproperty(self, b"dirfoldmap")
115 util.clearcachedproperty(self, b"dirfoldmap")
116 util.clearcachedproperty(self, b"nonnormalset")
116 util.clearcachedproperty(self, b"nonnormalset")
117 util.clearcachedproperty(self, b"otherparentset")
117 util.clearcachedproperty(self, b"otherparentset")
118
118
119 def items(self):
119 def items(self):
120 return pycompat.iteritems(self._map)
120 return pycompat.iteritems(self._map)
121
121
122 # forward for python2,3 compat
122 # forward for python2,3 compat
123 iteritems = items
123 iteritems = items
124
124
125 debug_iter = items
125 debug_iter = items
126
126
127 def __len__(self):
127 def __len__(self):
128 return len(self._map)
128 return len(self._map)
129
129
130 def __iter__(self):
130 def __iter__(self):
131 return iter(self._map)
131 return iter(self._map)
132
132
133 def get(self, key, default=None):
133 def get(self, key, default=None):
134 return self._map.get(key, default)
134 return self._map.get(key, default)
135
135
136 def __contains__(self, key):
136 def __contains__(self, key):
137 return key in self._map
137 return key in self._map
138
138
139 def __getitem__(self, key):
139 def __getitem__(self, key):
140 return self._map[key]
140 return self._map[key]
141
141
142 def keys(self):
142 def keys(self):
143 return self._map.keys()
143 return self._map.keys()
144
144
145 def preload(self):
145 def preload(self):
146 """Loads the underlying data, if it's not already loaded"""
146 """Loads the underlying data, if it's not already loaded"""
147 self._map
147 self._map
148
148
149 def _dirs_incr(self, filename, old_entry=None):
149 def _dirs_incr(self, filename, old_entry=None):
150 """incremente the dirstate counter if applicable"""
150 """incremente the dirstate counter if applicable"""
151 if (
151 if (
152 old_entry is None or old_entry.removed
152 old_entry is None or old_entry.removed
153 ) and "_dirs" in self.__dict__:
153 ) and "_dirs" in self.__dict__:
154 self._dirs.addpath(filename)
154 self._dirs.addpath(filename)
155 if old_entry is None and "_alldirs" in self.__dict__:
155 if old_entry is None and "_alldirs" in self.__dict__:
156 self._alldirs.addpath(filename)
156 self._alldirs.addpath(filename)
157
157
158 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
158 def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
159 """decremente the dirstate counter if applicable"""
159 """decremente the dirstate counter if applicable"""
160 if old_entry is not None:
160 if old_entry is not None:
161 if "_dirs" in self.__dict__ and not old_entry.removed:
161 if "_dirs" in self.__dict__ and not old_entry.removed:
162 self._dirs.delpath(filename)
162 self._dirs.delpath(filename)
163 if "_alldirs" in self.__dict__ and not remove_variant:
163 if "_alldirs" in self.__dict__ and not remove_variant:
164 self._alldirs.delpath(filename)
164 self._alldirs.delpath(filename)
165 elif remove_variant and "_alldirs" in self.__dict__:
165 elif remove_variant and "_alldirs" in self.__dict__:
166 self._alldirs.addpath(filename)
166 self._alldirs.addpath(filename)
167 if "filefoldmap" in self.__dict__:
167 if "filefoldmap" in self.__dict__:
168 normed = util.normcase(filename)
168 normed = util.normcase(filename)
169 self.filefoldmap.pop(normed, None)
169 self.filefoldmap.pop(normed, None)
170
170
171 def set_possibly_dirty(self, filename):
172 """record that the current state of the file on disk is unknown"""
173 self[filename].set_possibly_dirty()
174
171 def addfile(
175 def addfile(
172 self,
176 self,
173 f,
177 f,
174 mode=0,
178 mode=0,
175 size=None,
179 size=None,
176 mtime=None,
180 mtime=None,
177 added=False,
181 added=False,
178 merged=False,
182 merged=False,
179 from_p2=False,
183 from_p2=False,
180 possibly_dirty=False,
184 possibly_dirty=False,
181 ):
185 ):
182 """Add a tracked file to the dirstate."""
186 """Add a tracked file to the dirstate."""
183 if added:
187 if added:
184 assert not merged
188 assert not merged
185 assert not possibly_dirty
189 assert not possibly_dirty
186 assert not from_p2
190 assert not from_p2
187 state = b'a'
191 state = b'a'
188 size = NONNORMAL
192 size = NONNORMAL
189 mtime = AMBIGUOUS_TIME
193 mtime = AMBIGUOUS_TIME
190 elif merged:
194 elif merged:
191 assert not possibly_dirty
195 assert not possibly_dirty
192 assert not from_p2
196 assert not from_p2
193 state = b'm'
197 state = b'm'
194 size = FROM_P2
198 size = FROM_P2
195 mtime = AMBIGUOUS_TIME
199 mtime = AMBIGUOUS_TIME
196 elif from_p2:
200 elif from_p2:
197 assert not possibly_dirty
201 assert not possibly_dirty
198 state = b'n'
202 state = b'n'
199 size = FROM_P2
203 size = FROM_P2
200 mtime = AMBIGUOUS_TIME
204 mtime = AMBIGUOUS_TIME
201 elif possibly_dirty:
205 elif possibly_dirty:
202 state = b'n'
206 state = b'n'
203 size = NONNORMAL
207 size = NONNORMAL
204 mtime = AMBIGUOUS_TIME
208 mtime = AMBIGUOUS_TIME
205 else:
209 else:
206 assert size != FROM_P2
210 assert size != FROM_P2
207 assert size != NONNORMAL
211 assert size != NONNORMAL
208 state = b'n'
212 state = b'n'
209 size = size & rangemask
213 size = size & rangemask
210 mtime = mtime & rangemask
214 mtime = mtime & rangemask
211 assert state is not None
215 assert state is not None
212 assert size is not None
216 assert size is not None
213 assert mtime is not None
217 assert mtime is not None
214 old_entry = self.get(f)
218 old_entry = self.get(f)
215 self._dirs_incr(f, old_entry)
219 self._dirs_incr(f, old_entry)
216 e = self._map[f] = DirstateItem(state, mode, size, mtime)
220 e = self._map[f] = DirstateItem(state, mode, size, mtime)
217 if e.dm_nonnormal:
221 if e.dm_nonnormal:
218 self.nonnormalset.add(f)
222 self.nonnormalset.add(f)
219 if e.dm_otherparent:
223 if e.dm_otherparent:
220 self.otherparentset.add(f)
224 self.otherparentset.add(f)
221
225
222 def reset_state(
226 def reset_state(
223 self,
227 self,
224 filename,
228 filename,
225 wc_tracked,
229 wc_tracked,
226 p1_tracked,
230 p1_tracked,
227 p2_tracked=False,
231 p2_tracked=False,
228 merged=False,
232 merged=False,
229 clean_p1=False,
233 clean_p1=False,
230 clean_p2=False,
234 clean_p2=False,
231 possibly_dirty=False,
235 possibly_dirty=False,
232 parentfiledata=None,
236 parentfiledata=None,
233 ):
237 ):
234 """Set a entry to a given state, diregarding all previous state
238 """Set a entry to a given state, diregarding all previous state
235
239
236 This is to be used by the part of the dirstate API dedicated to
240 This is to be used by the part of the dirstate API dedicated to
237 adjusting the dirstate after a update/merge.
241 adjusting the dirstate after a update/merge.
238
242
239 note: calling this might result to no entry existing at all if the
243 note: calling this might result to no entry existing at all if the
240 dirstate map does not see any point at having one for this file
244 dirstate map does not see any point at having one for this file
241 anymore.
245 anymore.
242 """
246 """
243 if merged and (clean_p1 or clean_p2):
247 if merged and (clean_p1 or clean_p2):
244 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
248 msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
245 raise error.ProgrammingError(msg)
249 raise error.ProgrammingError(msg)
246 # copy information are now outdated
250 # copy information are now outdated
247 # (maybe new information should be in directly passed to this function)
251 # (maybe new information should be in directly passed to this function)
248 self.copymap.pop(filename, None)
252 self.copymap.pop(filename, None)
249
253
250 if not (p1_tracked or p2_tracked or wc_tracked):
254 if not (p1_tracked or p2_tracked or wc_tracked):
251 self.dropfile(filename)
255 self.dropfile(filename)
252 elif merged:
256 elif merged:
253 # XXX might be merged and removed ?
257 # XXX might be merged and removed ?
254 entry = self.get(filename)
258 entry = self.get(filename)
255 if entry is not None and entry.tracked:
259 if entry is not None and entry.tracked:
256 # XXX mostly replicate dirstate.other parent. We should get
260 # XXX mostly replicate dirstate.other parent. We should get
257 # the higher layer to pass us more reliable data where `merged`
261 # the higher layer to pass us more reliable data where `merged`
258 # actually mean merged. Dropping the else clause will show
262 # actually mean merged. Dropping the else clause will show
259 # failure in `test-graft.t`
263 # failure in `test-graft.t`
260 self.addfile(filename, merged=True)
264 self.addfile(filename, merged=True)
261 else:
265 else:
262 self.addfile(filename, from_p2=True)
266 self.addfile(filename, from_p2=True)
263 elif not (p1_tracked or p2_tracked) and wc_tracked:
267 elif not (p1_tracked or p2_tracked) and wc_tracked:
264 self.addfile(filename, added=True, possibly_dirty=possibly_dirty)
268 self.addfile(filename, added=True, possibly_dirty=possibly_dirty)
265 elif (p1_tracked or p2_tracked) and not wc_tracked:
269 elif (p1_tracked or p2_tracked) and not wc_tracked:
266 # XXX might be merged and removed ?
270 # XXX might be merged and removed ?
267 old_entry = self._map.get(filename)
271 old_entry = self._map.get(filename)
268 self._dirs_decr(filename, old_entry=old_entry, remove_variant=True)
272 self._dirs_decr(filename, old_entry=old_entry, remove_variant=True)
269 self._map[filename] = DirstateItem(b'r', 0, 0, 0)
273 self._map[filename] = DirstateItem(b'r', 0, 0, 0)
270 self.nonnormalset.add(filename)
274 self.nonnormalset.add(filename)
271 elif clean_p2 and wc_tracked:
275 elif clean_p2 and wc_tracked:
272 if p1_tracked or self.get(filename) is not None:
276 if p1_tracked or self.get(filename) is not None:
273 # XXX the `self.get` call is catching some case in
277 # XXX the `self.get` call is catching some case in
274 # `test-merge-remove.t` where the file is tracked in p1, the
278 # `test-merge-remove.t` where the file is tracked in p1, the
275 # p1_tracked argument is False.
279 # p1_tracked argument is False.
276 #
280 #
277 # In addition, this seems to be a case where the file is marked
281 # In addition, this seems to be a case where the file is marked
278 # as merged without actually being the result of a merge
282 # as merged without actually being the result of a merge
279 # action. So thing are not ideal here.
283 # action. So thing are not ideal here.
280 self.addfile(filename, merged=True)
284 self.addfile(filename, merged=True)
281 else:
285 else:
282 self.addfile(filename, from_p2=True)
286 self.addfile(filename, from_p2=True)
283 elif not p1_tracked and p2_tracked and wc_tracked:
287 elif not p1_tracked and p2_tracked and wc_tracked:
284 self.addfile(filename, from_p2=True, possibly_dirty=possibly_dirty)
288 self.addfile(filename, from_p2=True, possibly_dirty=possibly_dirty)
285 elif possibly_dirty:
289 elif possibly_dirty:
286 self.addfile(filename, possibly_dirty=possibly_dirty)
290 self.addfile(filename, possibly_dirty=possibly_dirty)
287 elif wc_tracked:
291 elif wc_tracked:
288 # this is a "normal" file
292 # this is a "normal" file
289 if parentfiledata is None:
293 if parentfiledata is None:
290 msg = b'failed to pass parentfiledata for a normal file: %s'
294 msg = b'failed to pass parentfiledata for a normal file: %s'
291 msg %= filename
295 msg %= filename
292 raise error.ProgrammingError(msg)
296 raise error.ProgrammingError(msg)
293 mode, size, mtime = parentfiledata
297 mode, size, mtime = parentfiledata
294 self.addfile(filename, mode=mode, size=size, mtime=mtime)
298 self.addfile(filename, mode=mode, size=size, mtime=mtime)
295 self.nonnormalset.discard(filename)
299 self.nonnormalset.discard(filename)
296 else:
300 else:
297 assert False, 'unreachable'
301 assert False, 'unreachable'
298
302
299 def removefile(self, f, in_merge=False):
303 def removefile(self, f, in_merge=False):
300 """
304 """
301 Mark a file as removed in the dirstate.
305 Mark a file as removed in the dirstate.
302
306
303 The `size` parameter is used to store sentinel values that indicate
307 The `size` parameter is used to store sentinel values that indicate
304 the file's previous state. In the future, we should refactor this
308 the file's previous state. In the future, we should refactor this
305 to be more explicit about what that state is.
309 to be more explicit about what that state is.
306 """
310 """
307 entry = self.get(f)
311 entry = self.get(f)
308 size = 0
312 size = 0
309 if in_merge:
313 if in_merge:
310 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
314 # XXX we should not be able to have 'm' state and 'FROM_P2' if not
311 # during a merge. So I (marmoute) am not sure we need the
315 # during a merge. So I (marmoute) am not sure we need the
312 # conditionnal at all. Adding double checking this with assert
316 # conditionnal at all. Adding double checking this with assert
313 # would be nice.
317 # would be nice.
314 if entry is not None:
318 if entry is not None:
315 # backup the previous state
319 # backup the previous state
316 if entry.merged: # merge
320 if entry.merged: # merge
317 size = NONNORMAL
321 size = NONNORMAL
318 elif entry.from_p2:
322 elif entry.from_p2:
319 size = FROM_P2
323 size = FROM_P2
320 self.otherparentset.add(f)
324 self.otherparentset.add(f)
321 if entry is not None and not (entry.merged or entry.from_p2):
325 if entry is not None and not (entry.merged or entry.from_p2):
322 self.copymap.pop(f, None)
326 self.copymap.pop(f, None)
323 self._dirs_decr(f, old_entry=entry, remove_variant=True)
327 self._dirs_decr(f, old_entry=entry, remove_variant=True)
324 self._map[f] = DirstateItem(b'r', 0, size, 0)
328 self._map[f] = DirstateItem(b'r', 0, size, 0)
325 self.nonnormalset.add(f)
329 self.nonnormalset.add(f)
326
330
327 def dropfile(self, f):
331 def dropfile(self, f):
328 """
332 """
329 Remove a file from the dirstate. Returns True if the file was
333 Remove a file from the dirstate. Returns True if the file was
330 previously recorded.
334 previously recorded.
331 """
335 """
332 old_entry = self._map.pop(f, None)
336 old_entry = self._map.pop(f, None)
333 self._dirs_decr(f, old_entry=old_entry)
337 self._dirs_decr(f, old_entry=old_entry)
334 self.nonnormalset.discard(f)
338 self.nonnormalset.discard(f)
335 return old_entry is not None
339 return old_entry is not None
336
340
337 def clearambiguoustimes(self, files, now):
341 def clearambiguoustimes(self, files, now):
338 for f in files:
342 for f in files:
339 e = self.get(f)
343 e = self.get(f)
340 if e is not None and e.need_delay(now):
344 if e is not None and e.need_delay(now):
341 e.set_possibly_dirty()
345 e.set_possibly_dirty()
342 self.nonnormalset.add(f)
346 self.nonnormalset.add(f)
343
347
344 def nonnormalentries(self):
348 def nonnormalentries(self):
345 '''Compute the nonnormal dirstate entries from the dmap'''
349 '''Compute the nonnormal dirstate entries from the dmap'''
346 try:
350 try:
347 return parsers.nonnormalotherparententries(self._map)
351 return parsers.nonnormalotherparententries(self._map)
348 except AttributeError:
352 except AttributeError:
349 nonnorm = set()
353 nonnorm = set()
350 otherparent = set()
354 otherparent = set()
351 for fname, e in pycompat.iteritems(self._map):
355 for fname, e in pycompat.iteritems(self._map):
352 if e.dm_nonnormal:
356 if e.dm_nonnormal:
353 nonnorm.add(fname)
357 nonnorm.add(fname)
354 if e.from_p2:
358 if e.from_p2:
355 otherparent.add(fname)
359 otherparent.add(fname)
356 return nonnorm, otherparent
360 return nonnorm, otherparent
357
361
358 @propertycache
362 @propertycache
359 def filefoldmap(self):
363 def filefoldmap(self):
360 """Returns a dictionary mapping normalized case paths to their
364 """Returns a dictionary mapping normalized case paths to their
361 non-normalized versions.
365 non-normalized versions.
362 """
366 """
363 try:
367 try:
364 makefilefoldmap = parsers.make_file_foldmap
368 makefilefoldmap = parsers.make_file_foldmap
365 except AttributeError:
369 except AttributeError:
366 pass
370 pass
367 else:
371 else:
368 return makefilefoldmap(
372 return makefilefoldmap(
369 self._map, util.normcasespec, util.normcasefallback
373 self._map, util.normcasespec, util.normcasefallback
370 )
374 )
371
375
372 f = {}
376 f = {}
373 normcase = util.normcase
377 normcase = util.normcase
374 for name, s in pycompat.iteritems(self._map):
378 for name, s in pycompat.iteritems(self._map):
375 if not s.removed:
379 if not s.removed:
376 f[normcase(name)] = name
380 f[normcase(name)] = name
377 f[b'.'] = b'.' # prevents useless util.fspath() invocation
381 f[b'.'] = b'.' # prevents useless util.fspath() invocation
378 return f
382 return f
379
383
380 def hastrackeddir(self, d):
384 def hastrackeddir(self, d):
381 """
385 """
382 Returns True if the dirstate contains a tracked (not removed) file
386 Returns True if the dirstate contains a tracked (not removed) file
383 in this directory.
387 in this directory.
384 """
388 """
385 return d in self._dirs
389 return d in self._dirs
386
390
387 def hasdir(self, d):
391 def hasdir(self, d):
388 """
392 """
389 Returns True if the dirstate contains a file (tracked or removed)
393 Returns True if the dirstate contains a file (tracked or removed)
390 in this directory.
394 in this directory.
391 """
395 """
392 return d in self._alldirs
396 return d in self._alldirs
393
397
394 @propertycache
398 @propertycache
395 def _dirs(self):
399 def _dirs(self):
396 return pathutil.dirs(self._map, b'r')
400 return pathutil.dirs(self._map, b'r')
397
401
398 @propertycache
402 @propertycache
399 def _alldirs(self):
403 def _alldirs(self):
400 return pathutil.dirs(self._map)
404 return pathutil.dirs(self._map)
401
405
402 def _opendirstatefile(self):
406 def _opendirstatefile(self):
403 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
407 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
404 if self._pendingmode is not None and self._pendingmode != mode:
408 if self._pendingmode is not None and self._pendingmode != mode:
405 fp.close()
409 fp.close()
406 raise error.Abort(
410 raise error.Abort(
407 _(b'working directory state may be changed parallelly')
411 _(b'working directory state may be changed parallelly')
408 )
412 )
409 self._pendingmode = mode
413 self._pendingmode = mode
410 return fp
414 return fp
411
415
412 def parents(self):
416 def parents(self):
413 if not self._parents:
417 if not self._parents:
414 try:
418 try:
415 fp = self._opendirstatefile()
419 fp = self._opendirstatefile()
416 st = fp.read(2 * self._nodelen)
420 st = fp.read(2 * self._nodelen)
417 fp.close()
421 fp.close()
418 except IOError as err:
422 except IOError as err:
419 if err.errno != errno.ENOENT:
423 if err.errno != errno.ENOENT:
420 raise
424 raise
421 # File doesn't exist, so the current state is empty
425 # File doesn't exist, so the current state is empty
422 st = b''
426 st = b''
423
427
424 l = len(st)
428 l = len(st)
425 if l == self._nodelen * 2:
429 if l == self._nodelen * 2:
426 self._parents = (
430 self._parents = (
427 st[: self._nodelen],
431 st[: self._nodelen],
428 st[self._nodelen : 2 * self._nodelen],
432 st[self._nodelen : 2 * self._nodelen],
429 )
433 )
430 elif l == 0:
434 elif l == 0:
431 self._parents = (
435 self._parents = (
432 self._nodeconstants.nullid,
436 self._nodeconstants.nullid,
433 self._nodeconstants.nullid,
437 self._nodeconstants.nullid,
434 )
438 )
435 else:
439 else:
436 raise error.Abort(
440 raise error.Abort(
437 _(b'working directory state appears damaged!')
441 _(b'working directory state appears damaged!')
438 )
442 )
439
443
440 return self._parents
444 return self._parents
441
445
442 def setparents(self, p1, p2):
446 def setparents(self, p1, p2):
443 self._parents = (p1, p2)
447 self._parents = (p1, p2)
444 self._dirtyparents = True
448 self._dirtyparents = True
445
449
    def read(self):
        """Parse the on-disk dirstate file and populate the map in place.

        A missing or empty dirstate file is treated as an empty dirstate
        and leaves the map untouched. On success the parents recorded in
        the file are adopted, unless setparents() was already called
        (``self._dirtyparents``).
        """
        # ignore HG_PENDING because identity is used only for writing
        self.identity = util.filestat.frompath(
            self._opener.join(self._filename)
        )

        try:
            fp = self._opendirstatefile()
            try:
                st = fp.read()
            finally:
                fp.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        if util.safehasattr(parsers, b'dict_new_presized'):
            # Make an estimate of the number of files in the dirstate based on
            # its size. This trades wasting some memory for avoiding costly
            # resizes. Each entry have a prefix of 17 bytes followed by one or
            # two path names. Studies on various large-scale real-world repositories
            # found 54 bytes a reasonable upper limit for the average path names.
            # Copy entries are ignored for the sake of this estimate.
            self._map = parsers.dict_new_presized(len(st) // 71)

        # Python's garbage collector triggers a GC each time a certain number
        # of container objects (the number being defined by
        # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
        # for each file in the dirstate. The C version then immediately marks
        # them as not to be tracked by the collector. However, this has no
        # effect on when GCs are triggered, only on what objects the GC looks
        # into. This means that O(number of files) GCs are unavoidable.
        # Depending on when in the process's lifetime the dirstate is parsed,
        # this can get very expensive. As a workaround, disable GC while
        # parsing the dirstate.
        #
        # (we cannot decorate the function directly since it is in a C module)
        parse_dirstate = util.nogc(parsers.parse_dirstate)
        p = parse_dirstate(self._map, self.copymap, st)
        if not self._dirtyparents:
            self.setparents(*p)

        # Avoid excess attribute lookups by fast pathing certain checks
        self.__contains__ = self._map.__contains__
        self.__getitem__ = self._map.__getitem__
        self.get = self._map.get
495
499
    def write(self, _tr, st, now):
        """Serialize the map through file object ``st``.

        ``now`` is the reference timestamp handed to pack_dirstate;
        ``_tr`` (the transaction) is unused by this v1 implementation but
        kept for interface parity with the rust-backed class.
        """
        st.write(
            parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
        )
        st.close()
        self._dirtyparents = False
        # NOTE(review): the cached sets are recomputed from scratch here —
        # presumably pack_dirstate can mutate entries (e.g. ambiguous
        # mtimes), invalidating them; confirm against pack_dirstate.
        self.nonnormalset, self.otherparentset = self.nonnormalentries()
503
507
504 @propertycache
508 @propertycache
505 def nonnormalset(self):
509 def nonnormalset(self):
506 nonnorm, otherparents = self.nonnormalentries()
510 nonnorm, otherparents = self.nonnormalentries()
507 self.otherparentset = otherparents
511 self.otherparentset = otherparents
508 return nonnorm
512 return nonnorm
509
513
510 @propertycache
514 @propertycache
511 def otherparentset(self):
515 def otherparentset(self):
512 nonnorm, otherparents = self.nonnormalentries()
516 nonnorm, otherparents = self.nonnormalentries()
513 self.nonnormalset = nonnorm
517 self.nonnormalset = nonnorm
514 return otherparents
518 return otherparents
515
519
516 def non_normal_or_other_parent_paths(self):
520 def non_normal_or_other_parent_paths(self):
517 return self.nonnormalset.union(self.otherparentset)
521 return self.nonnormalset.union(self.otherparentset)
518
522
    @propertycache
    def identity(self):
        """File identity (stat info) of the dirstate file as last read."""
        # Touching self._map forces the lazy read(), which assigns
        # self.identity as a side effect, replacing this cached property.
        # NOTE(review): relies on _map being a cached property computed by
        # read() — confirm against the class definition above this view.
        self._map
        return self.identity
523
527
524 @propertycache
528 @propertycache
525 def dirfoldmap(self):
529 def dirfoldmap(self):
526 f = {}
530 f = {}
527 normcase = util.normcase
531 normcase = util.normcase
528 for name in self._dirs:
532 for name in self._dirs:
529 f[normcase(name)] = name
533 f[normcase(name)] = name
530 return f
534 return f
531
535
532
536
if rustmod is not None:

    class dirstatemap(object):
        """Rust-backed dirstate map.

        A thin Python shim over ``rustmod.DirstateMap`` (built lazily as
        ``self._rustmap``); parents/docket bookkeeping and the v1/v2
        on-disk (de)serialization glue live here.
        """

        def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
            self._use_dirstate_v2 = use_dirstate_v2
            self._nodeconstants = nodeconstants
            self._ui = ui
            self._opener = opener
            self._root = root
            self._filename = b'dirstate'
            self._nodelen = 20  # Also update Rust code when changing this!
            self._parents = None
            self._dirtyparents = False
            self._docket = None

            # for consistent view between _pl() and _read() invocations
            self._pendingmode = None

            self._use_dirstate_tree = self._ui.configbool(
                b"experimental",
                b"dirstate-tree.in-memory",
                False,
            )

        def addfile(
            self,
            f,
            mode=0,
            size=None,
            mtime=None,
            added=False,
            merged=False,
            from_p2=False,
            possibly_dirty=False,
        ):
            """Add or update the entry for ``f``; forwarded to the Rust map."""
            return self._rustmap.addfile(
                f,
                mode,
                size,
                mtime,
                added,
                merged,
                from_p2,
                possibly_dirty,
            )

        def reset_state(
            self,
            filename,
            wc_tracked,
            p1_tracked,
            p2_tracked=False,
            merged=False,
            clean_p1=False,
            clean_p2=False,
            possibly_dirty=False,
            parentfiledata=None,
        ):
            """Set a entry to a given state, disregarding all previous state

            This is to be used by the part of the dirstate API dedicated to
            adjusting the dirstate after a update/merge.

            note: calling this might result to no entry existing at all if the
            dirstate map does not see any point at having one for this file
            anymore.
            """
            if merged and (clean_p1 or clean_p2):
                msg = (
                    b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
                )
                raise error.ProgrammingError(msg)
            # copy information are now outdated
            # (maybe new information should be in directly passed to this function)
            self.copymap.pop(filename, None)

            if not (p1_tracked or p2_tracked or wc_tracked):
                # untracked everywhere: drop the entry entirely
                self.dropfile(filename)
            elif merged:
                # XXX might be merged and removed ?
                entry = self.get(filename)
                if entry is not None and entry.tracked:
                    # XXX mostly replicate dirstate.other parent. We should get
                    # the higher layer to pass us more reliable data where `merged`
                    # actually mean merged. Dropping the else clause will show
                    # failure in `test-graft.t`
                    self.addfile(filename, merged=True)
                else:
                    self.addfile(filename, from_p2=True)
            elif not (p1_tracked or p2_tracked) and wc_tracked:
                # brand new file in the working copy
                self.addfile(
                    filename, added=True, possibly_dirty=possibly_dirty
                )
            elif (p1_tracked or p2_tracked) and not wc_tracked:
                # XXX might be merged and removed ?
                # record as removed ('r') and flag it as non-normal
                self[filename] = DirstateItem(b'r', 0, 0, 0)
                self.nonnormalset.add(filename)
            elif clean_p2 and wc_tracked:
                if p1_tracked or self.get(filename) is not None:
                    # XXX the `self.get` call is catching some case in
                    # `test-merge-remove.t` where the file is tracked in p1, the
                    # p1_tracked argument is False.
                    #
                    # In addition, this seems to be a case where the file is marked
                    # as merged without actually being the result of a merge
                    # action. So thing are not ideal here.
                    self.addfile(filename, merged=True)
                else:
                    self.addfile(filename, from_p2=True)
            elif not p1_tracked and p2_tracked and wc_tracked:
                self.addfile(
                    filename, from_p2=True, possibly_dirty=possibly_dirty
                )
            elif possibly_dirty:
                self.addfile(filename, possibly_dirty=possibly_dirty)
            elif wc_tracked:
                # this is a "normal" file
                if parentfiledata is None:
                    msg = b'failed to pass parentfiledata for a normal file: %s'
                    msg %= filename
                    raise error.ProgrammingError(msg)
                mode, size, mtime = parentfiledata
                self.addfile(filename, mode=mode, size=size, mtime=mtime)
                self.nonnormalset.discard(filename)
            else:
                assert False, 'unreachable'

        def removefile(self, *args, **kwargs):
            # forwarded to the Rust map
            return self._rustmap.removefile(*args, **kwargs)

        def dropfile(self, *args, **kwargs):
            # forwarded to the Rust map
            return self._rustmap.dropfile(*args, **kwargs)

        def clearambiguoustimes(self, *args, **kwargs):
            # forwarded to the Rust map
            return self._rustmap.clearambiguoustimes(*args, **kwargs)

        def nonnormalentries(self):
            """Return the (non-normal, other-parent) entry sets."""
            return self._rustmap.nonnormalentries()

        def get(self, *args, **kwargs):
            # forwarded to the Rust map (rebound to _rustmap.get once loaded)
            return self._rustmap.get(*args, **kwargs)

        @property
        def copymap(self):
            """Mapping of destination path -> copy source path."""
            return self._rustmap.copymap()

        def directories(self):
            return self._rustmap.directories()

        def debug_iter(self):
            return self._rustmap.debug_iter()

        def preload(self):
            # touching the property forces the lazy load of the Rust map
            self._rustmap

        def clear(self):
            """Empty the map, reset parents to null and drop derived caches."""
            self._rustmap.clear()
            self.setparents(
                self._nodeconstants.nullid, self._nodeconstants.nullid
            )
            util.clearcachedproperty(self, b"_dirs")
            util.clearcachedproperty(self, b"_alldirs")
            util.clearcachedproperty(self, b"dirfoldmap")

        def items(self):
            return self._rustmap.items()

        def keys(self):
            return iter(self._rustmap)

        def __contains__(self, key):
            return key in self._rustmap

        def __getitem__(self, item):
            return self._rustmap[item]

        def __len__(self):
            return len(self._rustmap)

        def __iter__(self):
            return iter(self._rustmap)

        # forward for python2,3 compat
        iteritems = items

        def _opendirstatefile(self):
            """Open the dirstate (or its pending variant inside a transaction).

            Aborts if the pending-ness changed between two opens, which would
            give an inconsistent view of the data.
            """
            fp, mode = txnutil.trypending(
                self._root, self._opener, self._filename
            )
            if self._pendingmode is not None and self._pendingmode != mode:
                fp.close()
                raise error.Abort(
                    _(b'working directory state may be changed parallelly')
                )
            self._pendingmode = mode
            return fp

        def _readdirstatefile(self, size=-1):
            """Return up to ``size`` bytes of the dirstate file (b'' if absent)."""
            try:
                with self._opendirstatefile() as fp:
                    return fp.read(size)
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # File doesn't exist, so the current state is empty
                return b''

        def setparents(self, p1, p2):
            """Adopt (p1, p2) as parents; persisted on the next write()."""
            self._parents = (p1, p2)
            self._dirtyparents = True

        def parents(self):
            """Return the (p1, p2) parent pair, reading it lazily from disk.

            In v2 format the parents live in the docket; in v1 they are the
            first 2 * nodelen bytes of the dirstate file.
            """
            if not self._parents:
                if self._use_dirstate_v2:
                    self._parents = self.docket.parents
                else:
                    read_len = self._nodelen * 2
                    st = self._readdirstatefile(read_len)
                    l = len(st)
                    if l == read_len:
                        self._parents = (
                            st[: self._nodelen],
                            st[self._nodelen : 2 * self._nodelen],
                        )
                    elif l == 0:
                        # missing file: empty dirstate, null parents
                        self._parents = (
                            self._nodeconstants.nullid,
                            self._nodeconstants.nullid,
                        )
                    else:
                        raise error.Abort(
                            _(b'working directory state appears damaged!')
                        )

            return self._parents

        @property
        def docket(self):
            """The dirstate-v2 docket (parsed lazily); v2 format only."""
            if not self._docket:
                if not self._use_dirstate_v2:
                    raise error.ProgrammingError(
                        b'dirstate only has a docket in v2 format'
                    )
                self._docket = docketmod.DirstateDocket.parse(
                    self._readdirstatefile(), self._nodeconstants
                )
            return self._docket

        @propertycache
        def _rustmap(self):
            """
            Fills the Dirstatemap when called.
            """
            # ignore HG_PENDING because identity is used only for writing
            self.identity = util.filestat.frompath(
                self._opener.join(self._filename)
            )

            if self._use_dirstate_v2:
                if self.docket.uuid:
                    # TODO: use mmap when possible
                    data = self._opener.read(self.docket.data_filename())
                else:
                    data = b''
                self._rustmap = rustmod.DirstateMap.new_v2(
                    data, self.docket.data_size, self.docket.tree_metadata
                )
                parents = self.docket.parents
            else:
                self._rustmap, parents = rustmod.DirstateMap.new_v1(
                    self._use_dirstate_tree, self._readdirstatefile()
                )

            if parents and not self._dirtyparents:
                self.setparents(*parents)

            # fast-path frequently used lookups straight to the Rust map
            self.__contains__ = self._rustmap.__contains__
            self.__getitem__ = self._rustmap.__getitem__
            self.get = self._rustmap.get
            return self._rustmap

        def write(self, tr, st, now):
            """Serialize the map through file object ``st``.

            ``tr`` is the current transaction (may be None), ``now`` the
            reference timestamp. For v2, data is appended to the existing
            data file when possible, otherwise a fresh data file is written
            and the old one scheduled for removal.
            """
            if not self._use_dirstate_v2:
                p1, p2 = self.parents()
                packed = self._rustmap.write_v1(p1, p2, now)
                st.write(packed)
                st.close()
                self._dirtyparents = False
                return

            # We can only append to an existing data file if there is one
            can_append = self.docket.uuid is not None
            packed, meta, append = self._rustmap.write_v2(now, can_append)
            if append:
                docket = self.docket
                data_filename = docket.data_filename()
                if tr:
                    tr.add(data_filename, docket.data_size)
                with self._opener(data_filename, b'r+b') as fp:
                    fp.seek(docket.data_size)
                    assert fp.tell() == docket.data_size
                    written = fp.write(packed)
                    if written is not None:  # py2 may return None
                        assert written == len(packed), (written, len(packed))
                docket.data_size += len(packed)
                docket.parents = self.parents()
                docket.tree_metadata = meta
                st.write(docket.serialize())
                st.close()
            else:
                old_docket = self.docket
                new_docket = docketmod.DirstateDocket.with_new_uuid(
                    self.parents(), len(packed), meta
                )
                data_filename = new_docket.data_filename()
                if tr:
                    tr.add(data_filename, 0)
                self._opener.write(data_filename, packed)
                # Write the new docket after the new data file has been
                # written. Because `st` was opened with `atomictemp=True`,
                # the actual `.hg/dirstate` file is only affected on close.
                st.write(new_docket.serialize())
                st.close()
                # Remove the old data file after the new docket pointing to
                # the new data file was written.
                if old_docket.uuid:
                    data_filename = old_docket.data_filename()
                    unlink = lambda _tr=None: self._opener.unlink(data_filename)
                    if tr:
                        category = b"dirstate-v2-clean-" + old_docket.uuid
                        tr.addpostclose(category, unlink)
                    else:
                        unlink()
                self._docket = new_docket
            # Reload from the newly-written file
            util.clearcachedproperty(self, b"_rustmap")
            self._dirtyparents = False

        @propertycache
        def filefoldmap(self):
            """Returns a dictionary mapping normalized case paths to their
            non-normalized versions.
            """
            return self._rustmap.filefoldmapasdict()

        def hastrackeddir(self, d):
            return self._rustmap.hastrackeddir(d)

        def hasdir(self, d):
            return self._rustmap.hasdir(d)

        @propertycache
        def identity(self):
            # loading _rustmap assigns self.identity as a side effect,
            # replacing this cached property with the fresh value
            self._rustmap
            return self.identity

        @property
        def nonnormalset(self):
            """Set of filenames whose entries are not in the 'normal' state."""
            nonnorm = self._rustmap.non_normal_entries()
            return nonnorm

        @propertycache
        def otherparentset(self):
            """Set of filenames whose entries come from the second parent."""
            otherparents = self._rustmap.other_parent_entries()
            return otherparents

        def non_normal_or_other_parent_paths(self):
            return self._rustmap.non_normal_or_other_parent_paths()

        @propertycache
        def dirfoldmap(self):
            """Map case-normalized directory names to their on-disk spelling."""
            f = {}
            normcase = util.normcase
            for name in self._rustmap.tracked_dirs():
                f[normcase(name)] = name
            return f

        def set_possibly_dirty(self, filename):
            """record that the current state of the file on disk is unknown"""
            entry = self[filename]
            entry.set_possibly_dirty()
            # write the mutated entry back into the Rust map
            self._rustmap.set_v1(filename, entry)

        def __setitem__(self, key, value):
            assert isinstance(value, DirstateItem)
            self._rustmap.set_v1(key, value)
General Comments 0
You need to be logged in to leave comments. Login now