##// END OF EJS Templates
dirstate: move "get fs now" in the timestamp utility module...
marmoute -
r49202:08b060ab default
parent child Browse files
Show More
@@ -1,1534 +1,1524 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .dirstateutils import (
34 from .dirstateutils import (
35 timestamp,
35 timestamp,
36 )
36 )
37
37
38 from .interfaces import (
38 from .interfaces import (
39 dirstate as intdirstate,
39 dirstate as intdirstate,
40 util as interfaceutil,
40 util as interfaceutil,
41 )
41 )
42
42
43 parsers = policy.importmod('parsers')
43 parsers = policy.importmod('parsers')
44 rustmod = policy.importrust('dirstate')
44 rustmod = policy.importrust('dirstate')
45
45
46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
47
47
48 propertycache = util.propertycache
48 propertycache = util.propertycache
49 filecache = scmutil.filecache
49 filecache = scmutil.filecache
50 _rangemask = dirstatemap.rangemask
50 _rangemask = dirstatemap.rangemask
51
51
52 DirstateItem = dirstatemap.DirstateItem
52 DirstateItem = dirstatemap.DirstateItem
53
53
54
54
55 class repocache(filecache):
55 class repocache(filecache):
56 """filecache for files in .hg/"""
56 """filecache for files in .hg/"""
57
57
58 def join(self, obj, fname):
58 def join(self, obj, fname):
59 return obj._opener.join(fname)
59 return obj._opener.join(fname)
60
60
61
61
62 class rootcache(filecache):
62 class rootcache(filecache):
63 """filecache for files in the repository root"""
63 """filecache for files in the repository root"""
64
64
65 def join(self, obj, fname):
65 def join(self, obj, fname):
66 return obj._join(fname)
66 return obj._join(fname)
67
67
68
68
69 def _getfsnow(vfs):
70 '''Get "now" timestamp on filesystem'''
71 tmpfd, tmpname = vfs.mkstemp()
72 try:
73 return timestamp.mtime_of(os.fstat(tmpfd))
74 finally:
75 os.close(tmpfd)
76 vfs.unlink(tmpname)
77
78
79 def requires_parents_change(func):
69 def requires_parents_change(func):
80 def wrap(self, *args, **kwargs):
70 def wrap(self, *args, **kwargs):
81 if not self.pendingparentchange():
71 if not self.pendingparentchange():
82 msg = 'calling `%s` outside of a parentchange context'
72 msg = 'calling `%s` outside of a parentchange context'
83 msg %= func.__name__
73 msg %= func.__name__
84 raise error.ProgrammingError(msg)
74 raise error.ProgrammingError(msg)
85 return func(self, *args, **kwargs)
75 return func(self, *args, **kwargs)
86
76
87 return wrap
77 return wrap
88
78
89
79
90 def requires_no_parents_change(func):
80 def requires_no_parents_change(func):
91 def wrap(self, *args, **kwargs):
81 def wrap(self, *args, **kwargs):
92 if self.pendingparentchange():
82 if self.pendingparentchange():
93 msg = 'calling `%s` inside of a parentchange context'
83 msg = 'calling `%s` inside of a parentchange context'
94 msg %= func.__name__
84 msg %= func.__name__
95 raise error.ProgrammingError(msg)
85 raise error.ProgrammingError(msg)
96 return func(self, *args, **kwargs)
86 return func(self, *args, **kwargs)
97
87
98 return wrap
88 return wrap
99
89
100
90
101 @interfaceutil.implementer(intdirstate.idirstate)
91 @interfaceutil.implementer(intdirstate.idirstate)
102 class dirstate(object):
92 class dirstate(object):
103 def __init__(
93 def __init__(
104 self,
94 self,
105 opener,
95 opener,
106 ui,
96 ui,
107 root,
97 root,
108 validate,
98 validate,
109 sparsematchfn,
99 sparsematchfn,
110 nodeconstants,
100 nodeconstants,
111 use_dirstate_v2,
101 use_dirstate_v2,
112 ):
102 ):
113 """Create a new dirstate object.
103 """Create a new dirstate object.
114
104
115 opener is an open()-like callable that can be used to open the
105 opener is an open()-like callable that can be used to open the
116 dirstate file; root is the root of the directory tracked by
106 dirstate file; root is the root of the directory tracked by
117 the dirstate.
107 the dirstate.
118 """
108 """
119 self._use_dirstate_v2 = use_dirstate_v2
109 self._use_dirstate_v2 = use_dirstate_v2
120 self._nodeconstants = nodeconstants
110 self._nodeconstants = nodeconstants
121 self._opener = opener
111 self._opener = opener
122 self._validate = validate
112 self._validate = validate
123 self._root = root
113 self._root = root
124 self._sparsematchfn = sparsematchfn
114 self._sparsematchfn = sparsematchfn
125 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
115 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
126 # UNC path pointing to root share (issue4557)
116 # UNC path pointing to root share (issue4557)
127 self._rootdir = pathutil.normasprefix(root)
117 self._rootdir = pathutil.normasprefix(root)
128 self._dirty = False
118 self._dirty = False
129 self._lastnormaltime = timestamp.zero()
119 self._lastnormaltime = timestamp.zero()
130 self._ui = ui
120 self._ui = ui
131 self._filecache = {}
121 self._filecache = {}
132 self._parentwriters = 0
122 self._parentwriters = 0
133 self._filename = b'dirstate'
123 self._filename = b'dirstate'
134 self._pendingfilename = b'%s.pending' % self._filename
124 self._pendingfilename = b'%s.pending' % self._filename
135 self._plchangecallbacks = {}
125 self._plchangecallbacks = {}
136 self._origpl = None
126 self._origpl = None
137 self._mapcls = dirstatemap.dirstatemap
127 self._mapcls = dirstatemap.dirstatemap
138 # Access and cache cwd early, so we don't access it for the first time
128 # Access and cache cwd early, so we don't access it for the first time
139 # after a working-copy update caused it to not exist (accessing it then
129 # after a working-copy update caused it to not exist (accessing it then
140 # raises an exception).
130 # raises an exception).
141 self._cwd
131 self._cwd
142
132
143 def prefetch_parents(self):
133 def prefetch_parents(self):
144 """make sure the parents are loaded
134 """make sure the parents are loaded
145
135
146 Used to avoid a race condition.
136 Used to avoid a race condition.
147 """
137 """
148 self._pl
138 self._pl
149
139
150 @contextlib.contextmanager
140 @contextlib.contextmanager
151 def parentchange(self):
141 def parentchange(self):
152 """Context manager for handling dirstate parents.
142 """Context manager for handling dirstate parents.
153
143
154 If an exception occurs in the scope of the context manager,
144 If an exception occurs in the scope of the context manager,
155 the incoherent dirstate won't be written when wlock is
145 the incoherent dirstate won't be written when wlock is
156 released.
146 released.
157 """
147 """
158 self._parentwriters += 1
148 self._parentwriters += 1
159 yield
149 yield
160 # Typically we want the "undo" step of a context manager in a
150 # Typically we want the "undo" step of a context manager in a
161 # finally block so it happens even when an exception
151 # finally block so it happens even when an exception
162 # occurs. In this case, however, we only want to decrement
152 # occurs. In this case, however, we only want to decrement
163 # parentwriters if the code in the with statement exits
153 # parentwriters if the code in the with statement exits
164 # normally, so we don't have a try/finally here on purpose.
154 # normally, so we don't have a try/finally here on purpose.
165 self._parentwriters -= 1
155 self._parentwriters -= 1
166
156
167 def pendingparentchange(self):
157 def pendingparentchange(self):
168 """Returns true if the dirstate is in the middle of a set of changes
158 """Returns true if the dirstate is in the middle of a set of changes
169 that modify the dirstate parent.
159 that modify the dirstate parent.
170 """
160 """
171 return self._parentwriters > 0
161 return self._parentwriters > 0
172
162
173 @propertycache
163 @propertycache
174 def _map(self):
164 def _map(self):
175 """Return the dirstate contents (see documentation for dirstatemap)."""
165 """Return the dirstate contents (see documentation for dirstatemap)."""
176 self._map = self._mapcls(
166 self._map = self._mapcls(
177 self._ui,
167 self._ui,
178 self._opener,
168 self._opener,
179 self._root,
169 self._root,
180 self._nodeconstants,
170 self._nodeconstants,
181 self._use_dirstate_v2,
171 self._use_dirstate_v2,
182 )
172 )
183 return self._map
173 return self._map
184
174
185 @property
175 @property
186 def _sparsematcher(self):
176 def _sparsematcher(self):
187 """The matcher for the sparse checkout.
177 """The matcher for the sparse checkout.
188
178
189 The working directory may not include every file from a manifest. The
179 The working directory may not include every file from a manifest. The
190 matcher obtained by this property will match a path if it is to be
180 matcher obtained by this property will match a path if it is to be
191 included in the working directory.
181 included in the working directory.
192 """
182 """
193 # TODO there is potential to cache this property. For now, the matcher
183 # TODO there is potential to cache this property. For now, the matcher
194 # is resolved on every access. (But the called function does use a
184 # is resolved on every access. (But the called function does use a
195 # cache to keep the lookup fast.)
185 # cache to keep the lookup fast.)
196 return self._sparsematchfn()
186 return self._sparsematchfn()
197
187
198 @repocache(b'branch')
188 @repocache(b'branch')
199 def _branch(self):
189 def _branch(self):
200 try:
190 try:
201 return self._opener.read(b"branch").strip() or b"default"
191 return self._opener.read(b"branch").strip() or b"default"
202 except IOError as inst:
192 except IOError as inst:
203 if inst.errno != errno.ENOENT:
193 if inst.errno != errno.ENOENT:
204 raise
194 raise
205 return b"default"
195 return b"default"
206
196
207 @property
197 @property
208 def _pl(self):
198 def _pl(self):
209 return self._map.parents()
199 return self._map.parents()
210
200
211 def hasdir(self, d):
201 def hasdir(self, d):
212 return self._map.hastrackeddir(d)
202 return self._map.hastrackeddir(d)
213
203
214 @rootcache(b'.hgignore')
204 @rootcache(b'.hgignore')
215 def _ignore(self):
205 def _ignore(self):
216 files = self._ignorefiles()
206 files = self._ignorefiles()
217 if not files:
207 if not files:
218 return matchmod.never()
208 return matchmod.never()
219
209
220 pats = [b'include:%s' % f for f in files]
210 pats = [b'include:%s' % f for f in files]
221 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
211 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
222
212
223 @propertycache
213 @propertycache
224 def _slash(self):
214 def _slash(self):
225 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
215 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
226
216
227 @propertycache
217 @propertycache
228 def _checklink(self):
218 def _checklink(self):
229 return util.checklink(self._root)
219 return util.checklink(self._root)
230
220
231 @propertycache
221 @propertycache
232 def _checkexec(self):
222 def _checkexec(self):
233 return bool(util.checkexec(self._root))
223 return bool(util.checkexec(self._root))
234
224
235 @propertycache
225 @propertycache
236 def _checkcase(self):
226 def _checkcase(self):
237 return not util.fscasesensitive(self._join(b'.hg'))
227 return not util.fscasesensitive(self._join(b'.hg'))
238
228
239 def _join(self, f):
229 def _join(self, f):
240 # much faster than os.path.join()
230 # much faster than os.path.join()
241 # it's safe because f is always a relative path
231 # it's safe because f is always a relative path
242 return self._rootdir + f
232 return self._rootdir + f
243
233
244 def flagfunc(self, buildfallback):
234 def flagfunc(self, buildfallback):
245 """build a callable that returns flags associated with a filename
235 """build a callable that returns flags associated with a filename
246
236
247 The information is extracted from three possible layers:
237 The information is extracted from three possible layers:
248 1. the file system if it supports the information
238 1. the file system if it supports the information
249 2. the "fallback" information stored in the dirstate if any
239 2. the "fallback" information stored in the dirstate if any
250 3. a more expensive mechanism inferring the flags from the parents.
240 3. a more expensive mechanism inferring the flags from the parents.
251 """
241 """
252
242
253 # small hack to cache the result of buildfallback()
243 # small hack to cache the result of buildfallback()
254 fallback_func = []
244 fallback_func = []
255
245
256 def get_flags(x):
246 def get_flags(x):
257 entry = None
247 entry = None
258 fallback_value = None
248 fallback_value = None
259 try:
249 try:
260 st = os.lstat(self._join(x))
250 st = os.lstat(self._join(x))
261 except OSError:
251 except OSError:
262 return b''
252 return b''
263
253
264 if self._checklink:
254 if self._checklink:
265 if util.statislink(st):
255 if util.statislink(st):
266 return b'l'
256 return b'l'
267 else:
257 else:
268 entry = self.get_entry(x)
258 entry = self.get_entry(x)
269 if entry.has_fallback_symlink:
259 if entry.has_fallback_symlink:
270 if entry.fallback_symlink:
260 if entry.fallback_symlink:
271 return b'l'
261 return b'l'
272 else:
262 else:
273 if not fallback_func:
263 if not fallback_func:
274 fallback_func.append(buildfallback())
264 fallback_func.append(buildfallback())
275 fallback_value = fallback_func[0](x)
265 fallback_value = fallback_func[0](x)
276 if b'l' in fallback_value:
266 if b'l' in fallback_value:
277 return b'l'
267 return b'l'
278
268
279 if self._checkexec:
269 if self._checkexec:
280 if util.statisexec(st):
270 if util.statisexec(st):
281 return b'x'
271 return b'x'
282 else:
272 else:
283 if entry is None:
273 if entry is None:
284 entry = self.get_entry(x)
274 entry = self.get_entry(x)
285 if entry.has_fallback_exec:
275 if entry.has_fallback_exec:
286 if entry.fallback_exec:
276 if entry.fallback_exec:
287 return b'x'
277 return b'x'
288 else:
278 else:
289 if fallback_value is None:
279 if fallback_value is None:
290 if not fallback_func:
280 if not fallback_func:
291 fallback_func.append(buildfallback())
281 fallback_func.append(buildfallback())
292 fallback_value = fallback_func[0](x)
282 fallback_value = fallback_func[0](x)
293 if b'x' in fallback_value:
283 if b'x' in fallback_value:
294 return b'x'
284 return b'x'
295 return b''
285 return b''
296
286
297 return get_flags
287 return get_flags
298
288
299 @propertycache
289 @propertycache
300 def _cwd(self):
290 def _cwd(self):
301 # internal config: ui.forcecwd
291 # internal config: ui.forcecwd
302 forcecwd = self._ui.config(b'ui', b'forcecwd')
292 forcecwd = self._ui.config(b'ui', b'forcecwd')
303 if forcecwd:
293 if forcecwd:
304 return forcecwd
294 return forcecwd
305 return encoding.getcwd()
295 return encoding.getcwd()
306
296
307 def getcwd(self):
297 def getcwd(self):
308 """Return the path from which a canonical path is calculated.
298 """Return the path from which a canonical path is calculated.
309
299
310 This path should be used to resolve file patterns or to convert
300 This path should be used to resolve file patterns or to convert
311 canonical paths back to file paths for display. It shouldn't be
301 canonical paths back to file paths for display. It shouldn't be
312 used to get real file paths. Use vfs functions instead.
302 used to get real file paths. Use vfs functions instead.
313 """
303 """
314 cwd = self._cwd
304 cwd = self._cwd
315 if cwd == self._root:
305 if cwd == self._root:
316 return b''
306 return b''
317 # self._root ends with a path separator if self._root is '/' or 'C:\'
307 # self._root ends with a path separator if self._root is '/' or 'C:\'
318 rootsep = self._root
308 rootsep = self._root
319 if not util.endswithsep(rootsep):
309 if not util.endswithsep(rootsep):
320 rootsep += pycompat.ossep
310 rootsep += pycompat.ossep
321 if cwd.startswith(rootsep):
311 if cwd.startswith(rootsep):
322 return cwd[len(rootsep) :]
312 return cwd[len(rootsep) :]
323 else:
313 else:
324 # we're outside the repo. return an absolute path.
314 # we're outside the repo. return an absolute path.
325 return cwd
315 return cwd
326
316
327 def pathto(self, f, cwd=None):
317 def pathto(self, f, cwd=None):
328 if cwd is None:
318 if cwd is None:
329 cwd = self.getcwd()
319 cwd = self.getcwd()
330 path = util.pathto(self._root, cwd, f)
320 path = util.pathto(self._root, cwd, f)
331 if self._slash:
321 if self._slash:
332 return util.pconvert(path)
322 return util.pconvert(path)
333 return path
323 return path
334
324
335 def __getitem__(self, key):
325 def __getitem__(self, key):
336 """Return the current state of key (a filename) in the dirstate.
326 """Return the current state of key (a filename) in the dirstate.
337
327
338 States are:
328 States are:
339 n normal
329 n normal
340 m needs merging
330 m needs merging
341 r marked for removal
331 r marked for removal
342 a marked for addition
332 a marked for addition
343 ? not tracked
333 ? not tracked
344
334
345 XXX The "state" is a bit obscure to be in the "public" API. we should
335 XXX The "state" is a bit obscure to be in the "public" API. we should
346 consider migrating all user of this to going through the dirstate entry
336 consider migrating all user of this to going through the dirstate entry
347 instead.
337 instead.
348 """
338 """
349 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
339 msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
350 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
340 util.nouideprecwarn(msg, b'6.1', stacklevel=2)
351 entry = self._map.get(key)
341 entry = self._map.get(key)
352 if entry is not None:
342 if entry is not None:
353 return entry.state
343 return entry.state
354 return b'?'
344 return b'?'
355
345
356 def get_entry(self, path):
346 def get_entry(self, path):
357 """return a DirstateItem for the associated path"""
347 """return a DirstateItem for the associated path"""
358 entry = self._map.get(path)
348 entry = self._map.get(path)
359 if entry is None:
349 if entry is None:
360 return DirstateItem()
350 return DirstateItem()
361 return entry
351 return entry
362
352
363 def __contains__(self, key):
353 def __contains__(self, key):
364 return key in self._map
354 return key in self._map
365
355
366 def __iter__(self):
356 def __iter__(self):
367 return iter(sorted(self._map))
357 return iter(sorted(self._map))
368
358
369 def items(self):
359 def items(self):
370 return pycompat.iteritems(self._map)
360 return pycompat.iteritems(self._map)
371
361
372 iteritems = items
362 iteritems = items
373
363
374 def parents(self):
364 def parents(self):
375 return [self._validate(p) for p in self._pl]
365 return [self._validate(p) for p in self._pl]
376
366
377 def p1(self):
367 def p1(self):
378 return self._validate(self._pl[0])
368 return self._validate(self._pl[0])
379
369
380 def p2(self):
370 def p2(self):
381 return self._validate(self._pl[1])
371 return self._validate(self._pl[1])
382
372
383 @property
373 @property
384 def in_merge(self):
374 def in_merge(self):
385 """True if a merge is in progress"""
375 """True if a merge is in progress"""
386 return self._pl[1] != self._nodeconstants.nullid
376 return self._pl[1] != self._nodeconstants.nullid
387
377
388 def branch(self):
378 def branch(self):
389 return encoding.tolocal(self._branch)
379 return encoding.tolocal(self._branch)
390
380
391 def setparents(self, p1, p2=None):
381 def setparents(self, p1, p2=None):
392 """Set dirstate parents to p1 and p2.
382 """Set dirstate parents to p1 and p2.
393
383
394 When moving from two parents to one, "merged" entries a
384 When moving from two parents to one, "merged" entries a
395 adjusted to normal and previous copy records discarded and
385 adjusted to normal and previous copy records discarded and
396 returned by the call.
386 returned by the call.
397
387
398 See localrepo.setparents()
388 See localrepo.setparents()
399 """
389 """
400 if p2 is None:
390 if p2 is None:
401 p2 = self._nodeconstants.nullid
391 p2 = self._nodeconstants.nullid
402 if self._parentwriters == 0:
392 if self._parentwriters == 0:
403 raise ValueError(
393 raise ValueError(
404 b"cannot set dirstate parent outside of "
394 b"cannot set dirstate parent outside of "
405 b"dirstate.parentchange context manager"
395 b"dirstate.parentchange context manager"
406 )
396 )
407
397
408 self._dirty = True
398 self._dirty = True
409 oldp2 = self._pl[1]
399 oldp2 = self._pl[1]
410 if self._origpl is None:
400 if self._origpl is None:
411 self._origpl = self._pl
401 self._origpl = self._pl
412 nullid = self._nodeconstants.nullid
402 nullid = self._nodeconstants.nullid
413 # True if we need to fold p2 related state back to a linear case
403 # True if we need to fold p2 related state back to a linear case
414 fold_p2 = oldp2 != nullid and p2 == nullid
404 fold_p2 = oldp2 != nullid and p2 == nullid
415 return self._map.setparents(p1, p2, fold_p2=fold_p2)
405 return self._map.setparents(p1, p2, fold_p2=fold_p2)
416
406
417 def setbranch(self, branch):
407 def setbranch(self, branch):
418 self.__class__._branch.set(self, encoding.fromlocal(branch))
408 self.__class__._branch.set(self, encoding.fromlocal(branch))
419 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
409 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
420 try:
410 try:
421 f.write(self._branch + b'\n')
411 f.write(self._branch + b'\n')
422 f.close()
412 f.close()
423
413
424 # make sure filecache has the correct stat info for _branch after
414 # make sure filecache has the correct stat info for _branch after
425 # replacing the underlying file
415 # replacing the underlying file
426 ce = self._filecache[b'_branch']
416 ce = self._filecache[b'_branch']
427 if ce:
417 if ce:
428 ce.refresh()
418 ce.refresh()
429 except: # re-raises
419 except: # re-raises
430 f.discard()
420 f.discard()
431 raise
421 raise
432
422
433 def invalidate(self):
423 def invalidate(self):
434 """Causes the next access to reread the dirstate.
424 """Causes the next access to reread the dirstate.
435
425
436 This is different from localrepo.invalidatedirstate() because it always
426 This is different from localrepo.invalidatedirstate() because it always
437 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
427 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
438 check whether the dirstate has changed before rereading it."""
428 check whether the dirstate has changed before rereading it."""
439
429
440 for a in ("_map", "_branch", "_ignore"):
430 for a in ("_map", "_branch", "_ignore"):
441 if a in self.__dict__:
431 if a in self.__dict__:
442 delattr(self, a)
432 delattr(self, a)
443 self._lastnormaltime = timestamp.zero()
433 self._lastnormaltime = timestamp.zero()
444 self._dirty = False
434 self._dirty = False
445 self._parentwriters = 0
435 self._parentwriters = 0
446 self._origpl = None
436 self._origpl = None
447
437
448 def copy(self, source, dest):
438 def copy(self, source, dest):
449 """Mark dest as a copy of source. Unmark dest if source is None."""
439 """Mark dest as a copy of source. Unmark dest if source is None."""
450 if source == dest:
440 if source == dest:
451 return
441 return
452 self._dirty = True
442 self._dirty = True
453 if source is not None:
443 if source is not None:
454 self._map.copymap[dest] = source
444 self._map.copymap[dest] = source
455 else:
445 else:
456 self._map.copymap.pop(dest, None)
446 self._map.copymap.pop(dest, None)
457
447
458 def copied(self, file):
448 def copied(self, file):
459 return self._map.copymap.get(file, None)
449 return self._map.copymap.get(file, None)
460
450
461 def copies(self):
451 def copies(self):
462 return self._map.copymap
452 return self._map.copymap
463
453
464 @requires_no_parents_change
454 @requires_no_parents_change
465 def set_tracked(self, filename):
455 def set_tracked(self, filename):
466 """a "public" method for generic code to mark a file as tracked
456 """a "public" method for generic code to mark a file as tracked
467
457
468 This function is to be called outside of "update/merge" case. For
458 This function is to be called outside of "update/merge" case. For
469 example by a command like `hg add X`.
459 example by a command like `hg add X`.
470
460
471 return True the file was previously untracked, False otherwise.
461 return True the file was previously untracked, False otherwise.
472 """
462 """
473 self._dirty = True
463 self._dirty = True
474 entry = self._map.get(filename)
464 entry = self._map.get(filename)
475 if entry is None or not entry.tracked:
465 if entry is None or not entry.tracked:
476 self._check_new_tracked_filename(filename)
466 self._check_new_tracked_filename(filename)
477 return self._map.set_tracked(filename)
467 return self._map.set_tracked(filename)
478
468
479 @requires_no_parents_change
469 @requires_no_parents_change
480 def set_untracked(self, filename):
470 def set_untracked(self, filename):
481 """a "public" method for generic code to mark a file as untracked
471 """a "public" method for generic code to mark a file as untracked
482
472
483 This function is to be called outside of "update/merge" case. For
473 This function is to be called outside of "update/merge" case. For
484 example by a command like `hg remove X`.
474 example by a command like `hg remove X`.
485
475
486 return True the file was previously tracked, False otherwise.
476 return True the file was previously tracked, False otherwise.
487 """
477 """
488 ret = self._map.set_untracked(filename)
478 ret = self._map.set_untracked(filename)
489 if ret:
479 if ret:
490 self._dirty = True
480 self._dirty = True
491 return ret
481 return ret
492
482
493 @requires_no_parents_change
483 @requires_no_parents_change
494 def set_clean(self, filename, parentfiledata=None):
484 def set_clean(self, filename, parentfiledata=None):
495 """record that the current state of the file on disk is known to be clean"""
485 """record that the current state of the file on disk is known to be clean"""
496 self._dirty = True
486 self._dirty = True
497 if parentfiledata:
487 if parentfiledata:
498 (mode, size, mtime) = parentfiledata
488 (mode, size, mtime) = parentfiledata
499 else:
489 else:
500 (mode, size, mtime) = self._get_filedata(filename)
490 (mode, size, mtime) = self._get_filedata(filename)
501 if not self._map[filename].tracked:
491 if not self._map[filename].tracked:
502 self._check_new_tracked_filename(filename)
492 self._check_new_tracked_filename(filename)
503 self._map.set_clean(filename, mode, size, mtime)
493 self._map.set_clean(filename, mode, size, mtime)
504 if mtime > self._lastnormaltime:
494 if mtime > self._lastnormaltime:
505 # Remember the most recent modification timeslot for status(),
495 # Remember the most recent modification timeslot for status(),
506 # to make sure we won't miss future size-preserving file content
496 # to make sure we won't miss future size-preserving file content
507 # modifications that happen within the same timeslot.
497 # modifications that happen within the same timeslot.
508 self._lastnormaltime = mtime
498 self._lastnormaltime = mtime
509
499
510 @requires_no_parents_change
500 @requires_no_parents_change
511 def set_possibly_dirty(self, filename):
501 def set_possibly_dirty(self, filename):
512 """record that the current state of the file on disk is unknown"""
502 """record that the current state of the file on disk is unknown"""
513 self._dirty = True
503 self._dirty = True
514 self._map.set_possibly_dirty(filename)
504 self._map.set_possibly_dirty(filename)
515
505
516 @requires_parents_change
506 @requires_parents_change
517 def update_file_p1(
507 def update_file_p1(
518 self,
508 self,
519 filename,
509 filename,
520 p1_tracked,
510 p1_tracked,
521 ):
511 ):
522 """Set a file as tracked in the parent (or not)
512 """Set a file as tracked in the parent (or not)
523
513
524 This is to be called when adjust the dirstate to a new parent after an history
514 This is to be called when adjust the dirstate to a new parent after an history
525 rewriting operation.
515 rewriting operation.
526
516
527 It should not be called during a merge (p2 != nullid) and only within
517 It should not be called during a merge (p2 != nullid) and only within
528 a `with dirstate.parentchange():` context.
518 a `with dirstate.parentchange():` context.
529 """
519 """
530 if self.in_merge:
520 if self.in_merge:
531 msg = b'update_file_reference should not be called when merging'
521 msg = b'update_file_reference should not be called when merging'
532 raise error.ProgrammingError(msg)
522 raise error.ProgrammingError(msg)
533 entry = self._map.get(filename)
523 entry = self._map.get(filename)
534 if entry is None:
524 if entry is None:
535 wc_tracked = False
525 wc_tracked = False
536 else:
526 else:
537 wc_tracked = entry.tracked
527 wc_tracked = entry.tracked
538 if not (p1_tracked or wc_tracked):
528 if not (p1_tracked or wc_tracked):
539 # the file is no longer relevant to anyone
529 # the file is no longer relevant to anyone
540 if self._map.get(filename) is not None:
530 if self._map.get(filename) is not None:
541 self._map.reset_state(filename)
531 self._map.reset_state(filename)
542 self._dirty = True
532 self._dirty = True
543 elif (not p1_tracked) and wc_tracked:
533 elif (not p1_tracked) and wc_tracked:
544 if entry is not None and entry.added:
534 if entry is not None and entry.added:
545 return # avoid dropping copy information (maybe?)
535 return # avoid dropping copy information (maybe?)
546
536
547 parentfiledata = None
537 parentfiledata = None
548 if wc_tracked and p1_tracked:
538 if wc_tracked and p1_tracked:
549 parentfiledata = self._get_filedata(filename)
539 parentfiledata = self._get_filedata(filename)
550
540
551 self._map.reset_state(
541 self._map.reset_state(
552 filename,
542 filename,
553 wc_tracked,
543 wc_tracked,
554 p1_tracked,
544 p1_tracked,
555 # the underlying reference might have changed, we will have to
545 # the underlying reference might have changed, we will have to
556 # check it.
546 # check it.
557 has_meaningful_mtime=False,
547 has_meaningful_mtime=False,
558 parentfiledata=parentfiledata,
548 parentfiledata=parentfiledata,
559 )
549 )
560 if (
550 if (
561 parentfiledata is not None
551 parentfiledata is not None
562 and parentfiledata[2] > self._lastnormaltime
552 and parentfiledata[2] > self._lastnormaltime
563 ):
553 ):
564 # Remember the most recent modification timeslot for status(),
554 # Remember the most recent modification timeslot for status(),
565 # to make sure we won't miss future size-preserving file content
555 # to make sure we won't miss future size-preserving file content
566 # modifications that happen within the same timeslot.
556 # modifications that happen within the same timeslot.
567 self._lastnormaltime = parentfiledata[2]
557 self._lastnormaltime = parentfiledata[2]
568
558
569 @requires_parents_change
559 @requires_parents_change
570 def update_file(
560 def update_file(
571 self,
561 self,
572 filename,
562 filename,
573 wc_tracked,
563 wc_tracked,
574 p1_tracked,
564 p1_tracked,
575 p2_info=False,
565 p2_info=False,
576 possibly_dirty=False,
566 possibly_dirty=False,
577 parentfiledata=None,
567 parentfiledata=None,
578 ):
568 ):
579 """update the information about a file in the dirstate
569 """update the information about a file in the dirstate
580
570
581 This is to be called when the direstates parent changes to keep track
571 This is to be called when the direstates parent changes to keep track
582 of what is the file situation in regards to the working copy and its parent.
572 of what is the file situation in regards to the working copy and its parent.
583
573
584 This function must be called within a `dirstate.parentchange` context.
574 This function must be called within a `dirstate.parentchange` context.
585
575
586 note: the API is at an early stage and we might need to adjust it
576 note: the API is at an early stage and we might need to adjust it
587 depending of what information ends up being relevant and useful to
577 depending of what information ends up being relevant and useful to
588 other processing.
578 other processing.
589 """
579 """
590
580
591 # note: I do not think we need to double check name clash here since we
581 # note: I do not think we need to double check name clash here since we
592 # are in a update/merge case that should already have taken care of
582 # are in a update/merge case that should already have taken care of
593 # this. The test agrees
583 # this. The test agrees
594
584
595 self._dirty = True
585 self._dirty = True
596
586
597 need_parent_file_data = (
587 need_parent_file_data = (
598 not possibly_dirty and not p2_info and wc_tracked and p1_tracked
588 not possibly_dirty and not p2_info and wc_tracked and p1_tracked
599 )
589 )
600
590
601 if need_parent_file_data and parentfiledata is None:
591 if need_parent_file_data and parentfiledata is None:
602 parentfiledata = self._get_filedata(filename)
592 parentfiledata = self._get_filedata(filename)
603
593
604 self._map.reset_state(
594 self._map.reset_state(
605 filename,
595 filename,
606 wc_tracked,
596 wc_tracked,
607 p1_tracked,
597 p1_tracked,
608 p2_info=p2_info,
598 p2_info=p2_info,
609 has_meaningful_mtime=not possibly_dirty,
599 has_meaningful_mtime=not possibly_dirty,
610 parentfiledata=parentfiledata,
600 parentfiledata=parentfiledata,
611 )
601 )
612 if (
602 if (
613 parentfiledata is not None
603 parentfiledata is not None
614 and parentfiledata[2] is not None
604 and parentfiledata[2] is not None
615 and parentfiledata[2] > self._lastnormaltime
605 and parentfiledata[2] > self._lastnormaltime
616 ):
606 ):
617 # Remember the most recent modification timeslot for status(),
607 # Remember the most recent modification timeslot for status(),
618 # to make sure we won't miss future size-preserving file content
608 # to make sure we won't miss future size-preserving file content
619 # modifications that happen within the same timeslot.
609 # modifications that happen within the same timeslot.
620 self._lastnormaltime = parentfiledata[2]
610 self._lastnormaltime = parentfiledata[2]
621
611
622 def _check_new_tracked_filename(self, filename):
612 def _check_new_tracked_filename(self, filename):
623 scmutil.checkfilename(filename)
613 scmutil.checkfilename(filename)
624 if self._map.hastrackeddir(filename):
614 if self._map.hastrackeddir(filename):
625 msg = _(b'directory %r already in dirstate')
615 msg = _(b'directory %r already in dirstate')
626 msg %= pycompat.bytestr(filename)
616 msg %= pycompat.bytestr(filename)
627 raise error.Abort(msg)
617 raise error.Abort(msg)
628 # shadows
618 # shadows
629 for d in pathutil.finddirs(filename):
619 for d in pathutil.finddirs(filename):
630 if self._map.hastrackeddir(d):
620 if self._map.hastrackeddir(d):
631 break
621 break
632 entry = self._map.get(d)
622 entry = self._map.get(d)
633 if entry is not None and not entry.removed:
623 if entry is not None and not entry.removed:
634 msg = _(b'file %r in dirstate clashes with %r')
624 msg = _(b'file %r in dirstate clashes with %r')
635 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
625 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
636 raise error.Abort(msg)
626 raise error.Abort(msg)
637
627
638 def _get_filedata(self, filename):
628 def _get_filedata(self, filename):
639 """returns"""
629 """returns"""
640 s = os.lstat(self._join(filename))
630 s = os.lstat(self._join(filename))
641 mode = s.st_mode
631 mode = s.st_mode
642 size = s.st_size
632 size = s.st_size
643 mtime = timestamp.mtime_of(s)
633 mtime = timestamp.mtime_of(s)
644 return (mode, size, mtime)
634 return (mode, size, mtime)
645
635
646 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
636 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
647 if exists is None:
637 if exists is None:
648 exists = os.path.lexists(os.path.join(self._root, path))
638 exists = os.path.lexists(os.path.join(self._root, path))
649 if not exists:
639 if not exists:
650 # Maybe a path component exists
640 # Maybe a path component exists
651 if not ignoremissing and b'/' in path:
641 if not ignoremissing and b'/' in path:
652 d, f = path.rsplit(b'/', 1)
642 d, f = path.rsplit(b'/', 1)
653 d = self._normalize(d, False, ignoremissing, None)
643 d = self._normalize(d, False, ignoremissing, None)
654 folded = d + b"/" + f
644 folded = d + b"/" + f
655 else:
645 else:
656 # No path components, preserve original case
646 # No path components, preserve original case
657 folded = path
647 folded = path
658 else:
648 else:
659 # recursively normalize leading directory components
649 # recursively normalize leading directory components
660 # against dirstate
650 # against dirstate
661 if b'/' in normed:
651 if b'/' in normed:
662 d, f = normed.rsplit(b'/', 1)
652 d, f = normed.rsplit(b'/', 1)
663 d = self._normalize(d, False, ignoremissing, True)
653 d = self._normalize(d, False, ignoremissing, True)
664 r = self._root + b"/" + d
654 r = self._root + b"/" + d
665 folded = d + b"/" + util.fspath(f, r)
655 folded = d + b"/" + util.fspath(f, r)
666 else:
656 else:
667 folded = util.fspath(normed, self._root)
657 folded = util.fspath(normed, self._root)
668 storemap[normed] = folded
658 storemap[normed] = folded
669
659
670 return folded
660 return folded
671
661
672 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
662 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
673 normed = util.normcase(path)
663 normed = util.normcase(path)
674 folded = self._map.filefoldmap.get(normed, None)
664 folded = self._map.filefoldmap.get(normed, None)
675 if folded is None:
665 if folded is None:
676 if isknown:
666 if isknown:
677 folded = path
667 folded = path
678 else:
668 else:
679 folded = self._discoverpath(
669 folded = self._discoverpath(
680 path, normed, ignoremissing, exists, self._map.filefoldmap
670 path, normed, ignoremissing, exists, self._map.filefoldmap
681 )
671 )
682 return folded
672 return folded
683
673
684 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
674 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
685 normed = util.normcase(path)
675 normed = util.normcase(path)
686 folded = self._map.filefoldmap.get(normed, None)
676 folded = self._map.filefoldmap.get(normed, None)
687 if folded is None:
677 if folded is None:
688 folded = self._map.dirfoldmap.get(normed, None)
678 folded = self._map.dirfoldmap.get(normed, None)
689 if folded is None:
679 if folded is None:
690 if isknown:
680 if isknown:
691 folded = path
681 folded = path
692 else:
682 else:
693 # store discovered result in dirfoldmap so that future
683 # store discovered result in dirfoldmap so that future
694 # normalizefile calls don't start matching directories
684 # normalizefile calls don't start matching directories
695 folded = self._discoverpath(
685 folded = self._discoverpath(
696 path, normed, ignoremissing, exists, self._map.dirfoldmap
686 path, normed, ignoremissing, exists, self._map.dirfoldmap
697 )
687 )
698 return folded
688 return folded
699
689
700 def normalize(self, path, isknown=False, ignoremissing=False):
690 def normalize(self, path, isknown=False, ignoremissing=False):
701 """
691 """
702 normalize the case of a pathname when on a casefolding filesystem
692 normalize the case of a pathname when on a casefolding filesystem
703
693
704 isknown specifies whether the filename came from walking the
694 isknown specifies whether the filename came from walking the
705 disk, to avoid extra filesystem access.
695 disk, to avoid extra filesystem access.
706
696
707 If ignoremissing is True, missing path are returned
697 If ignoremissing is True, missing path are returned
708 unchanged. Otherwise, we try harder to normalize possibly
698 unchanged. Otherwise, we try harder to normalize possibly
709 existing path components.
699 existing path components.
710
700
711 The normalized case is determined based on the following precedence:
701 The normalized case is determined based on the following precedence:
712
702
713 - version of name already stored in the dirstate
703 - version of name already stored in the dirstate
714 - version of name stored on disk
704 - version of name stored on disk
715 - version provided via command arguments
705 - version provided via command arguments
716 """
706 """
717
707
718 if self._checkcase:
708 if self._checkcase:
719 return self._normalize(path, isknown, ignoremissing)
709 return self._normalize(path, isknown, ignoremissing)
720 return path
710 return path
721
711
722 def clear(self):
712 def clear(self):
723 self._map.clear()
713 self._map.clear()
724 self._lastnormaltime = timestamp.zero()
714 self._lastnormaltime = timestamp.zero()
725 self._dirty = True
715 self._dirty = True
726
716
727 def rebuild(self, parent, allfiles, changedfiles=None):
717 def rebuild(self, parent, allfiles, changedfiles=None):
728 if changedfiles is None:
718 if changedfiles is None:
729 # Rebuild entire dirstate
719 # Rebuild entire dirstate
730 to_lookup = allfiles
720 to_lookup = allfiles
731 to_drop = []
721 to_drop = []
732 lastnormaltime = self._lastnormaltime
722 lastnormaltime = self._lastnormaltime
733 self.clear()
723 self.clear()
734 self._lastnormaltime = lastnormaltime
724 self._lastnormaltime = lastnormaltime
735 elif len(changedfiles) < 10:
725 elif len(changedfiles) < 10:
736 # Avoid turning allfiles into a set, which can be expensive if it's
726 # Avoid turning allfiles into a set, which can be expensive if it's
737 # large.
727 # large.
738 to_lookup = []
728 to_lookup = []
739 to_drop = []
729 to_drop = []
740 for f in changedfiles:
730 for f in changedfiles:
741 if f in allfiles:
731 if f in allfiles:
742 to_lookup.append(f)
732 to_lookup.append(f)
743 else:
733 else:
744 to_drop.append(f)
734 to_drop.append(f)
745 else:
735 else:
746 changedfilesset = set(changedfiles)
736 changedfilesset = set(changedfiles)
747 to_lookup = changedfilesset & set(allfiles)
737 to_lookup = changedfilesset & set(allfiles)
748 to_drop = changedfilesset - to_lookup
738 to_drop = changedfilesset - to_lookup
749
739
750 if self._origpl is None:
740 if self._origpl is None:
751 self._origpl = self._pl
741 self._origpl = self._pl
752 self._map.setparents(parent, self._nodeconstants.nullid)
742 self._map.setparents(parent, self._nodeconstants.nullid)
753
743
754 for f in to_lookup:
744 for f in to_lookup:
755
745
756 if self.in_merge:
746 if self.in_merge:
757 self.set_tracked(f)
747 self.set_tracked(f)
758 else:
748 else:
759 self._map.reset_state(
749 self._map.reset_state(
760 f,
750 f,
761 wc_tracked=True,
751 wc_tracked=True,
762 p1_tracked=True,
752 p1_tracked=True,
763 )
753 )
764 for f in to_drop:
754 for f in to_drop:
765 self._map.reset_state(f)
755 self._map.reset_state(f)
766
756
767 self._dirty = True
757 self._dirty = True
768
758
769 def identity(self):
759 def identity(self):
770 """Return identity of dirstate itself to detect changing in storage
760 """Return identity of dirstate itself to detect changing in storage
771
761
772 If identity of previous dirstate is equal to this, writing
762 If identity of previous dirstate is equal to this, writing
773 changes based on the former dirstate out can keep consistency.
763 changes based on the former dirstate out can keep consistency.
774 """
764 """
775 return self._map.identity
765 return self._map.identity
776
766
777 def write(self, tr):
767 def write(self, tr):
778 if not self._dirty:
768 if not self._dirty:
779 return
769 return
780
770
781 filename = self._filename
771 filename = self._filename
782 if tr:
772 if tr:
783 # 'dirstate.write()' is not only for writing in-memory
773 # 'dirstate.write()' is not only for writing in-memory
784 # changes out, but also for dropping ambiguous timestamp.
774 # changes out, but also for dropping ambiguous timestamp.
785 # delayed writing re-raise "ambiguous timestamp issue".
775 # delayed writing re-raise "ambiguous timestamp issue".
786 # See also the wiki page below for detail:
776 # See also the wiki page below for detail:
787 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
777 # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan
788
778
789 # record when mtime start to be ambiguous
779 # record when mtime start to be ambiguous
790 now = _getfsnow(self._opener)
780 now = timestamp.get_fs_now(self._opener)
791
781
792 # delay writing in-memory changes out
782 # delay writing in-memory changes out
793 tr.addfilegenerator(
783 tr.addfilegenerator(
794 b'dirstate',
784 b'dirstate',
795 (self._filename,),
785 (self._filename,),
796 lambda f: self._writedirstate(tr, f, now=now),
786 lambda f: self._writedirstate(tr, f, now=now),
797 location=b'plain',
787 location=b'plain',
798 )
788 )
799 return
789 return
800
790
801 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
791 st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
802 self._writedirstate(tr, st)
792 self._writedirstate(tr, st)
803
793
804 def addparentchangecallback(self, category, callback):
794 def addparentchangecallback(self, category, callback):
805 """add a callback to be called when the wd parents are changed
795 """add a callback to be called when the wd parents are changed
806
796
807 Callback will be called with the following arguments:
797 Callback will be called with the following arguments:
808 dirstate, (oldp1, oldp2), (newp1, newp2)
798 dirstate, (oldp1, oldp2), (newp1, newp2)
809
799
810 Category is a unique identifier to allow overwriting an old callback
800 Category is a unique identifier to allow overwriting an old callback
811 with a newer callback.
801 with a newer callback.
812 """
802 """
813 self._plchangecallbacks[category] = callback
803 self._plchangecallbacks[category] = callback
814
804
815 def _writedirstate(self, tr, st, now=None):
805 def _writedirstate(self, tr, st, now=None):
816 # notify callbacks about parents change
806 # notify callbacks about parents change
817 if self._origpl is not None and self._origpl != self._pl:
807 if self._origpl is not None and self._origpl != self._pl:
818 for c, callback in sorted(
808 for c, callback in sorted(
819 pycompat.iteritems(self._plchangecallbacks)
809 pycompat.iteritems(self._plchangecallbacks)
820 ):
810 ):
821 callback(self, self._origpl, self._pl)
811 callback(self, self._origpl, self._pl)
822 self._origpl = None
812 self._origpl = None
823
813
824 if now is None:
814 if now is None:
825 # use the modification time of the newly created temporary file as the
815 # use the modification time of the newly created temporary file as the
826 # filesystem's notion of 'now'
816 # filesystem's notion of 'now'
827 now = timestamp.mtime_of(util.fstat(st))
817 now = timestamp.mtime_of(util.fstat(st))
828
818
829 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
819 # enough 'delaywrite' prevents 'pack_dirstate' from dropping
830 # timestamp of each entries in dirstate, because of 'now > mtime'
820 # timestamp of each entries in dirstate, because of 'now > mtime'
831 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
821 delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
832 if delaywrite > 0:
822 if delaywrite > 0:
833 # do we have any files to delay for?
823 # do we have any files to delay for?
834 for f, e in pycompat.iteritems(self._map):
824 for f, e in pycompat.iteritems(self._map):
835 if e.need_delay(now):
825 if e.need_delay(now):
836 import time # to avoid useless import
826 import time # to avoid useless import
837
827
838 # rather than sleep n seconds, sleep until the next
828 # rather than sleep n seconds, sleep until the next
839 # multiple of n seconds
829 # multiple of n seconds
840 clock = time.time()
830 clock = time.time()
841 start = int(clock) - (int(clock) % delaywrite)
831 start = int(clock) - (int(clock) % delaywrite)
842 end = start + delaywrite
832 end = start + delaywrite
843 time.sleep(end - clock)
833 time.sleep(end - clock)
844 # trust our estimate that the end is near now
834 # trust our estimate that the end is near now
845 now = timestamp.timestamp((end, 0))
835 now = timestamp.timestamp((end, 0))
846 break
836 break
847
837
848 self._map.write(tr, st, now)
838 self._map.write(tr, st, now)
849 self._lastnormaltime = timestamp.zero()
839 self._lastnormaltime = timestamp.zero()
850 self._dirty = False
840 self._dirty = False
851
841
852 def _dirignore(self, f):
842 def _dirignore(self, f):
853 if self._ignore(f):
843 if self._ignore(f):
854 return True
844 return True
855 for p in pathutil.finddirs(f):
845 for p in pathutil.finddirs(f):
856 if self._ignore(p):
846 if self._ignore(p):
857 return True
847 return True
858 return False
848 return False
859
849
860 def _ignorefiles(self):
850 def _ignorefiles(self):
861 files = []
851 files = []
862 if os.path.exists(self._join(b'.hgignore')):
852 if os.path.exists(self._join(b'.hgignore')):
863 files.append(self._join(b'.hgignore'))
853 files.append(self._join(b'.hgignore'))
864 for name, path in self._ui.configitems(b"ui"):
854 for name, path in self._ui.configitems(b"ui"):
865 if name == b'ignore' or name.startswith(b'ignore.'):
855 if name == b'ignore' or name.startswith(b'ignore.'):
866 # we need to use os.path.join here rather than self._join
856 # we need to use os.path.join here rather than self._join
867 # because path is arbitrary and user-specified
857 # because path is arbitrary and user-specified
868 files.append(os.path.join(self._rootdir, util.expandpath(path)))
858 files.append(os.path.join(self._rootdir, util.expandpath(path)))
869 return files
859 return files
870
860
871 def _ignorefileandline(self, f):
861 def _ignorefileandline(self, f):
872 files = collections.deque(self._ignorefiles())
862 files = collections.deque(self._ignorefiles())
873 visited = set()
863 visited = set()
874 while files:
864 while files:
875 i = files.popleft()
865 i = files.popleft()
876 patterns = matchmod.readpatternfile(
866 patterns = matchmod.readpatternfile(
877 i, self._ui.warn, sourceinfo=True
867 i, self._ui.warn, sourceinfo=True
878 )
868 )
879 for pattern, lineno, line in patterns:
869 for pattern, lineno, line in patterns:
880 kind, p = matchmod._patsplit(pattern, b'glob')
870 kind, p = matchmod._patsplit(pattern, b'glob')
881 if kind == b"subinclude":
871 if kind == b"subinclude":
882 if p not in visited:
872 if p not in visited:
883 files.append(p)
873 files.append(p)
884 continue
874 continue
885 m = matchmod.match(
875 m = matchmod.match(
886 self._root, b'', [], [pattern], warn=self._ui.warn
876 self._root, b'', [], [pattern], warn=self._ui.warn
887 )
877 )
888 if m(f):
878 if m(f):
889 return (i, lineno, line)
879 return (i, lineno, line)
890 visited.add(i)
880 visited.add(i)
891 return (None, -1, b"")
881 return (None, -1, b"")
892
882
893 def _walkexplicit(self, match, subrepos):
883 def _walkexplicit(self, match, subrepos):
894 """Get stat data about the files explicitly specified by match.
884 """Get stat data about the files explicitly specified by match.
895
885
896 Return a triple (results, dirsfound, dirsnotfound).
886 Return a triple (results, dirsfound, dirsnotfound).
897 - results is a mapping from filename to stat result. It also contains
887 - results is a mapping from filename to stat result. It also contains
898 listings mapping subrepos and .hg to None.
888 listings mapping subrepos and .hg to None.
899 - dirsfound is a list of files found to be directories.
889 - dirsfound is a list of files found to be directories.
900 - dirsnotfound is a list of files that the dirstate thinks are
890 - dirsnotfound is a list of files that the dirstate thinks are
901 directories and that were not found."""
891 directories and that were not found."""
902
892
903 def badtype(mode):
893 def badtype(mode):
904 kind = _(b'unknown')
894 kind = _(b'unknown')
905 if stat.S_ISCHR(mode):
895 if stat.S_ISCHR(mode):
906 kind = _(b'character device')
896 kind = _(b'character device')
907 elif stat.S_ISBLK(mode):
897 elif stat.S_ISBLK(mode):
908 kind = _(b'block device')
898 kind = _(b'block device')
909 elif stat.S_ISFIFO(mode):
899 elif stat.S_ISFIFO(mode):
910 kind = _(b'fifo')
900 kind = _(b'fifo')
911 elif stat.S_ISSOCK(mode):
901 elif stat.S_ISSOCK(mode):
912 kind = _(b'socket')
902 kind = _(b'socket')
913 elif stat.S_ISDIR(mode):
903 elif stat.S_ISDIR(mode):
914 kind = _(b'directory')
904 kind = _(b'directory')
915 return _(b'unsupported file type (type is %s)') % kind
905 return _(b'unsupported file type (type is %s)') % kind
916
906
917 badfn = match.bad
907 badfn = match.bad
918 dmap = self._map
908 dmap = self._map
919 lstat = os.lstat
909 lstat = os.lstat
920 getkind = stat.S_IFMT
910 getkind = stat.S_IFMT
921 dirkind = stat.S_IFDIR
911 dirkind = stat.S_IFDIR
922 regkind = stat.S_IFREG
912 regkind = stat.S_IFREG
923 lnkkind = stat.S_IFLNK
913 lnkkind = stat.S_IFLNK
924 join = self._join
914 join = self._join
925 dirsfound = []
915 dirsfound = []
926 foundadd = dirsfound.append
916 foundadd = dirsfound.append
927 dirsnotfound = []
917 dirsnotfound = []
928 notfoundadd = dirsnotfound.append
918 notfoundadd = dirsnotfound.append
929
919
930 if not match.isexact() and self._checkcase:
920 if not match.isexact() and self._checkcase:
931 normalize = self._normalize
921 normalize = self._normalize
932 else:
922 else:
933 normalize = None
923 normalize = None
934
924
935 files = sorted(match.files())
925 files = sorted(match.files())
936 subrepos.sort()
926 subrepos.sort()
937 i, j = 0, 0
927 i, j = 0, 0
938 while i < len(files) and j < len(subrepos):
928 while i < len(files) and j < len(subrepos):
939 subpath = subrepos[j] + b"/"
929 subpath = subrepos[j] + b"/"
940 if files[i] < subpath:
930 if files[i] < subpath:
941 i += 1
931 i += 1
942 continue
932 continue
943 while i < len(files) and files[i].startswith(subpath):
933 while i < len(files) and files[i].startswith(subpath):
944 del files[i]
934 del files[i]
945 j += 1
935 j += 1
946
936
947 if not files or b'' in files:
937 if not files or b'' in files:
948 files = [b'']
938 files = [b'']
949 # constructing the foldmap is expensive, so don't do it for the
939 # constructing the foldmap is expensive, so don't do it for the
950 # common case where files is ['']
940 # common case where files is ['']
951 normalize = None
941 normalize = None
952 results = dict.fromkeys(subrepos)
942 results = dict.fromkeys(subrepos)
953 results[b'.hg'] = None
943 results[b'.hg'] = None
954
944
955 for ff in files:
945 for ff in files:
956 if normalize:
946 if normalize:
957 nf = normalize(ff, False, True)
947 nf = normalize(ff, False, True)
958 else:
948 else:
959 nf = ff
949 nf = ff
960 if nf in results:
950 if nf in results:
961 continue
951 continue
962
952
963 try:
953 try:
964 st = lstat(join(nf))
954 st = lstat(join(nf))
965 kind = getkind(st.st_mode)
955 kind = getkind(st.st_mode)
966 if kind == dirkind:
956 if kind == dirkind:
967 if nf in dmap:
957 if nf in dmap:
968 # file replaced by dir on disk but still in dirstate
958 # file replaced by dir on disk but still in dirstate
969 results[nf] = None
959 results[nf] = None
970 foundadd((nf, ff))
960 foundadd((nf, ff))
971 elif kind == regkind or kind == lnkkind:
961 elif kind == regkind or kind == lnkkind:
972 results[nf] = st
962 results[nf] = st
973 else:
963 else:
974 badfn(ff, badtype(kind))
964 badfn(ff, badtype(kind))
975 if nf in dmap:
965 if nf in dmap:
976 results[nf] = None
966 results[nf] = None
977 except OSError as inst: # nf not found on disk - it is dirstate only
967 except OSError as inst: # nf not found on disk - it is dirstate only
978 if nf in dmap: # does it exactly match a missing file?
968 if nf in dmap: # does it exactly match a missing file?
979 results[nf] = None
969 results[nf] = None
980 else: # does it match a missing directory?
970 else: # does it match a missing directory?
981 if self._map.hasdir(nf):
971 if self._map.hasdir(nf):
982 notfoundadd(nf)
972 notfoundadd(nf)
983 else:
973 else:
984 badfn(ff, encoding.strtolocal(inst.strerror))
974 badfn(ff, encoding.strtolocal(inst.strerror))
985
975
986 # match.files() may contain explicitly-specified paths that shouldn't
976 # match.files() may contain explicitly-specified paths that shouldn't
987 # be taken; drop them from the list of files found. dirsfound/notfound
977 # be taken; drop them from the list of files found. dirsfound/notfound
988 # aren't filtered here because they will be tested later.
978 # aren't filtered here because they will be tested later.
989 if match.anypats():
979 if match.anypats():
990 for f in list(results):
980 for f in list(results):
991 if f == b'.hg' or f in subrepos:
981 if f == b'.hg' or f in subrepos:
992 # keep sentinel to disable further out-of-repo walks
982 # keep sentinel to disable further out-of-repo walks
993 continue
983 continue
994 if not match(f):
984 if not match(f):
995 del results[f]
985 del results[f]
996
986
997 # Case insensitive filesystems cannot rely on lstat() failing to detect
987 # Case insensitive filesystems cannot rely on lstat() failing to detect
998 # a case-only rename. Prune the stat object for any file that does not
988 # a case-only rename. Prune the stat object for any file that does not
999 # match the case in the filesystem, if there are multiple files that
989 # match the case in the filesystem, if there are multiple files that
1000 # normalize to the same path.
990 # normalize to the same path.
1001 if match.isexact() and self._checkcase:
991 if match.isexact() and self._checkcase:
1002 normed = {}
992 normed = {}
1003
993
1004 for f, st in pycompat.iteritems(results):
994 for f, st in pycompat.iteritems(results):
1005 if st is None:
995 if st is None:
1006 continue
996 continue
1007
997
1008 nc = util.normcase(f)
998 nc = util.normcase(f)
1009 paths = normed.get(nc)
999 paths = normed.get(nc)
1010
1000
1011 if paths is None:
1001 if paths is None:
1012 paths = set()
1002 paths = set()
1013 normed[nc] = paths
1003 normed[nc] = paths
1014
1004
1015 paths.add(f)
1005 paths.add(f)
1016
1006
1017 for norm, paths in pycompat.iteritems(normed):
1007 for norm, paths in pycompat.iteritems(normed):
1018 if len(paths) > 1:
1008 if len(paths) > 1:
1019 for path in paths:
1009 for path in paths:
1020 folded = self._discoverpath(
1010 folded = self._discoverpath(
1021 path, norm, True, None, self._map.dirfoldmap
1011 path, norm, True, None, self._map.dirfoldmap
1022 )
1012 )
1023 if path != folded:
1013 if path != folded:
1024 results[path] = None
1014 results[path] = None
1025
1015
1026 return results, dirsfound, dirsnotfound
1016 return results, dirsfound, dirsnotfound
1027
1017
1028 def walk(self, match, subrepos, unknown, ignored, full=True):
1018 def walk(self, match, subrepos, unknown, ignored, full=True):
1029 """
1019 """
1030 Walk recursively through the directory tree, finding all files
1020 Walk recursively through the directory tree, finding all files
1031 matched by match.
1021 matched by match.
1032
1022
1033 If full is False, maybe skip some known-clean files.
1023 If full is False, maybe skip some known-clean files.
1034
1024
1035 Return a dict mapping filename to stat-like object (either
1025 Return a dict mapping filename to stat-like object (either
1036 mercurial.osutil.stat instance or return value of os.stat()).
1026 mercurial.osutil.stat instance or return value of os.stat()).
1037
1027
1038 """
1028 """
1039 # full is a flag that extensions that hook into walk can use -- this
1029 # full is a flag that extensions that hook into walk can use -- this
1040 # implementation doesn't use it at all. This satisfies the contract
1030 # implementation doesn't use it at all. This satisfies the contract
1041 # because we only guarantee a "maybe".
1031 # because we only guarantee a "maybe".
1042
1032
1043 if ignored:
1033 if ignored:
1044 ignore = util.never
1034 ignore = util.never
1045 dirignore = util.never
1035 dirignore = util.never
1046 elif unknown:
1036 elif unknown:
1047 ignore = self._ignore
1037 ignore = self._ignore
1048 dirignore = self._dirignore
1038 dirignore = self._dirignore
1049 else:
1039 else:
1050 # if not unknown and not ignored, drop dir recursion and step 2
1040 # if not unknown and not ignored, drop dir recursion and step 2
1051 ignore = util.always
1041 ignore = util.always
1052 dirignore = util.always
1042 dirignore = util.always
1053
1043
1054 matchfn = match.matchfn
1044 matchfn = match.matchfn
1055 matchalways = match.always()
1045 matchalways = match.always()
1056 matchtdir = match.traversedir
1046 matchtdir = match.traversedir
1057 dmap = self._map
1047 dmap = self._map
1058 listdir = util.listdir
1048 listdir = util.listdir
1059 lstat = os.lstat
1049 lstat = os.lstat
1060 dirkind = stat.S_IFDIR
1050 dirkind = stat.S_IFDIR
1061 regkind = stat.S_IFREG
1051 regkind = stat.S_IFREG
1062 lnkkind = stat.S_IFLNK
1052 lnkkind = stat.S_IFLNK
1063 join = self._join
1053 join = self._join
1064
1054
1065 exact = skipstep3 = False
1055 exact = skipstep3 = False
1066 if match.isexact(): # match.exact
1056 if match.isexact(): # match.exact
1067 exact = True
1057 exact = True
1068 dirignore = util.always # skip step 2
1058 dirignore = util.always # skip step 2
1069 elif match.prefix(): # match.match, no patterns
1059 elif match.prefix(): # match.match, no patterns
1070 skipstep3 = True
1060 skipstep3 = True
1071
1061
1072 if not exact and self._checkcase:
1062 if not exact and self._checkcase:
1073 normalize = self._normalize
1063 normalize = self._normalize
1074 normalizefile = self._normalizefile
1064 normalizefile = self._normalizefile
1075 skipstep3 = False
1065 skipstep3 = False
1076 else:
1066 else:
1077 normalize = self._normalize
1067 normalize = self._normalize
1078 normalizefile = None
1068 normalizefile = None
1079
1069
1080 # step 1: find all explicit files
1070 # step 1: find all explicit files
1081 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1071 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1082 if matchtdir:
1072 if matchtdir:
1083 for d in work:
1073 for d in work:
1084 matchtdir(d[0])
1074 matchtdir(d[0])
1085 for d in dirsnotfound:
1075 for d in dirsnotfound:
1086 matchtdir(d)
1076 matchtdir(d)
1087
1077
1088 skipstep3 = skipstep3 and not (work or dirsnotfound)
1078 skipstep3 = skipstep3 and not (work or dirsnotfound)
1089 work = [d for d in work if not dirignore(d[0])]
1079 work = [d for d in work if not dirignore(d[0])]
1090
1080
1091 # step 2: visit subdirectories
1081 # step 2: visit subdirectories
1092 def traverse(work, alreadynormed):
1082 def traverse(work, alreadynormed):
1093 wadd = work.append
1083 wadd = work.append
1094 while work:
1084 while work:
1095 tracing.counter('dirstate.walk work', len(work))
1085 tracing.counter('dirstate.walk work', len(work))
1096 nd = work.pop()
1086 nd = work.pop()
1097 visitentries = match.visitchildrenset(nd)
1087 visitentries = match.visitchildrenset(nd)
1098 if not visitentries:
1088 if not visitentries:
1099 continue
1089 continue
1100 if visitentries == b'this' or visitentries == b'all':
1090 if visitentries == b'this' or visitentries == b'all':
1101 visitentries = None
1091 visitentries = None
1102 skip = None
1092 skip = None
1103 if nd != b'':
1093 if nd != b'':
1104 skip = b'.hg'
1094 skip = b'.hg'
1105 try:
1095 try:
1106 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1096 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1107 entries = listdir(join(nd), stat=True, skip=skip)
1097 entries = listdir(join(nd), stat=True, skip=skip)
1108 except OSError as inst:
1098 except OSError as inst:
1109 if inst.errno in (errno.EACCES, errno.ENOENT):
1099 if inst.errno in (errno.EACCES, errno.ENOENT):
1110 match.bad(
1100 match.bad(
1111 self.pathto(nd), encoding.strtolocal(inst.strerror)
1101 self.pathto(nd), encoding.strtolocal(inst.strerror)
1112 )
1102 )
1113 continue
1103 continue
1114 raise
1104 raise
1115 for f, kind, st in entries:
1105 for f, kind, st in entries:
1116 # Some matchers may return files in the visitentries set,
1106 # Some matchers may return files in the visitentries set,
1117 # instead of 'this', if the matcher explicitly mentions them
1107 # instead of 'this', if the matcher explicitly mentions them
1118 # and is not an exactmatcher. This is acceptable; we do not
1108 # and is not an exactmatcher. This is acceptable; we do not
1119 # make any hard assumptions about file-or-directory below
1109 # make any hard assumptions about file-or-directory below
1120 # based on the presence of `f` in visitentries. If
1110 # based on the presence of `f` in visitentries. If
1121 # visitchildrenset returned a set, we can always skip the
1111 # visitchildrenset returned a set, we can always skip the
1122 # entries *not* in the set it provided regardless of whether
1112 # entries *not* in the set it provided regardless of whether
1123 # they're actually a file or a directory.
1113 # they're actually a file or a directory.
1124 if visitentries and f not in visitentries:
1114 if visitentries and f not in visitentries:
1125 continue
1115 continue
1126 if normalizefile:
1116 if normalizefile:
1127 # even though f might be a directory, we're only
1117 # even though f might be a directory, we're only
1128 # interested in comparing it to files currently in the
1118 # interested in comparing it to files currently in the
1129 # dmap -- therefore normalizefile is enough
1119 # dmap -- therefore normalizefile is enough
1130 nf = normalizefile(
1120 nf = normalizefile(
1131 nd and (nd + b"/" + f) or f, True, True
1121 nd and (nd + b"/" + f) or f, True, True
1132 )
1122 )
1133 else:
1123 else:
1134 nf = nd and (nd + b"/" + f) or f
1124 nf = nd and (nd + b"/" + f) or f
1135 if nf not in results:
1125 if nf not in results:
1136 if kind == dirkind:
1126 if kind == dirkind:
1137 if not ignore(nf):
1127 if not ignore(nf):
1138 if matchtdir:
1128 if matchtdir:
1139 matchtdir(nf)
1129 matchtdir(nf)
1140 wadd(nf)
1130 wadd(nf)
1141 if nf in dmap and (matchalways or matchfn(nf)):
1131 if nf in dmap and (matchalways or matchfn(nf)):
1142 results[nf] = None
1132 results[nf] = None
1143 elif kind == regkind or kind == lnkkind:
1133 elif kind == regkind or kind == lnkkind:
1144 if nf in dmap:
1134 if nf in dmap:
1145 if matchalways or matchfn(nf):
1135 if matchalways or matchfn(nf):
1146 results[nf] = st
1136 results[nf] = st
1147 elif (matchalways or matchfn(nf)) and not ignore(
1137 elif (matchalways or matchfn(nf)) and not ignore(
1148 nf
1138 nf
1149 ):
1139 ):
1150 # unknown file -- normalize if necessary
1140 # unknown file -- normalize if necessary
1151 if not alreadynormed:
1141 if not alreadynormed:
1152 nf = normalize(nf, False, True)
1142 nf = normalize(nf, False, True)
1153 results[nf] = st
1143 results[nf] = st
1154 elif nf in dmap and (matchalways or matchfn(nf)):
1144 elif nf in dmap and (matchalways or matchfn(nf)):
1155 results[nf] = None
1145 results[nf] = None
1156
1146
1157 for nd, d in work:
1147 for nd, d in work:
1158 # alreadynormed means that processwork doesn't have to do any
1148 # alreadynormed means that processwork doesn't have to do any
1159 # expensive directory normalization
1149 # expensive directory normalization
1160 alreadynormed = not normalize or nd == d
1150 alreadynormed = not normalize or nd == d
1161 traverse([d], alreadynormed)
1151 traverse([d], alreadynormed)
1162
1152
1163 for s in subrepos:
1153 for s in subrepos:
1164 del results[s]
1154 del results[s]
1165 del results[b'.hg']
1155 del results[b'.hg']
1166
1156
1167 # step 3: visit remaining files from dmap
1157 # step 3: visit remaining files from dmap
1168 if not skipstep3 and not exact:
1158 if not skipstep3 and not exact:
1169 # If a dmap file is not in results yet, it was either
1159 # If a dmap file is not in results yet, it was either
1170 # a) not matching matchfn b) ignored, c) missing, or d) under a
1160 # a) not matching matchfn b) ignored, c) missing, or d) under a
1171 # symlink directory.
1161 # symlink directory.
1172 if not results and matchalways:
1162 if not results and matchalways:
1173 visit = [f for f in dmap]
1163 visit = [f for f in dmap]
1174 else:
1164 else:
1175 visit = [f for f in dmap if f not in results and matchfn(f)]
1165 visit = [f for f in dmap if f not in results and matchfn(f)]
1176 visit.sort()
1166 visit.sort()
1177
1167
1178 if unknown:
1168 if unknown:
1179 # unknown == True means we walked all dirs under the roots
1169 # unknown == True means we walked all dirs under the roots
1180 # that wasn't ignored, and everything that matched was stat'ed
1170 # that wasn't ignored, and everything that matched was stat'ed
1181 # and is already in results.
1171 # and is already in results.
1182 # The rest must thus be ignored or under a symlink.
1172 # The rest must thus be ignored or under a symlink.
1183 audit_path = pathutil.pathauditor(self._root, cached=True)
1173 audit_path = pathutil.pathauditor(self._root, cached=True)
1184
1174
1185 for nf in iter(visit):
1175 for nf in iter(visit):
1186 # If a stat for the same file was already added with a
1176 # If a stat for the same file was already added with a
1187 # different case, don't add one for this, since that would
1177 # different case, don't add one for this, since that would
1188 # make it appear as if the file exists under both names
1178 # make it appear as if the file exists under both names
1189 # on disk.
1179 # on disk.
1190 if (
1180 if (
1191 normalizefile
1181 normalizefile
1192 and normalizefile(nf, True, True) in results
1182 and normalizefile(nf, True, True) in results
1193 ):
1183 ):
1194 results[nf] = None
1184 results[nf] = None
1195 # Report ignored items in the dmap as long as they are not
1185 # Report ignored items in the dmap as long as they are not
1196 # under a symlink directory.
1186 # under a symlink directory.
1197 elif audit_path.check(nf):
1187 elif audit_path.check(nf):
1198 try:
1188 try:
1199 results[nf] = lstat(join(nf))
1189 results[nf] = lstat(join(nf))
1200 # file was just ignored, no links, and exists
1190 # file was just ignored, no links, and exists
1201 except OSError:
1191 except OSError:
1202 # file doesn't exist
1192 # file doesn't exist
1203 results[nf] = None
1193 results[nf] = None
1204 else:
1194 else:
1205 # It's either missing or under a symlink directory
1195 # It's either missing or under a symlink directory
1206 # which we in this case report as missing
1196 # which we in this case report as missing
1207 results[nf] = None
1197 results[nf] = None
1208 else:
1198 else:
1209 # We may not have walked the full directory tree above,
1199 # We may not have walked the full directory tree above,
1210 # so stat and check everything we missed.
1200 # so stat and check everything we missed.
1211 iv = iter(visit)
1201 iv = iter(visit)
1212 for st in util.statfiles([join(i) for i in visit]):
1202 for st in util.statfiles([join(i) for i in visit]):
1213 results[next(iv)] = st
1203 results[next(iv)] = st
1214 return results
1204 return results
1215
1205
def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
    """Run the Rust implementation of status and adapt its results.

    Returns a ``(lookup, status)`` pair like ``status()`` does.  May
    raise ``rustmod.FallbackError`` when the Rust path cannot handle
    the request, in which case the caller falls back to pure Python.
    """
    # Force Rayon (Rust parallelism library) to respect the number of
    # workers. This is a temporary workaround until Rust code knows
    # how to read the config file.
    numcpus = self._ui.configint(b"worker", b"numcpus")
    if numcpus is not None:
        encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
    if not self._ui.configbool(b"worker", b"enabled", True):
        encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

    (
        lookup,
        modified,
        added,
        removed,
        deleted,
        clean,
        ignored,
        unknown,
        warnings,
        bad,
        traversed,
        dirty,
    ) = rustmod.status(
        self._map._map,
        matcher,
        self._rootdir,
        self._ignorefiles(),
        self._checkexec,
        self._lastnormaltime,
        bool(list_clean),
        bool(list_ignored),
        bool(list_unknown),
        bool(matcher.traversedir),
    )

    self._dirty |= dirty

    if matcher.traversedir:
        for visited_dir in traversed:
            matcher.traversedir(visited_dir)

    if self._ui.warn:
        for item in warnings:
            if isinstance(item, tuple):
                # (file_path, syntax): a pattern with invalid syntax.
                file_path, syntax = item
                self._ui.warn(
                    _(b"%s: ignoring invalid syntax '%s'\n")
                    % (file_path, syntax)
                )
            else:
                # bare path: the pattern file could not be read.
                msg = _(b"skipping unreadable pattern file '%s': %s\n")
                canonical = pathutil.canonpath(
                    self._rootdir, self._rootdir, item
                )
                self._ui.warn(
                    msg % (canonical, b"No such file or directory")
                )

    for fn, message in bad:
        matcher.bad(fn, encoding.strtolocal(message))

    status = scmutil.status(
        modified=modified,
        added=added,
        removed=removed,
        deleted=deleted,
        unknown=unknown,
        ignored=ignored,
        clean=clean,
    )
    return (lookup, status)
1294
1284
def status(self, match, subrepos, ignored, clean, unknown):
    """Determine the status of the working copy relative to the
    dirstate and return a pair of (unsure, status), where status is of type
    scmutil.status and:

      unsure:
        files that might have been modified since the dirstate was
        written, but need to be read to be sure (size is the same
        but mtime differs)
      status.modified:
        files that have definitely been modified since the dirstate
        was written (different size or mode)
      status.clean:
        files that have definitely not been modified since the
        dirstate was written
    """
    listignored, listclean, listunknown = ignored, clean, unknown
    lookup, modified, added, unknown, ignored = [], [], [], [], []
    removed, deleted, clean = [], [], []

    dmap = self._map
    dmap.preload()

    # The Rust fast path only handles a subset of matchers and
    # configurations; fall back to the Python implementation otherwise.
    allowed_matchers = (
        matchmod.alwaysmatcher,
        matchmod.exactmatcher,
        matchmod.includematcher,
    )
    use_rust = (
        rustmod is not None
        # Case-insensitive filesystems are not handled yet
        and not self._checkcase
        and not subrepos
        and not sparse.enabled
        # Some matchers have yet to be implemented
        and isinstance(match, allowed_matchers)
    )

    if use_rust:
        try:
            return self._rust_status(
                match, listclean, listignored, listunknown
            )
        except rustmod.FallbackError:
            pass

    def noop(f):
        pass

    # Bind frequently-called helpers to locals for the hot loop below.
    dcontains = dmap.__contains__
    dget = dmap.__getitem__
    ladd = lookup.append  # aka "unsure"
    madd = modified.append
    aadd = added.append
    uadd = unknown.append if listunknown else noop
    iadd = ignored.append if listignored else noop
    radd = removed.append
    dadd = deleted.append
    cadd = clean.append if listclean else noop
    mexact = match.exact
    dirignore = self._dirignore
    checkexec = self._checkexec
    checklink = self._checklink
    copymap = self._map.copymap
    lastnormaltime = self._lastnormaltime

    # We need to do full walks when either
    # - we're listing all clean files, or
    # - match.traversedir does something, because match.traversedir should
    #   be called for every dir in the working dir
    full = listclean or match.traversedir is not None
    walk_result = self.walk(
        match, subrepos, listunknown, listignored, full=full
    )
    for fn, st in pycompat.iteritems(walk_result):
        if not dcontains(fn):
            # Not tracked: classify as ignored or unknown.
            if (listignored or mexact(fn)) and dirignore(fn):
                if listignored:
                    iadd(fn)
            else:
                uadd(fn)
            continue

        entry = dget(fn)
        mode = entry.mode
        size = entry.size

        if not st and entry.tracked:
            dadd(fn)
        elif entry.p2_info:
            madd(fn)
        elif entry.added:
            aadd(fn)
        elif entry.removed:
            radd(fn)
        elif entry.tracked:
            if not checklink and entry.has_fallback_symlink:
                # If the file system does not support symlink, the mode
                # might not be correctly stored in the dirstate, so do not
                # trust it.
                ladd(fn)
            elif not checkexec and entry.has_fallback_exec:
                # If the file system does not support exec bits, the mode
                # might not be correctly stored in the dirstate, so do not
                # trust it.
                ladd(fn)
            elif (
                size >= 0
                and (
                    (size != st.st_size and size != st.st_size & _rangemask)
                    or ((mode ^ st.st_mode) & 0o100 and checkexec)
                )
                or fn in copymap
            ):
                if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                    # issue6456: Size returned may be longer due to
                    # encryption on EXT-4 fscrypt, undecided.
                    ladd(fn)
                else:
                    madd(fn)
            elif not entry.mtime_likely_equal_to(timestamp.mtime_of(st)):
                ladd(fn)
            elif timestamp.mtime_of(st) == lastnormaltime:
                # fn may have just been marked as normal and it may have
                # changed in the same second without changing its size.
                # This can happen if we quickly do multiple commits.
                # Force lookup, so we don't miss such a racy file change.
                ladd(fn)
            elif listclean:
                cadd(fn)

    status = scmutil.status(
        modified, added, removed, deleted, unknown, ignored, clean
    )
    return (lookup, status)
1434
1424
def matches(self, match):
    """
    return files in the dirstate (in whatever state) filtered by match
    """
    # The Rust map exposes the raw mapping one attribute deeper.
    dmap = self._map if rustmod is None else self._map._map

    if match.always():
        return dmap.keys()
    files = match.files()
    if match.isexact():
        # fast path -- filter the other way around, since typically files is
        # much smaller than dmap
        return [f for f in files if f in dmap]
    if match.prefix() and all(fn in dmap for fn in files):
        # fast path -- all the values are known to be files, so just return
        # that
        return list(files)
    return [f for f in dmap if match(f)]
1455
1445
1456 def _actualfilename(self, tr):
1446 def _actualfilename(self, tr):
1457 if tr:
1447 if tr:
1458 return self._pendingfilename
1448 return self._pendingfilename
1459 else:
1449 else:
1460 return self._filename
1450 return self._filename
1461
1451
def savebackup(self, tr, backupname):
    '''Save current dirstate into backup file'''
    filename = self._actualfilename(tr)
    assert backupname != filename

    opener = self._opener

    # use '_writedirstate' instead of 'write' to write changes certainly,
    # because the latter omits writing out if transaction is running.
    # output file will be used to create backup of dirstate at this point.
    if self._dirty or not opener.exists(filename):
        self._writedirstate(
            tr,
            opener(filename, b"w", atomictemp=True, checkambig=True),
        )

    if tr:
        # ensure that subsequent tr.writepending returns True for
        # changes written out above, even if dirstate is never
        # changed after this
        tr.addfilegenerator(
            b'dirstate',
            (self._filename,),
            lambda f: self._writedirstate(tr, f),
            location=b'plain',
        )

        # ensure that pending file written above is unlinked at
        # failure, even if tr.writepending isn't invoked until the
        # end of this transaction
        tr.registertmp(filename, location=b'plain')

    opener.tryunlink(backupname)
    # hardlink backup is okay because _writedirstate is always called
    # with an "atomictemp=True" file.
    util.copyfile(
        opener.join(filename),
        opener.join(backupname),
        hardlink=True,
    )
1500
1490
def restorebackup(self, tr, backupname):
    '''Restore dirstate by backup file'''
    # this "invalidate()" prevents "wlock.release()" from writing
    # changes of dirstate out after restoring from backup file
    self.invalidate()
    filename = self._actualfilename(tr)
    opener = self._opener
    if util.samefile(opener.join(backupname), opener.join(filename)):
        # backup and target are already the same file (hardlinked):
        # just drop the backup name.
        opener.unlink(backupname)
    else:
        opener.rename(backupname, filename, checkambig=True)
1512
1502
def clearbackup(self, tr, backupname):
    """Remove the dirstate backup file.

    ``tr`` is accepted for interface symmetry with the other backup
    methods and is not used here.
    """
    self._opener.unlink(backupname)
1516
1506
def verify(self, m1, m2):
    """check the dirstate content again the parent manifest and yield errors"""
    missing_from_p1 = b"%s in state %s, but not in manifest1\n"
    unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
    missing_from_ps = b"%s in state %s, but not in either manifest\n"
    missing_from_ds = b"%s in manifest1, but listed as state %s\n"

    # Cross-check every dirstate entry against the parent manifests.
    for fname, entry in self.items():
        state = entry.state
        if state in b"nr" and fname not in m1:
            yield (missing_from_p1, fname, state)
        if state in b"a" and fname in m1:
            yield (unexpected_in_p1, fname, state)
        if state in b"m" and fname not in m1 and fname not in m2:
            yield (missing_from_ps, fname, state)

    # And the reverse direction: manifest files the dirstate forgot.
    for fname in m1:
        state = self.get_entry(fname).state
        if state not in b"nrm":
            yield (missing_from_ds, fname, state)
@@ -1,87 +1,101 b''
1 # Copyright Mercurial Contributors
1 # Copyright Mercurial Contributors
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 from __future__ import absolute_import
6 from __future__ import absolute_import
7
7
8 import functools
8 import functools
9 import os
9 import stat
10 import stat
10
11
11
12
rangemask = 0x7FFFFFFF


@functools.total_ordering
class timestamp(tuple):
    """
    A Unix timestamp with optional nanoseconds precision,
    modulo 2**31 seconds.

    A 2-tuple containing:

    `truncated_seconds`: seconds since the Unix epoch,
    truncated to its lower 31 bits

    `subsecond_nanoseconds`: number of nanoseconds since `truncated_seconds`.
    When this is zero, the sub-second precision is considered unknown.
    """

    def __new__(cls, value):
        secs, subsec_nanos = value
        # Keep only the lower 31 bits of the seconds part.
        return super(timestamp, cls).__new__(
            cls, (secs & rangemask, subsec_nanos)
        )

    def __eq__(self, other):
        lhs_secs, lhs_nanos = self
        rhs_secs, rhs_nanos = other
        if lhs_secs != rhs_secs:
            return False
        # A zero sub-second part means the precision is unknown, so it
        # compares equal to any sub-second count within the same second.
        return lhs_nanos == rhs_nanos or lhs_nanos == 0 or rhs_nanos == 0

    def __gt__(self, other):
        lhs_secs, lhs_nanos = self
        rhs_secs, rhs_nanos = other
        if lhs_secs != rhs_secs:
            return lhs_secs > rhs_secs
        if lhs_nanos == 0 or rhs_nanos == 0:
            # they are considered equal, so not "greater than"
            return False
        return lhs_nanos > rhs_nanos
55
56
56
57
def get_fs_now(vfs):
    """return a timestamp for "now" in the current vfs

    This will raise an exception if no temporary files could be created.
    """
    # Create a throwaway file and read its mtime: this samples the
    # filesystem's own clock rather than the system clock.
    fd, name = vfs.mkstemp()
    try:
        return mtime_of(os.fstat(fd))
    finally:
        os.close(fd)
        vfs.unlink(name)
69
70
def zero():
    """
    Returns the `timestamp` at the Unix epoch.
    """
    # (0, 0) is unchanged by the constructor's 31-bit mask, so going
    # through the public constructor yields the epoch value directly.
    return timestamp((0, 0))
75 return tuple.__new__(timestamp, (0, 0))
62
76
63
77
def mtime_of(stat_result):
    """
    Takes an `os.stat_result`-like object and returns a `timestamp` object
    for its modification time.
    """
    try:
        # TODO: add this attribute to `osutil.stat` objects,
        # see `mercurial/cext/osutil.c`.
        #
        # This attribute is also not available on Python 2.
        nanos = stat_result.st_mtime_ns
    except AttributeError:
        # https://docs.python.org/2/library/os.html#os.stat_float_times
        # "For compatibility with older Python versions,
        # accessing stat_result as a tuple always returns integers."
        secs = stat_result[stat.ST_MTIME]
        subsec_nanos = 0
    else:
        # Split the nanosecond count into whole seconds and the remainder.
        secs, subsec_nanos = divmod(nanos, 10 ** 9)
    return timestamp((secs, subsec_nanos))
@@ -1,107 +1,106 b''
1 # extension to emulate invoking 'dirstate.write()' at the time
1 # extension to emulate invoking 'dirstate.write()' at the time
2 # specified by '[fakedirstatewritetime] fakenow', only when
2 # specified by '[fakedirstatewritetime] fakenow', only when
3 # 'dirstate.write()' is invoked via functions below:
3 # 'dirstate.write()' is invoked via functions below:
4 #
4 #
5 # - 'workingctx._poststatusfixup()' (= 'repo.status()')
5 # - 'workingctx._poststatusfixup()' (= 'repo.status()')
6 # - 'committablectx.markcommitted()'
6 # - 'committablectx.markcommitted()'
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from mercurial import (
10 from mercurial import (
11 context,
11 context,
12 dirstate,
13 dirstatemap as dirstatemapmod,
12 dirstatemap as dirstatemapmod,
14 extensions,
13 extensions,
15 policy,
14 policy,
16 registrar,
15 registrar,
17 )
16 )
18 from mercurial.dirstateutils import timestamp
17 from mercurial.dirstateutils import timestamp
19 from mercurial.utils import dateutil
18 from mercurial.utils import dateutil
20
19
# Probe for the Rust extension module; `hgdemandimport` makes imports lazy,
# so touch an attribute to force the real import attempt.
try:
    from mercurial import rustext

    rustext.__name__  # force actual import (see hgdemandimport)
except ImportError:
    rustext = None

# Registration table for this extension's configuration knobs.
configtable = {}
configitem = registrar.configitem(configtable)

configitem(
    b'fakedirstatewritetime',
    b'fakenow',
    default=None,
)

parsers = policy.importmod('parsers')
# True when the Rust dirstate implementation is in use; it bypasses the
# public parse/pack entry points and needs separate wrapping.
has_rust_dirstate = policy.importrust('dirstate') is not None
39
38
40
39
def pack_dirstate(fakenow, orig, dmap, copymap, pl, now):
    """Pack *dmap* while pretending the current time is *fakenow*.

    Replays what the original ``parsers.pack_dirstate`` would do with the
    real *now* — flagging entries whose mtime is ambiguous with "now" as
    possibly dirty — then delegates to *orig* with the faked time.
    """
    for fname, entry in dmap.items():
        if entry.need_delay(now):
            entry.set_possibly_dirty()

    return orig(dmap, copymap, pl, fakenow)
49
48
50
49
def fakewrite(ui, func):
    """Run *func* with the dirstate's notion of "now" faked.

    Fakes "now" for 'pack_dirstate' only while 'func' runs; all patched
    entry points are restored afterwards.
    """
    fakenow = ui.config(b'fakedirstatewritetime', b'fakenow')
    if not fakenow:
        # Execute the original one if fakenow isn't configured. This is
        # useful to prevent subrepos from executing the replaced one,
        # because replacing 'parsers.pack_dirstate' is also effective
        # in subrepos.
        return func()

    # Parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
    # the 'fakenow' value and a 'touch -t YYYYmmddHHMM' argument easy.
    fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
    fakenow = timestamp.timestamp((fakenow, 0))

    if has_rust_dirstate:
        # The Rust implementation does not use the public parse/pack
        # dirstate entry points, to prevent conversion round-trips.
        orig_dirstatemap_write = dirstatemapmod.dirstatemap.write
        fake_map_write = lambda self, tr, st, now: orig_dirstatemap_write(
            self, tr, st, fakenow
        )
        dirstatemapmod.dirstatemap.write = fake_map_write

    orig_get_fs_now = timestamp.get_fs_now
    orig_module = parsers
    orig_pack_dirstate = parsers.pack_dirstate
    fake_pack = lambda *args: pack_dirstate(
        fakenow, orig_pack_dirstate, *args
    )

    orig_module.pack_dirstate = fake_pack
    timestamp.get_fs_now = lambda *args: fakenow
    try:
        return func()
    finally:
        # Always undo the monkey-patching, even if func() raised.
        orig_module.pack_dirstate = orig_pack_dirstate
        timestamp.get_fs_now = orig_get_fs_now
        if has_rust_dirstate:
            dirstatemapmod.dirstatemap.write = orig_dirstatemap_write
91
90
92
91
def _poststatusfixup(orig, workingctx, status, fixup):
    """Wrap workingctx._poststatusfixup so dirstate writes use the fake time."""
    repo_ui = workingctx.repo().ui
    run_orig = lambda: orig(workingctx, status, fixup)
    return fakewrite(repo_ui, run_orig)
96
95
97
96
def markcommitted(orig, committablectx, node):
    """Wrap committablectx.markcommitted so dirstate writes use the fake time."""
    repo_ui = committablectx.repo().ui
    run_orig = lambda: orig(committablectx, node)
    return fakewrite(repo_ui, run_orig)
101
100
102
101
def extsetup(ui):
    """Install wrappers on the two code paths that trigger a dirstate write."""
    wrappers = [
        ('_poststatusfixup', _poststatusfixup),
        ('markcommitted', markcommitted),
    ]
    for attr_name, wrapper in wrappers:
        extensions.wrapfunction(context.workingctx, attr_name, wrapper)
General Comments 0
You need to be logged in to leave comments. Login now