##// END OF EJS Templates
dirstate: do not write an empty dirstate just for backup...
marmoute -
r50897:e358f6e0 default
parent child Browse files
Show More
@@ -1,1613 +1,1629 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import collections
9 import collections
10 import contextlib
10 import contextlib
11 import os
11 import os
12 import stat
12 import stat
13 import uuid
13 import uuid
14
14
15 from .i18n import _
15 from .i18n import _
16 from .pycompat import delattr
16 from .pycompat import delattr
17
17
18 from hgdemandimport import tracing
18 from hgdemandimport import tracing
19
19
20 from . import (
20 from . import (
21 dirstatemap,
21 dirstatemap,
22 encoding,
22 encoding,
23 error,
23 error,
24 match as matchmod,
24 match as matchmod,
25 node,
25 node,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 util,
30 util,
31 )
31 )
32
32
33 from .dirstateutils import (
33 from .dirstateutils import (
34 docket as docketmod,
34 docket as docketmod,
35 timestamp,
35 timestamp,
36 )
36 )
37
37
38 from .interfaces import (
38 from .interfaces import (
39 dirstate as intdirstate,
39 dirstate as intdirstate,
40 util as interfaceutil,
40 util as interfaceutil,
41 )
41 )
42
42
43 parsers = policy.importmod('parsers')
43 parsers = policy.importmod('parsers')
44 rustmod = policy.importrust('dirstate')
44 rustmod = policy.importrust('dirstate')
45
45
46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
46 HAS_FAST_DIRSTATE_V2 = rustmod is not None
47
47
48 propertycache = util.propertycache
48 propertycache = util.propertycache
49 filecache = scmutil.filecache
49 filecache = scmutil.filecache
50 _rangemask = dirstatemap.rangemask
50 _rangemask = dirstatemap.rangemask
51
51
52 DirstateItem = dirstatemap.DirstateItem
52 DirstateItem = dirstatemap.DirstateItem
53
53
54
54
class repocache(filecache):
    """A `filecache` variant for files that live under `.hg/`.

    Overrides `join` so cache-validation stat calls resolve the file name
    through the repository's `.hg` opener rather than the working directory.
    """

    def join(self, obj, fname):
        # `obj` is the dirstate; its opener is rooted at `.hg/`.
        return obj._opener.join(fname)
60
60
61
61
class rootcache(filecache):
    """A `filecache` variant for files that live in the repository root."""

    def join(self, obj, fname):
        # `obj` is the dirstate; `_join` prefixes the working-dir root.
        return obj._join(fname)
67
67
68
68
def requires_changing_parents(func):
    """Decorator: restrict `func` to a `changing_parents` context.

    Raises ProgrammingError when the wrapped method is called outside of a
    `dirstate.changing_parents(repo)` context, or after the dirstate has
    been invalidated while such a context is still open.
    """

    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a changing_parents context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        if self._invalidated_context:
            msg = 'calling `%s` after the dirstate was invalidated'
            # BUG FIX: the function name was never interpolated here, so the
            # error carried a literal `%s` instead of the offending method.
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
81
81
82
82
def requires_not_changing_parents(func):
    """Decorator: forbid calling `func` inside a `changing_parents` context.

    Raises ProgrammingError when the wrapped method is invoked while a
    parent-changing operation is in progress.
    """

    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a changing_parents context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap
92
92
93
93
94 @interfaceutil.implementer(intdirstate.idirstate)
94 @interfaceutil.implementer(intdirstate.idirstate)
95 class dirstate:
95 class dirstate:
96 def __init__(
96 def __init__(
97 self,
97 self,
98 opener,
98 opener,
99 ui,
99 ui,
100 root,
100 root,
101 validate,
101 validate,
102 sparsematchfn,
102 sparsematchfn,
103 nodeconstants,
103 nodeconstants,
104 use_dirstate_v2,
104 use_dirstate_v2,
105 use_tracked_hint=False,
105 use_tracked_hint=False,
106 ):
106 ):
107 """Create a new dirstate object.
107 """Create a new dirstate object.
108
108
109 opener is an open()-like callable that can be used to open the
109 opener is an open()-like callable that can be used to open the
110 dirstate file; root is the root of the directory tracked by
110 dirstate file; root is the root of the directory tracked by
111 the dirstate.
111 the dirstate.
112 """
112 """
113 self._use_dirstate_v2 = use_dirstate_v2
113 self._use_dirstate_v2 = use_dirstate_v2
114 self._use_tracked_hint = use_tracked_hint
114 self._use_tracked_hint = use_tracked_hint
115 self._nodeconstants = nodeconstants
115 self._nodeconstants = nodeconstants
116 self._opener = opener
116 self._opener = opener
117 self._validate = validate
117 self._validate = validate
118 self._root = root
118 self._root = root
119 # Either build a sparse-matcher or None if sparse is disabled
119 # Either build a sparse-matcher or None if sparse is disabled
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 # True is any internal state may be different
124 # True is any internal state may be different
125 self._dirty = False
125 self._dirty = False
126 # True if the set of tracked file may be different
126 # True if the set of tracked file may be different
127 self._dirty_tracked_set = False
127 self._dirty_tracked_set = False
128 self._ui = ui
128 self._ui = ui
129 self._filecache = {}
129 self._filecache = {}
130 # nesting level of `changing_parents` context
130 # nesting level of `changing_parents` context
131 self._parentwriters = 0
131 self._parentwriters = 0
132 # True if the current dirstate changing operations have been
132 # True if the current dirstate changing operations have been
133 # invalidated (used to make sure all nested contexts have been exited)
133 # invalidated (used to make sure all nested contexts have been exited)
134 self._invalidated_context = False
134 self._invalidated_context = False
135 self._filename = b'dirstate'
135 self._filename = b'dirstate'
136 self._filename_th = b'dirstate-tracked-hint'
136 self._filename_th = b'dirstate-tracked-hint'
137 self._pendingfilename = b'%s.pending' % self._filename
137 self._pendingfilename = b'%s.pending' % self._filename
138 self._plchangecallbacks = {}
138 self._plchangecallbacks = {}
139 self._origpl = None
139 self._origpl = None
140 self._mapcls = dirstatemap.dirstatemap
140 self._mapcls = dirstatemap.dirstatemap
141 # Access and cache cwd early, so we don't access it for the first time
141 # Access and cache cwd early, so we don't access it for the first time
142 # after a working-copy update caused it to not exist (accessing it then
142 # after a working-copy update caused it to not exist (accessing it then
143 # raises an exception).
143 # raises an exception).
144 self._cwd
144 self._cwd
145
145
146 def prefetch_parents(self):
146 def prefetch_parents(self):
147 """make sure the parents are loaded
147 """make sure the parents are loaded
148
148
149 Used to avoid a race condition.
149 Used to avoid a race condition.
150 """
150 """
151 self._pl
151 self._pl
152
152
153 @contextlib.contextmanager
153 @contextlib.contextmanager
154 def changing_parents(self, repo):
154 def changing_parents(self, repo):
155 """Context manager for handling dirstate parents.
155 """Context manager for handling dirstate parents.
156
156
157 If an exception occurs in the scope of the context manager,
157 If an exception occurs in the scope of the context manager,
158 the incoherent dirstate won't be written when wlock is
158 the incoherent dirstate won't be written when wlock is
159 released.
159 released.
160 """
160 """
161 if repo.currentwlock() is None:
161 if repo.currentwlock() is None:
162 msg = b"changing parents without holding the wlock"
162 msg = b"changing parents without holding the wlock"
163 raise error.ProgrammingError(msg)
163 raise error.ProgrammingError(msg)
164 if self._invalidated_context:
164 if self._invalidated_context:
165 msg = "trying to use an invalidated dirstate before it has reset"
165 msg = "trying to use an invalidated dirstate before it has reset"
166 raise error.ProgrammingError(msg)
166 raise error.ProgrammingError(msg)
167 self._parentwriters += 1
167 self._parentwriters += 1
168 try:
168 try:
169 yield
169 yield
170 except Exception:
170 except Exception:
171 self.invalidate()
171 self.invalidate()
172 raise
172 raise
173 finally:
173 finally:
174 if self._parentwriters > 0:
174 if self._parentwriters > 0:
175 if self._invalidated_context:
175 if self._invalidated_context:
176 # make sure we invalidate anything an upper context might
176 # make sure we invalidate anything an upper context might
177 # have changed.
177 # have changed.
178 self.invalidate()
178 self.invalidate()
179 self._parentwriters -= 1
179 self._parentwriters -= 1
180 # The invalidation is complete once we exit the final context
180 # The invalidation is complete once we exit the final context
181 # manager
181 # manager
182 if self._parentwriters <= 0:
182 if self._parentwriters <= 0:
183 assert self._parentwriters == 0
183 assert self._parentwriters == 0
184 if self._invalidated_context:
184 if self._invalidated_context:
185 self._invalidated_context = False
185 self._invalidated_context = False
186 else:
186 else:
187 # When an exception occured, `_invalidated_context`
187 # When an exception occured, `_invalidated_context`
188 # would have been set to True by the `invalidate`
188 # would have been set to True by the `invalidate`
189 # call earlier.
189 # call earlier.
190 #
190 #
191 # We don't have more straightforward code, because the
191 # We don't have more straightforward code, because the
192 # Exception catching (and the associated `invalidate`
192 # Exception catching (and the associated `invalidate`
193 # calling) might have been called by a nested context
193 # calling) might have been called by a nested context
194 # instead of the top level one.
194 # instead of the top level one.
195 self.write(repo.currenttransaction())
195 self.write(repo.currenttransaction())
196
196
197 # here to help migration to the new code
197 # here to help migration to the new code
198 def parentchange(self):
198 def parentchange(self):
199 msg = (
199 msg = (
200 "Mercurial 6.4 and later requires call to "
200 "Mercurial 6.4 and later requires call to "
201 "`dirstate.changing_parents(repo)`"
201 "`dirstate.changing_parents(repo)`"
202 )
202 )
203 raise error.ProgrammingError(msg)
203 raise error.ProgrammingError(msg)
204
204
205 def pendingparentchange(self):
205 def pendingparentchange(self):
206 """Returns true if the dirstate is in the middle of a set of changes
206 """Returns true if the dirstate is in the middle of a set of changes
207 that modify the dirstate parent.
207 that modify the dirstate parent.
208 """
208 """
209 return self._parentwriters > 0
209 return self._parentwriters > 0
210
210
211 @propertycache
211 @propertycache
212 def _map(self):
212 def _map(self):
213 """Return the dirstate contents (see documentation for dirstatemap)."""
213 """Return the dirstate contents (see documentation for dirstatemap)."""
214 self._map = self._mapcls(
214 self._map = self._mapcls(
215 self._ui,
215 self._ui,
216 self._opener,
216 self._opener,
217 self._root,
217 self._root,
218 self._nodeconstants,
218 self._nodeconstants,
219 self._use_dirstate_v2,
219 self._use_dirstate_v2,
220 )
220 )
221 return self._map
221 return self._map
222
222
223 @property
223 @property
224 def _sparsematcher(self):
224 def _sparsematcher(self):
225 """The matcher for the sparse checkout.
225 """The matcher for the sparse checkout.
226
226
227 The working directory may not include every file from a manifest. The
227 The working directory may not include every file from a manifest. The
228 matcher obtained by this property will match a path if it is to be
228 matcher obtained by this property will match a path if it is to be
229 included in the working directory.
229 included in the working directory.
230
230
231 When sparse if disabled, return None.
231 When sparse if disabled, return None.
232 """
232 """
233 if self._sparsematchfn is None:
233 if self._sparsematchfn is None:
234 return None
234 return None
235 # TODO there is potential to cache this property. For now, the matcher
235 # TODO there is potential to cache this property. For now, the matcher
236 # is resolved on every access. (But the called function does use a
236 # is resolved on every access. (But the called function does use a
237 # cache to keep the lookup fast.)
237 # cache to keep the lookup fast.)
238 return self._sparsematchfn()
238 return self._sparsematchfn()
239
239
240 @repocache(b'branch')
240 @repocache(b'branch')
241 def _branch(self):
241 def _branch(self):
242 try:
242 try:
243 return self._opener.read(b"branch").strip() or b"default"
243 return self._opener.read(b"branch").strip() or b"default"
244 except FileNotFoundError:
244 except FileNotFoundError:
245 return b"default"
245 return b"default"
246
246
247 @property
247 @property
248 def _pl(self):
248 def _pl(self):
249 return self._map.parents()
249 return self._map.parents()
250
250
251 def hasdir(self, d):
251 def hasdir(self, d):
252 return self._map.hastrackeddir(d)
252 return self._map.hastrackeddir(d)
253
253
254 @rootcache(b'.hgignore')
254 @rootcache(b'.hgignore')
255 def _ignore(self):
255 def _ignore(self):
256 files = self._ignorefiles()
256 files = self._ignorefiles()
257 if not files:
257 if not files:
258 return matchmod.never()
258 return matchmod.never()
259
259
260 pats = [b'include:%s' % f for f in files]
260 pats = [b'include:%s' % f for f in files]
261 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
261 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
262
262
263 @propertycache
263 @propertycache
264 def _slash(self):
264 def _slash(self):
265 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
265 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
266
266
267 @propertycache
267 @propertycache
268 def _checklink(self):
268 def _checklink(self):
269 return util.checklink(self._root)
269 return util.checklink(self._root)
270
270
271 @propertycache
271 @propertycache
272 def _checkexec(self):
272 def _checkexec(self):
273 return bool(util.checkexec(self._root))
273 return bool(util.checkexec(self._root))
274
274
275 @propertycache
275 @propertycache
276 def _checkcase(self):
276 def _checkcase(self):
277 return not util.fscasesensitive(self._join(b'.hg'))
277 return not util.fscasesensitive(self._join(b'.hg'))
278
278
279 def _join(self, f):
279 def _join(self, f):
280 # much faster than os.path.join()
280 # much faster than os.path.join()
281 # it's safe because f is always a relative path
281 # it's safe because f is always a relative path
282 return self._rootdir + f
282 return self._rootdir + f
283
283
284 def flagfunc(self, buildfallback):
284 def flagfunc(self, buildfallback):
285 """build a callable that returns flags associated with a filename
285 """build a callable that returns flags associated with a filename
286
286
287 The information is extracted from three possible layers:
287 The information is extracted from three possible layers:
288 1. the file system if it supports the information
288 1. the file system if it supports the information
289 2. the "fallback" information stored in the dirstate if any
289 2. the "fallback" information stored in the dirstate if any
290 3. a more expensive mechanism inferring the flags from the parents.
290 3. a more expensive mechanism inferring the flags from the parents.
291 """
291 """
292
292
293 # small hack to cache the result of buildfallback()
293 # small hack to cache the result of buildfallback()
294 fallback_func = []
294 fallback_func = []
295
295
296 def get_flags(x):
296 def get_flags(x):
297 entry = None
297 entry = None
298 fallback_value = None
298 fallback_value = None
299 try:
299 try:
300 st = os.lstat(self._join(x))
300 st = os.lstat(self._join(x))
301 except OSError:
301 except OSError:
302 return b''
302 return b''
303
303
304 if self._checklink:
304 if self._checklink:
305 if util.statislink(st):
305 if util.statislink(st):
306 return b'l'
306 return b'l'
307 else:
307 else:
308 entry = self.get_entry(x)
308 entry = self.get_entry(x)
309 if entry.has_fallback_symlink:
309 if entry.has_fallback_symlink:
310 if entry.fallback_symlink:
310 if entry.fallback_symlink:
311 return b'l'
311 return b'l'
312 else:
312 else:
313 if not fallback_func:
313 if not fallback_func:
314 fallback_func.append(buildfallback())
314 fallback_func.append(buildfallback())
315 fallback_value = fallback_func[0](x)
315 fallback_value = fallback_func[0](x)
316 if b'l' in fallback_value:
316 if b'l' in fallback_value:
317 return b'l'
317 return b'l'
318
318
319 if self._checkexec:
319 if self._checkexec:
320 if util.statisexec(st):
320 if util.statisexec(st):
321 return b'x'
321 return b'x'
322 else:
322 else:
323 if entry is None:
323 if entry is None:
324 entry = self.get_entry(x)
324 entry = self.get_entry(x)
325 if entry.has_fallback_exec:
325 if entry.has_fallback_exec:
326 if entry.fallback_exec:
326 if entry.fallback_exec:
327 return b'x'
327 return b'x'
328 else:
328 else:
329 if fallback_value is None:
329 if fallback_value is None:
330 if not fallback_func:
330 if not fallback_func:
331 fallback_func.append(buildfallback())
331 fallback_func.append(buildfallback())
332 fallback_value = fallback_func[0](x)
332 fallback_value = fallback_func[0](x)
333 if b'x' in fallback_value:
333 if b'x' in fallback_value:
334 return b'x'
334 return b'x'
335 return b''
335 return b''
336
336
337 return get_flags
337 return get_flags
338
338
339 @propertycache
339 @propertycache
340 def _cwd(self):
340 def _cwd(self):
341 # internal config: ui.forcecwd
341 # internal config: ui.forcecwd
342 forcecwd = self._ui.config(b'ui', b'forcecwd')
342 forcecwd = self._ui.config(b'ui', b'forcecwd')
343 if forcecwd:
343 if forcecwd:
344 return forcecwd
344 return forcecwd
345 return encoding.getcwd()
345 return encoding.getcwd()
346
346
347 def getcwd(self):
347 def getcwd(self):
348 """Return the path from which a canonical path is calculated.
348 """Return the path from which a canonical path is calculated.
349
349
350 This path should be used to resolve file patterns or to convert
350 This path should be used to resolve file patterns or to convert
351 canonical paths back to file paths for display. It shouldn't be
351 canonical paths back to file paths for display. It shouldn't be
352 used to get real file paths. Use vfs functions instead.
352 used to get real file paths. Use vfs functions instead.
353 """
353 """
354 cwd = self._cwd
354 cwd = self._cwd
355 if cwd == self._root:
355 if cwd == self._root:
356 return b''
356 return b''
357 # self._root ends with a path separator if self._root is '/' or 'C:\'
357 # self._root ends with a path separator if self._root is '/' or 'C:\'
358 rootsep = self._root
358 rootsep = self._root
359 if not util.endswithsep(rootsep):
359 if not util.endswithsep(rootsep):
360 rootsep += pycompat.ossep
360 rootsep += pycompat.ossep
361 if cwd.startswith(rootsep):
361 if cwd.startswith(rootsep):
362 return cwd[len(rootsep) :]
362 return cwd[len(rootsep) :]
363 else:
363 else:
364 # we're outside the repo. return an absolute path.
364 # we're outside the repo. return an absolute path.
365 return cwd
365 return cwd
366
366
367 def pathto(self, f, cwd=None):
367 def pathto(self, f, cwd=None):
368 if cwd is None:
368 if cwd is None:
369 cwd = self.getcwd()
369 cwd = self.getcwd()
370 path = util.pathto(self._root, cwd, f)
370 path = util.pathto(self._root, cwd, f)
371 if self._slash:
371 if self._slash:
372 return util.pconvert(path)
372 return util.pconvert(path)
373 return path
373 return path
374
374
375 def get_entry(self, path):
375 def get_entry(self, path):
376 """return a DirstateItem for the associated path"""
376 """return a DirstateItem for the associated path"""
377 entry = self._map.get(path)
377 entry = self._map.get(path)
378 if entry is None:
378 if entry is None:
379 return DirstateItem()
379 return DirstateItem()
380 return entry
380 return entry
381
381
382 def __contains__(self, key):
382 def __contains__(self, key):
383 return key in self._map
383 return key in self._map
384
384
385 def __iter__(self):
385 def __iter__(self):
386 return iter(sorted(self._map))
386 return iter(sorted(self._map))
387
387
388 def items(self):
388 def items(self):
389 return self._map.items()
389 return self._map.items()
390
390
391 iteritems = items
391 iteritems = items
392
392
393 def parents(self):
393 def parents(self):
394 return [self._validate(p) for p in self._pl]
394 return [self._validate(p) for p in self._pl]
395
395
396 def p1(self):
396 def p1(self):
397 return self._validate(self._pl[0])
397 return self._validate(self._pl[0])
398
398
399 def p2(self):
399 def p2(self):
400 return self._validate(self._pl[1])
400 return self._validate(self._pl[1])
401
401
402 @property
402 @property
403 def in_merge(self):
403 def in_merge(self):
404 """True if a merge is in progress"""
404 """True if a merge is in progress"""
405 return self._pl[1] != self._nodeconstants.nullid
405 return self._pl[1] != self._nodeconstants.nullid
406
406
407 def branch(self):
407 def branch(self):
408 return encoding.tolocal(self._branch)
408 return encoding.tolocal(self._branch)
409
409
410 def setparents(self, p1, p2=None):
410 def setparents(self, p1, p2=None):
411 """Set dirstate parents to p1 and p2.
411 """Set dirstate parents to p1 and p2.
412
412
413 When moving from two parents to one, "merged" entries a
413 When moving from two parents to one, "merged" entries a
414 adjusted to normal and previous copy records discarded and
414 adjusted to normal and previous copy records discarded and
415 returned by the call.
415 returned by the call.
416
416
417 See localrepo.setparents()
417 See localrepo.setparents()
418 """
418 """
419 if p2 is None:
419 if p2 is None:
420 p2 = self._nodeconstants.nullid
420 p2 = self._nodeconstants.nullid
421 if self._parentwriters == 0:
421 if self._parentwriters == 0:
422 raise ValueError(
422 raise ValueError(
423 b"cannot set dirstate parent outside of "
423 b"cannot set dirstate parent outside of "
424 b"dirstate.changing_parents context manager"
424 b"dirstate.changing_parents context manager"
425 )
425 )
426
426
427 self._dirty = True
427 self._dirty = True
428 oldp2 = self._pl[1]
428 oldp2 = self._pl[1]
429 if self._origpl is None:
429 if self._origpl is None:
430 self._origpl = self._pl
430 self._origpl = self._pl
431 nullid = self._nodeconstants.nullid
431 nullid = self._nodeconstants.nullid
432 # True if we need to fold p2 related state back to a linear case
432 # True if we need to fold p2 related state back to a linear case
433 fold_p2 = oldp2 != nullid and p2 == nullid
433 fold_p2 = oldp2 != nullid and p2 == nullid
434 return self._map.setparents(p1, p2, fold_p2=fold_p2)
434 return self._map.setparents(p1, p2, fold_p2=fold_p2)
435
435
436 def setbranch(self, branch):
436 def setbranch(self, branch):
437 self.__class__._branch.set(self, encoding.fromlocal(branch))
437 self.__class__._branch.set(self, encoding.fromlocal(branch))
438 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
438 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
439 try:
439 try:
440 f.write(self._branch + b'\n')
440 f.write(self._branch + b'\n')
441 f.close()
441 f.close()
442
442
443 # make sure filecache has the correct stat info for _branch after
443 # make sure filecache has the correct stat info for _branch after
444 # replacing the underlying file
444 # replacing the underlying file
445 ce = self._filecache[b'_branch']
445 ce = self._filecache[b'_branch']
446 if ce:
446 if ce:
447 ce.refresh()
447 ce.refresh()
448 except: # re-raises
448 except: # re-raises
449 f.discard()
449 f.discard()
450 raise
450 raise
451
451
452 def invalidate(self):
452 def invalidate(self):
453 """Causes the next access to reread the dirstate.
453 """Causes the next access to reread the dirstate.
454
454
455 This is different from localrepo.invalidatedirstate() because it always
455 This is different from localrepo.invalidatedirstate() because it always
456 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
456 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
457 check whether the dirstate has changed before rereading it."""
457 check whether the dirstate has changed before rereading it."""
458
458
459 for a in ("_map", "_branch", "_ignore"):
459 for a in ("_map", "_branch", "_ignore"):
460 if a in self.__dict__:
460 if a in self.__dict__:
461 delattr(self, a)
461 delattr(self, a)
462 self._dirty = False
462 self._dirty = False
463 self._dirty_tracked_set = False
463 self._dirty_tracked_set = False
464 self._invalidated_context = self._parentwriters > 0
464 self._invalidated_context = self._parentwriters > 0
465 self._origpl = None
465 self._origpl = None
466
466
467 def copy(self, source, dest):
467 def copy(self, source, dest):
468 """Mark dest as a copy of source. Unmark dest if source is None."""
468 """Mark dest as a copy of source. Unmark dest if source is None."""
469 if source == dest:
469 if source == dest:
470 return
470 return
471 self._dirty = True
471 self._dirty = True
472 if source is not None:
472 if source is not None:
473 self._check_sparse(source)
473 self._check_sparse(source)
474 self._map.copymap[dest] = source
474 self._map.copymap[dest] = source
475 else:
475 else:
476 self._map.copymap.pop(dest, None)
476 self._map.copymap.pop(dest, None)
477
477
478 def copied(self, file):
478 def copied(self, file):
479 return self._map.copymap.get(file, None)
479 return self._map.copymap.get(file, None)
480
480
481 def copies(self):
481 def copies(self):
482 return self._map.copymap
482 return self._map.copymap
483
483
484 @requires_not_changing_parents
484 @requires_not_changing_parents
485 def set_tracked(self, filename, reset_copy=False):
485 def set_tracked(self, filename, reset_copy=False):
486 """a "public" method for generic code to mark a file as tracked
486 """a "public" method for generic code to mark a file as tracked
487
487
488 This function is to be called outside of "update/merge" case. For
488 This function is to be called outside of "update/merge" case. For
489 example by a command like `hg add X`.
489 example by a command like `hg add X`.
490
490
491 if reset_copy is set, any existing copy information will be dropped.
491 if reset_copy is set, any existing copy information will be dropped.
492
492
493 return True the file was previously untracked, False otherwise.
493 return True the file was previously untracked, False otherwise.
494 """
494 """
495 self._dirty = True
495 self._dirty = True
496 entry = self._map.get(filename)
496 entry = self._map.get(filename)
497 if entry is None or not entry.tracked:
497 if entry is None or not entry.tracked:
498 self._check_new_tracked_filename(filename)
498 self._check_new_tracked_filename(filename)
499 pre_tracked = self._map.set_tracked(filename)
499 pre_tracked = self._map.set_tracked(filename)
500 if reset_copy:
500 if reset_copy:
501 self._map.copymap.pop(filename, None)
501 self._map.copymap.pop(filename, None)
502 if pre_tracked:
502 if pre_tracked:
503 self._dirty_tracked_set = True
503 self._dirty_tracked_set = True
504 return pre_tracked
504 return pre_tracked
505
505
506 @requires_not_changing_parents
506 @requires_not_changing_parents
507 def set_untracked(self, filename):
507 def set_untracked(self, filename):
508 """a "public" method for generic code to mark a file as untracked
508 """a "public" method for generic code to mark a file as untracked
509
509
510 This function is to be called outside of "update/merge" case. For
510 This function is to be called outside of "update/merge" case. For
511 example by a command like `hg remove X`.
511 example by a command like `hg remove X`.
512
512
513 return True the file was previously tracked, False otherwise.
513 return True the file was previously tracked, False otherwise.
514 """
514 """
515 ret = self._map.set_untracked(filename)
515 ret = self._map.set_untracked(filename)
516 if ret:
516 if ret:
517 self._dirty = True
517 self._dirty = True
518 self._dirty_tracked_set = True
518 self._dirty_tracked_set = True
519 return ret
519 return ret
520
520
521 @requires_not_changing_parents
521 @requires_not_changing_parents
522 def set_clean(self, filename, parentfiledata):
522 def set_clean(self, filename, parentfiledata):
523 """record that the current state of the file on disk is known to be clean"""
523 """record that the current state of the file on disk is known to be clean"""
524 self._dirty = True
524 self._dirty = True
525 if not self._map[filename].tracked:
525 if not self._map[filename].tracked:
526 self._check_new_tracked_filename(filename)
526 self._check_new_tracked_filename(filename)
527 (mode, size, mtime) = parentfiledata
527 (mode, size, mtime) = parentfiledata
528 self._map.set_clean(filename, mode, size, mtime)
528 self._map.set_clean(filename, mode, size, mtime)
529
529
530 @requires_not_changing_parents
530 @requires_not_changing_parents
531 def set_possibly_dirty(self, filename):
531 def set_possibly_dirty(self, filename):
532 """record that the current state of the file on disk is unknown"""
532 """record that the current state of the file on disk is unknown"""
533 self._dirty = True
533 self._dirty = True
534 self._map.set_possibly_dirty(filename)
534 self._map.set_possibly_dirty(filename)
535
535
536 @requires_changing_parents
536 @requires_changing_parents
537 def update_file_p1(
537 def update_file_p1(
538 self,
538 self,
539 filename,
539 filename,
540 p1_tracked,
540 p1_tracked,
541 ):
541 ):
542 """Set a file as tracked in the parent (or not)
542 """Set a file as tracked in the parent (or not)
543
543
544 This is to be called when adjust the dirstate to a new parent after an history
544 This is to be called when adjust the dirstate to a new parent after an history
545 rewriting operation.
545 rewriting operation.
546
546
547 It should not be called during a merge (p2 != nullid) and only within
547 It should not be called during a merge (p2 != nullid) and only within
548 a `with dirstate.changing_parents(repo):` context.
548 a `with dirstate.changing_parents(repo):` context.
549 """
549 """
550 if self.in_merge:
550 if self.in_merge:
551 msg = b'update_file_reference should not be called when merging'
551 msg = b'update_file_reference should not be called when merging'
552 raise error.ProgrammingError(msg)
552 raise error.ProgrammingError(msg)
553 entry = self._map.get(filename)
553 entry = self._map.get(filename)
554 if entry is None:
554 if entry is None:
555 wc_tracked = False
555 wc_tracked = False
556 else:
556 else:
557 wc_tracked = entry.tracked
557 wc_tracked = entry.tracked
558 if not (p1_tracked or wc_tracked):
558 if not (p1_tracked or wc_tracked):
559 # the file is no longer relevant to anyone
559 # the file is no longer relevant to anyone
560 if self._map.get(filename) is not None:
560 if self._map.get(filename) is not None:
561 self._map.reset_state(filename)
561 self._map.reset_state(filename)
562 self._dirty = True
562 self._dirty = True
563 elif (not p1_tracked) and wc_tracked:
563 elif (not p1_tracked) and wc_tracked:
564 if entry is not None and entry.added:
564 if entry is not None and entry.added:
565 return # avoid dropping copy information (maybe?)
565 return # avoid dropping copy information (maybe?)
566
566
567 self._map.reset_state(
567 self._map.reset_state(
568 filename,
568 filename,
569 wc_tracked,
569 wc_tracked,
570 p1_tracked,
570 p1_tracked,
571 # the underlying reference might have changed, we will have to
571 # the underlying reference might have changed, we will have to
572 # check it.
572 # check it.
573 has_meaningful_mtime=False,
573 has_meaningful_mtime=False,
574 )
574 )
575
575
576 @requires_changing_parents
576 @requires_changing_parents
577 def update_file(
577 def update_file(
578 self,
578 self,
579 filename,
579 filename,
580 wc_tracked,
580 wc_tracked,
581 p1_tracked,
581 p1_tracked,
582 p2_info=False,
582 p2_info=False,
583 possibly_dirty=False,
583 possibly_dirty=False,
584 parentfiledata=None,
584 parentfiledata=None,
585 ):
585 ):
586 """update the information about a file in the dirstate
586 """update the information about a file in the dirstate
587
587
588 This is to be called when the direstates parent changes to keep track
588 This is to be called when the direstates parent changes to keep track
589 of what is the file situation in regards to the working copy and its parent.
589 of what is the file situation in regards to the working copy and its parent.
590
590
591 This function must be called within a `dirstate.changing_parents` context.
591 This function must be called within a `dirstate.changing_parents` context.
592
592
593 note: the API is at an early stage and we might need to adjust it
593 note: the API is at an early stage and we might need to adjust it
594 depending of what information ends up being relevant and useful to
594 depending of what information ends up being relevant and useful to
595 other processing.
595 other processing.
596 """
596 """
597
597
598 # note: I do not think we need to double check name clash here since we
598 # note: I do not think we need to double check name clash here since we
599 # are in a update/merge case that should already have taken care of
599 # are in a update/merge case that should already have taken care of
600 # this. The test agrees
600 # this. The test agrees
601
601
602 self._dirty = True
602 self._dirty = True
603 old_entry = self._map.get(filename)
603 old_entry = self._map.get(filename)
604 if old_entry is None:
604 if old_entry is None:
605 prev_tracked = False
605 prev_tracked = False
606 else:
606 else:
607 prev_tracked = old_entry.tracked
607 prev_tracked = old_entry.tracked
608 if prev_tracked != wc_tracked:
608 if prev_tracked != wc_tracked:
609 self._dirty_tracked_set = True
609 self._dirty_tracked_set = True
610
610
611 self._map.reset_state(
611 self._map.reset_state(
612 filename,
612 filename,
613 wc_tracked,
613 wc_tracked,
614 p1_tracked,
614 p1_tracked,
615 p2_info=p2_info,
615 p2_info=p2_info,
616 has_meaningful_mtime=not possibly_dirty,
616 has_meaningful_mtime=not possibly_dirty,
617 parentfiledata=parentfiledata,
617 parentfiledata=parentfiledata,
618 )
618 )
619
619
620 def _check_new_tracked_filename(self, filename):
620 def _check_new_tracked_filename(self, filename):
621 scmutil.checkfilename(filename)
621 scmutil.checkfilename(filename)
622 if self._map.hastrackeddir(filename):
622 if self._map.hastrackeddir(filename):
623 msg = _(b'directory %r already in dirstate')
623 msg = _(b'directory %r already in dirstate')
624 msg %= pycompat.bytestr(filename)
624 msg %= pycompat.bytestr(filename)
625 raise error.Abort(msg)
625 raise error.Abort(msg)
626 # shadows
626 # shadows
627 for d in pathutil.finddirs(filename):
627 for d in pathutil.finddirs(filename):
628 if self._map.hastrackeddir(d):
628 if self._map.hastrackeddir(d):
629 break
629 break
630 entry = self._map.get(d)
630 entry = self._map.get(d)
631 if entry is not None and not entry.removed:
631 if entry is not None and not entry.removed:
632 msg = _(b'file %r in dirstate clashes with %r')
632 msg = _(b'file %r in dirstate clashes with %r')
633 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
633 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
634 raise error.Abort(msg)
634 raise error.Abort(msg)
635 self._check_sparse(filename)
635 self._check_sparse(filename)
636
636
637 def _check_sparse(self, filename):
637 def _check_sparse(self, filename):
638 """Check that a filename is inside the sparse profile"""
638 """Check that a filename is inside the sparse profile"""
639 sparsematch = self._sparsematcher
639 sparsematch = self._sparsematcher
640 if sparsematch is not None and not sparsematch.always():
640 if sparsematch is not None and not sparsematch.always():
641 if not sparsematch(filename):
641 if not sparsematch(filename):
642 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
642 msg = _(b"cannot add '%s' - it is outside the sparse checkout")
643 hint = _(
643 hint = _(
644 b'include file with `hg debugsparse --include <pattern>` or use '
644 b'include file with `hg debugsparse --include <pattern>` or use '
645 b'`hg add -s <file>` to include file directory while adding'
645 b'`hg add -s <file>` to include file directory while adding'
646 )
646 )
647 raise error.Abort(msg % filename, hint=hint)
647 raise error.Abort(msg % filename, hint=hint)
648
648
649 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
649 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
650 if exists is None:
650 if exists is None:
651 exists = os.path.lexists(os.path.join(self._root, path))
651 exists = os.path.lexists(os.path.join(self._root, path))
652 if not exists:
652 if not exists:
653 # Maybe a path component exists
653 # Maybe a path component exists
654 if not ignoremissing and b'/' in path:
654 if not ignoremissing and b'/' in path:
655 d, f = path.rsplit(b'/', 1)
655 d, f = path.rsplit(b'/', 1)
656 d = self._normalize(d, False, ignoremissing, None)
656 d = self._normalize(d, False, ignoremissing, None)
657 folded = d + b"/" + f
657 folded = d + b"/" + f
658 else:
658 else:
659 # No path components, preserve original case
659 # No path components, preserve original case
660 folded = path
660 folded = path
661 else:
661 else:
662 # recursively normalize leading directory components
662 # recursively normalize leading directory components
663 # against dirstate
663 # against dirstate
664 if b'/' in normed:
664 if b'/' in normed:
665 d, f = normed.rsplit(b'/', 1)
665 d, f = normed.rsplit(b'/', 1)
666 d = self._normalize(d, False, ignoremissing, True)
666 d = self._normalize(d, False, ignoremissing, True)
667 r = self._root + b"/" + d
667 r = self._root + b"/" + d
668 folded = d + b"/" + util.fspath(f, r)
668 folded = d + b"/" + util.fspath(f, r)
669 else:
669 else:
670 folded = util.fspath(normed, self._root)
670 folded = util.fspath(normed, self._root)
671 storemap[normed] = folded
671 storemap[normed] = folded
672
672
673 return folded
673 return folded
674
674
675 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
675 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
676 normed = util.normcase(path)
676 normed = util.normcase(path)
677 folded = self._map.filefoldmap.get(normed, None)
677 folded = self._map.filefoldmap.get(normed, None)
678 if folded is None:
678 if folded is None:
679 if isknown:
679 if isknown:
680 folded = path
680 folded = path
681 else:
681 else:
682 folded = self._discoverpath(
682 folded = self._discoverpath(
683 path, normed, ignoremissing, exists, self._map.filefoldmap
683 path, normed, ignoremissing, exists, self._map.filefoldmap
684 )
684 )
685 return folded
685 return folded
686
686
687 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
687 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
688 normed = util.normcase(path)
688 normed = util.normcase(path)
689 folded = self._map.filefoldmap.get(normed, None)
689 folded = self._map.filefoldmap.get(normed, None)
690 if folded is None:
690 if folded is None:
691 folded = self._map.dirfoldmap.get(normed, None)
691 folded = self._map.dirfoldmap.get(normed, None)
692 if folded is None:
692 if folded is None:
693 if isknown:
693 if isknown:
694 folded = path
694 folded = path
695 else:
695 else:
696 # store discovered result in dirfoldmap so that future
696 # store discovered result in dirfoldmap so that future
697 # normalizefile calls don't start matching directories
697 # normalizefile calls don't start matching directories
698 folded = self._discoverpath(
698 folded = self._discoverpath(
699 path, normed, ignoremissing, exists, self._map.dirfoldmap
699 path, normed, ignoremissing, exists, self._map.dirfoldmap
700 )
700 )
701 return folded
701 return folded
702
702
703 def normalize(self, path, isknown=False, ignoremissing=False):
703 def normalize(self, path, isknown=False, ignoremissing=False):
704 """
704 """
705 normalize the case of a pathname when on a casefolding filesystem
705 normalize the case of a pathname when on a casefolding filesystem
706
706
707 isknown specifies whether the filename came from walking the
707 isknown specifies whether the filename came from walking the
708 disk, to avoid extra filesystem access.
708 disk, to avoid extra filesystem access.
709
709
710 If ignoremissing is True, missing path are returned
710 If ignoremissing is True, missing path are returned
711 unchanged. Otherwise, we try harder to normalize possibly
711 unchanged. Otherwise, we try harder to normalize possibly
712 existing path components.
712 existing path components.
713
713
714 The normalized case is determined based on the following precedence:
714 The normalized case is determined based on the following precedence:
715
715
716 - version of name already stored in the dirstate
716 - version of name already stored in the dirstate
717 - version of name stored on disk
717 - version of name stored on disk
718 - version provided via command arguments
718 - version provided via command arguments
719 """
719 """
720
720
721 if self._checkcase:
721 if self._checkcase:
722 return self._normalize(path, isknown, ignoremissing)
722 return self._normalize(path, isknown, ignoremissing)
723 return path
723 return path
724
724
725 def clear(self):
725 def clear(self):
726 self._map.clear()
726 self._map.clear()
727 self._dirty = True
727 self._dirty = True
728
728
729 def rebuild(self, parent, allfiles, changedfiles=None):
729 def rebuild(self, parent, allfiles, changedfiles=None):
730
730
731 matcher = self._sparsematcher
731 matcher = self._sparsematcher
732 if matcher is not None and not matcher.always():
732 if matcher is not None and not matcher.always():
733 # should not add non-matching files
733 # should not add non-matching files
734 allfiles = [f for f in allfiles if matcher(f)]
734 allfiles = [f for f in allfiles if matcher(f)]
735 if changedfiles:
735 if changedfiles:
736 changedfiles = [f for f in changedfiles if matcher(f)]
736 changedfiles = [f for f in changedfiles if matcher(f)]
737
737
738 if changedfiles is not None:
738 if changedfiles is not None:
739 # these files will be deleted from the dirstate when they are
739 # these files will be deleted from the dirstate when they are
740 # not found to be in allfiles
740 # not found to be in allfiles
741 dirstatefilestoremove = {f for f in self if not matcher(f)}
741 dirstatefilestoremove = {f for f in self if not matcher(f)}
742 changedfiles = dirstatefilestoremove.union(changedfiles)
742 changedfiles = dirstatefilestoremove.union(changedfiles)
743
743
744 if changedfiles is None:
744 if changedfiles is None:
745 # Rebuild entire dirstate
745 # Rebuild entire dirstate
746 to_lookup = allfiles
746 to_lookup = allfiles
747 to_drop = []
747 to_drop = []
748 self.clear()
748 self.clear()
749 elif len(changedfiles) < 10:
749 elif len(changedfiles) < 10:
750 # Avoid turning allfiles into a set, which can be expensive if it's
750 # Avoid turning allfiles into a set, which can be expensive if it's
751 # large.
751 # large.
752 to_lookup = []
752 to_lookup = []
753 to_drop = []
753 to_drop = []
754 for f in changedfiles:
754 for f in changedfiles:
755 if f in allfiles:
755 if f in allfiles:
756 to_lookup.append(f)
756 to_lookup.append(f)
757 else:
757 else:
758 to_drop.append(f)
758 to_drop.append(f)
759 else:
759 else:
760 changedfilesset = set(changedfiles)
760 changedfilesset = set(changedfiles)
761 to_lookup = changedfilesset & set(allfiles)
761 to_lookup = changedfilesset & set(allfiles)
762 to_drop = changedfilesset - to_lookup
762 to_drop = changedfilesset - to_lookup
763
763
764 if self._origpl is None:
764 if self._origpl is None:
765 self._origpl = self._pl
765 self._origpl = self._pl
766 self._map.setparents(parent, self._nodeconstants.nullid)
766 self._map.setparents(parent, self._nodeconstants.nullid)
767
767
768 for f in to_lookup:
768 for f in to_lookup:
769
769
770 if self.in_merge:
770 if self.in_merge:
771 self.set_tracked(f)
771 self.set_tracked(f)
772 else:
772 else:
773 self._map.reset_state(
773 self._map.reset_state(
774 f,
774 f,
775 wc_tracked=True,
775 wc_tracked=True,
776 p1_tracked=True,
776 p1_tracked=True,
777 )
777 )
778 for f in to_drop:
778 for f in to_drop:
779 self._map.reset_state(f)
779 self._map.reset_state(f)
780
780
781 self._dirty = True
781 self._dirty = True
782
782
783 def identity(self):
783 def identity(self):
784 """Return identity of dirstate itself to detect changing in storage
784 """Return identity of dirstate itself to detect changing in storage
785
785
786 If identity of previous dirstate is equal to this, writing
786 If identity of previous dirstate is equal to this, writing
787 changes based on the former dirstate out can keep consistency.
787 changes based on the former dirstate out can keep consistency.
788 """
788 """
789 return self._map.identity
789 return self._map.identity
790
790
791 def write(self, tr):
791 def write(self, tr):
792 if not self._dirty:
792 if not self._dirty:
793 return
793 return
794
794
795 write_key = self._use_tracked_hint and self._dirty_tracked_set
795 write_key = self._use_tracked_hint and self._dirty_tracked_set
796 if tr:
796 if tr:
797 # delay writing in-memory changes out
797 # delay writing in-memory changes out
798 tr.addfilegenerator(
798 tr.addfilegenerator(
799 b'dirstate-1-main',
799 b'dirstate-1-main',
800 (self._filename,),
800 (self._filename,),
801 lambda f: self._writedirstate(tr, f),
801 lambda f: self._writedirstate(tr, f),
802 location=b'plain',
802 location=b'plain',
803 post_finalize=True,
803 post_finalize=True,
804 )
804 )
805 if write_key:
805 if write_key:
806 tr.addfilegenerator(
806 tr.addfilegenerator(
807 b'dirstate-2-key-post',
807 b'dirstate-2-key-post',
808 (self._filename_th,),
808 (self._filename_th,),
809 lambda f: self._write_tracked_hint(tr, f),
809 lambda f: self._write_tracked_hint(tr, f),
810 location=b'plain',
810 location=b'plain',
811 post_finalize=True,
811 post_finalize=True,
812 )
812 )
813 return
813 return
814
814
815 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
815 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
816 with file(self._filename) as f:
816 with file(self._filename) as f:
817 self._writedirstate(tr, f)
817 self._writedirstate(tr, f)
818 if write_key:
818 if write_key:
819 # we update the key-file after writing to make sure reader have a
819 # we update the key-file after writing to make sure reader have a
820 # key that match the newly written content
820 # key that match the newly written content
821 with file(self._filename_th) as f:
821 with file(self._filename_th) as f:
822 self._write_tracked_hint(tr, f)
822 self._write_tracked_hint(tr, f)
823
823
824 def delete_tracked_hint(self):
824 def delete_tracked_hint(self):
825 """remove the tracked_hint file
825 """remove the tracked_hint file
826
826
827 To be used by format downgrades operation"""
827 To be used by format downgrades operation"""
828 self._opener.unlink(self._filename_th)
828 self._opener.unlink(self._filename_th)
829 self._use_tracked_hint = False
829 self._use_tracked_hint = False
830
830
831 def addparentchangecallback(self, category, callback):
831 def addparentchangecallback(self, category, callback):
832 """add a callback to be called when the wd parents are changed
832 """add a callback to be called when the wd parents are changed
833
833
834 Callback will be called with the following arguments:
834 Callback will be called with the following arguments:
835 dirstate, (oldp1, oldp2), (newp1, newp2)
835 dirstate, (oldp1, oldp2), (newp1, newp2)
836
836
837 Category is a unique identifier to allow overwriting an old callback
837 Category is a unique identifier to allow overwriting an old callback
838 with a newer callback.
838 with a newer callback.
839 """
839 """
840 self._plchangecallbacks[category] = callback
840 self._plchangecallbacks[category] = callback
841
841
842 def _writedirstate(self, tr, st):
842 def _writedirstate(self, tr, st):
843 # notify callbacks about parents change
843 # notify callbacks about parents change
844 if self._origpl is not None and self._origpl != self._pl:
844 if self._origpl is not None and self._origpl != self._pl:
845 for c, callback in sorted(self._plchangecallbacks.items()):
845 for c, callback in sorted(self._plchangecallbacks.items()):
846 callback(self, self._origpl, self._pl)
846 callback(self, self._origpl, self._pl)
847 self._origpl = None
847 self._origpl = None
848 self._map.write(tr, st)
848 self._map.write(tr, st)
849 self._dirty = False
849 self._dirty = False
850 self._dirty_tracked_set = False
850 self._dirty_tracked_set = False
851
851
852 def _write_tracked_hint(self, tr, f):
852 def _write_tracked_hint(self, tr, f):
853 key = node.hex(uuid.uuid4().bytes)
853 key = node.hex(uuid.uuid4().bytes)
854 f.write(b"1\n%s\n" % key) # 1 is the format version
854 f.write(b"1\n%s\n" % key) # 1 is the format version
855
855
856 def _dirignore(self, f):
856 def _dirignore(self, f):
857 if self._ignore(f):
857 if self._ignore(f):
858 return True
858 return True
859 for p in pathutil.finddirs(f):
859 for p in pathutil.finddirs(f):
860 if self._ignore(p):
860 if self._ignore(p):
861 return True
861 return True
862 return False
862 return False
863
863
864 def _ignorefiles(self):
864 def _ignorefiles(self):
865 files = []
865 files = []
866 if os.path.exists(self._join(b'.hgignore')):
866 if os.path.exists(self._join(b'.hgignore')):
867 files.append(self._join(b'.hgignore'))
867 files.append(self._join(b'.hgignore'))
868 for name, path in self._ui.configitems(b"ui"):
868 for name, path in self._ui.configitems(b"ui"):
869 if name == b'ignore' or name.startswith(b'ignore.'):
869 if name == b'ignore' or name.startswith(b'ignore.'):
870 # we need to use os.path.join here rather than self._join
870 # we need to use os.path.join here rather than self._join
871 # because path is arbitrary and user-specified
871 # because path is arbitrary and user-specified
872 files.append(os.path.join(self._rootdir, util.expandpath(path)))
872 files.append(os.path.join(self._rootdir, util.expandpath(path)))
873 return files
873 return files
874
874
875 def _ignorefileandline(self, f):
875 def _ignorefileandline(self, f):
876 files = collections.deque(self._ignorefiles())
876 files = collections.deque(self._ignorefiles())
877 visited = set()
877 visited = set()
878 while files:
878 while files:
879 i = files.popleft()
879 i = files.popleft()
880 patterns = matchmod.readpatternfile(
880 patterns = matchmod.readpatternfile(
881 i, self._ui.warn, sourceinfo=True
881 i, self._ui.warn, sourceinfo=True
882 )
882 )
883 for pattern, lineno, line in patterns:
883 for pattern, lineno, line in patterns:
884 kind, p = matchmod._patsplit(pattern, b'glob')
884 kind, p = matchmod._patsplit(pattern, b'glob')
885 if kind == b"subinclude":
885 if kind == b"subinclude":
886 if p not in visited:
886 if p not in visited:
887 files.append(p)
887 files.append(p)
888 continue
888 continue
889 m = matchmod.match(
889 m = matchmod.match(
890 self._root, b'', [], [pattern], warn=self._ui.warn
890 self._root, b'', [], [pattern], warn=self._ui.warn
891 )
891 )
892 if m(f):
892 if m(f):
893 return (i, lineno, line)
893 return (i, lineno, line)
894 visited.add(i)
894 visited.add(i)
895 return (None, -1, b"")
895 return (None, -1, b"")
896
896
897 def _walkexplicit(self, match, subrepos):
897 def _walkexplicit(self, match, subrepos):
898 """Get stat data about the files explicitly specified by match.
898 """Get stat data about the files explicitly specified by match.
899
899
900 Return a triple (results, dirsfound, dirsnotfound).
900 Return a triple (results, dirsfound, dirsnotfound).
901 - results is a mapping from filename to stat result. It also contains
901 - results is a mapping from filename to stat result. It also contains
902 listings mapping subrepos and .hg to None.
902 listings mapping subrepos and .hg to None.
903 - dirsfound is a list of files found to be directories.
903 - dirsfound is a list of files found to be directories.
904 - dirsnotfound is a list of files that the dirstate thinks are
904 - dirsnotfound is a list of files that the dirstate thinks are
905 directories and that were not found."""
905 directories and that were not found."""
906
906
907 def badtype(mode):
907 def badtype(mode):
908 kind = _(b'unknown')
908 kind = _(b'unknown')
909 if stat.S_ISCHR(mode):
909 if stat.S_ISCHR(mode):
910 kind = _(b'character device')
910 kind = _(b'character device')
911 elif stat.S_ISBLK(mode):
911 elif stat.S_ISBLK(mode):
912 kind = _(b'block device')
912 kind = _(b'block device')
913 elif stat.S_ISFIFO(mode):
913 elif stat.S_ISFIFO(mode):
914 kind = _(b'fifo')
914 kind = _(b'fifo')
915 elif stat.S_ISSOCK(mode):
915 elif stat.S_ISSOCK(mode):
916 kind = _(b'socket')
916 kind = _(b'socket')
917 elif stat.S_ISDIR(mode):
917 elif stat.S_ISDIR(mode):
918 kind = _(b'directory')
918 kind = _(b'directory')
919 return _(b'unsupported file type (type is %s)') % kind
919 return _(b'unsupported file type (type is %s)') % kind
920
920
921 badfn = match.bad
921 badfn = match.bad
922 dmap = self._map
922 dmap = self._map
923 lstat = os.lstat
923 lstat = os.lstat
924 getkind = stat.S_IFMT
924 getkind = stat.S_IFMT
925 dirkind = stat.S_IFDIR
925 dirkind = stat.S_IFDIR
926 regkind = stat.S_IFREG
926 regkind = stat.S_IFREG
927 lnkkind = stat.S_IFLNK
927 lnkkind = stat.S_IFLNK
928 join = self._join
928 join = self._join
929 dirsfound = []
929 dirsfound = []
930 foundadd = dirsfound.append
930 foundadd = dirsfound.append
931 dirsnotfound = []
931 dirsnotfound = []
932 notfoundadd = dirsnotfound.append
932 notfoundadd = dirsnotfound.append
933
933
934 if not match.isexact() and self._checkcase:
934 if not match.isexact() and self._checkcase:
935 normalize = self._normalize
935 normalize = self._normalize
936 else:
936 else:
937 normalize = None
937 normalize = None
938
938
939 files = sorted(match.files())
939 files = sorted(match.files())
940 subrepos.sort()
940 subrepos.sort()
941 i, j = 0, 0
941 i, j = 0, 0
942 while i < len(files) and j < len(subrepos):
942 while i < len(files) and j < len(subrepos):
943 subpath = subrepos[j] + b"/"
943 subpath = subrepos[j] + b"/"
944 if files[i] < subpath:
944 if files[i] < subpath:
945 i += 1
945 i += 1
946 continue
946 continue
947 while i < len(files) and files[i].startswith(subpath):
947 while i < len(files) and files[i].startswith(subpath):
948 del files[i]
948 del files[i]
949 j += 1
949 j += 1
950
950
951 if not files or b'' in files:
951 if not files or b'' in files:
952 files = [b'']
952 files = [b'']
953 # constructing the foldmap is expensive, so don't do it for the
953 # constructing the foldmap is expensive, so don't do it for the
954 # common case where files is ['']
954 # common case where files is ['']
955 normalize = None
955 normalize = None
956 results = dict.fromkeys(subrepos)
956 results = dict.fromkeys(subrepos)
957 results[b'.hg'] = None
957 results[b'.hg'] = None
958
958
959 for ff in files:
959 for ff in files:
960 if normalize:
960 if normalize:
961 nf = normalize(ff, False, True)
961 nf = normalize(ff, False, True)
962 else:
962 else:
963 nf = ff
963 nf = ff
964 if nf in results:
964 if nf in results:
965 continue
965 continue
966
966
967 try:
967 try:
968 st = lstat(join(nf))
968 st = lstat(join(nf))
969 kind = getkind(st.st_mode)
969 kind = getkind(st.st_mode)
970 if kind == dirkind:
970 if kind == dirkind:
971 if nf in dmap:
971 if nf in dmap:
972 # file replaced by dir on disk but still in dirstate
972 # file replaced by dir on disk but still in dirstate
973 results[nf] = None
973 results[nf] = None
974 foundadd((nf, ff))
974 foundadd((nf, ff))
975 elif kind == regkind or kind == lnkkind:
975 elif kind == regkind or kind == lnkkind:
976 results[nf] = st
976 results[nf] = st
977 else:
977 else:
978 badfn(ff, badtype(kind))
978 badfn(ff, badtype(kind))
979 if nf in dmap:
979 if nf in dmap:
980 results[nf] = None
980 results[nf] = None
981 except OSError as inst: # nf not found on disk - it is dirstate only
981 except OSError as inst: # nf not found on disk - it is dirstate only
982 if nf in dmap: # does it exactly match a missing file?
982 if nf in dmap: # does it exactly match a missing file?
983 results[nf] = None
983 results[nf] = None
984 else: # does it match a missing directory?
984 else: # does it match a missing directory?
985 if self._map.hasdir(nf):
985 if self._map.hasdir(nf):
986 notfoundadd(nf)
986 notfoundadd(nf)
987 else:
987 else:
988 badfn(ff, encoding.strtolocal(inst.strerror))
988 badfn(ff, encoding.strtolocal(inst.strerror))
989
989
990 # match.files() may contain explicitly-specified paths that shouldn't
990 # match.files() may contain explicitly-specified paths that shouldn't
991 # be taken; drop them from the list of files found. dirsfound/notfound
991 # be taken; drop them from the list of files found. dirsfound/notfound
992 # aren't filtered here because they will be tested later.
992 # aren't filtered here because they will be tested later.
993 if match.anypats():
993 if match.anypats():
994 for f in list(results):
994 for f in list(results):
995 if f == b'.hg' or f in subrepos:
995 if f == b'.hg' or f in subrepos:
996 # keep sentinel to disable further out-of-repo walks
996 # keep sentinel to disable further out-of-repo walks
997 continue
997 continue
998 if not match(f):
998 if not match(f):
999 del results[f]
999 del results[f]
1000
1000
1001 # Case insensitive filesystems cannot rely on lstat() failing to detect
1001 # Case insensitive filesystems cannot rely on lstat() failing to detect
1002 # a case-only rename. Prune the stat object for any file that does not
1002 # a case-only rename. Prune the stat object for any file that does not
1003 # match the case in the filesystem, if there are multiple files that
1003 # match the case in the filesystem, if there are multiple files that
1004 # normalize to the same path.
1004 # normalize to the same path.
1005 if match.isexact() and self._checkcase:
1005 if match.isexact() and self._checkcase:
1006 normed = {}
1006 normed = {}
1007
1007
1008 for f, st in results.items():
1008 for f, st in results.items():
1009 if st is None:
1009 if st is None:
1010 continue
1010 continue
1011
1011
1012 nc = util.normcase(f)
1012 nc = util.normcase(f)
1013 paths = normed.get(nc)
1013 paths = normed.get(nc)
1014
1014
1015 if paths is None:
1015 if paths is None:
1016 paths = set()
1016 paths = set()
1017 normed[nc] = paths
1017 normed[nc] = paths
1018
1018
1019 paths.add(f)
1019 paths.add(f)
1020
1020
1021 for norm, paths in normed.items():
1021 for norm, paths in normed.items():
1022 if len(paths) > 1:
1022 if len(paths) > 1:
1023 for path in paths:
1023 for path in paths:
1024 folded = self._discoverpath(
1024 folded = self._discoverpath(
1025 path, norm, True, None, self._map.dirfoldmap
1025 path, norm, True, None, self._map.dirfoldmap
1026 )
1026 )
1027 if path != folded:
1027 if path != folded:
1028 results[path] = None
1028 results[path] = None
1029
1029
1030 return results, dirsfound, dirsnotfound
1030 return results, dirsfound, dirsnotfound
1031
1031
1032 def walk(self, match, subrepos, unknown, ignored, full=True):
1032 def walk(self, match, subrepos, unknown, ignored, full=True):
1033 """
1033 """
1034 Walk recursively through the directory tree, finding all files
1034 Walk recursively through the directory tree, finding all files
1035 matched by match.
1035 matched by match.
1036
1036
1037 If full is False, maybe skip some known-clean files.
1037 If full is False, maybe skip some known-clean files.
1038
1038
1039 Return a dict mapping filename to stat-like object (either
1039 Return a dict mapping filename to stat-like object (either
1040 mercurial.osutil.stat instance or return value of os.stat()).
1040 mercurial.osutil.stat instance or return value of os.stat()).
1041
1041
1042 """
1042 """
1043 # full is a flag that extensions that hook into walk can use -- this
1043 # full is a flag that extensions that hook into walk can use -- this
1044 # implementation doesn't use it at all. This satisfies the contract
1044 # implementation doesn't use it at all. This satisfies the contract
1045 # because we only guarantee a "maybe".
1045 # because we only guarantee a "maybe".
1046
1046
1047 if ignored:
1047 if ignored:
1048 ignore = util.never
1048 ignore = util.never
1049 dirignore = util.never
1049 dirignore = util.never
1050 elif unknown:
1050 elif unknown:
1051 ignore = self._ignore
1051 ignore = self._ignore
1052 dirignore = self._dirignore
1052 dirignore = self._dirignore
1053 else:
1053 else:
1054 # if not unknown and not ignored, drop dir recursion and step 2
1054 # if not unknown and not ignored, drop dir recursion and step 2
1055 ignore = util.always
1055 ignore = util.always
1056 dirignore = util.always
1056 dirignore = util.always
1057
1057
1058 if self._sparsematchfn is not None:
1058 if self._sparsematchfn is not None:
1059 em = matchmod.exact(match.files())
1059 em = matchmod.exact(match.files())
1060 sm = matchmod.unionmatcher([self._sparsematcher, em])
1060 sm = matchmod.unionmatcher([self._sparsematcher, em])
1061 match = matchmod.intersectmatchers(match, sm)
1061 match = matchmod.intersectmatchers(match, sm)
1062
1062
1063 matchfn = match.matchfn
1063 matchfn = match.matchfn
1064 matchalways = match.always()
1064 matchalways = match.always()
1065 matchtdir = match.traversedir
1065 matchtdir = match.traversedir
1066 dmap = self._map
1066 dmap = self._map
1067 listdir = util.listdir
1067 listdir = util.listdir
1068 lstat = os.lstat
1068 lstat = os.lstat
1069 dirkind = stat.S_IFDIR
1069 dirkind = stat.S_IFDIR
1070 regkind = stat.S_IFREG
1070 regkind = stat.S_IFREG
1071 lnkkind = stat.S_IFLNK
1071 lnkkind = stat.S_IFLNK
1072 join = self._join
1072 join = self._join
1073
1073
1074 exact = skipstep3 = False
1074 exact = skipstep3 = False
1075 if match.isexact(): # match.exact
1075 if match.isexact(): # match.exact
1076 exact = True
1076 exact = True
1077 dirignore = util.always # skip step 2
1077 dirignore = util.always # skip step 2
1078 elif match.prefix(): # match.match, no patterns
1078 elif match.prefix(): # match.match, no patterns
1079 skipstep3 = True
1079 skipstep3 = True
1080
1080
1081 if not exact and self._checkcase:
1081 if not exact and self._checkcase:
1082 normalize = self._normalize
1082 normalize = self._normalize
1083 normalizefile = self._normalizefile
1083 normalizefile = self._normalizefile
1084 skipstep3 = False
1084 skipstep3 = False
1085 else:
1085 else:
1086 normalize = self._normalize
1086 normalize = self._normalize
1087 normalizefile = None
1087 normalizefile = None
1088
1088
1089 # step 1: find all explicit files
1089 # step 1: find all explicit files
1090 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1090 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
1091 if matchtdir:
1091 if matchtdir:
1092 for d in work:
1092 for d in work:
1093 matchtdir(d[0])
1093 matchtdir(d[0])
1094 for d in dirsnotfound:
1094 for d in dirsnotfound:
1095 matchtdir(d)
1095 matchtdir(d)
1096
1096
1097 skipstep3 = skipstep3 and not (work or dirsnotfound)
1097 skipstep3 = skipstep3 and not (work or dirsnotfound)
1098 work = [d for d in work if not dirignore(d[0])]
1098 work = [d for d in work if not dirignore(d[0])]
1099
1099
1100 # step 2: visit subdirectories
1100 # step 2: visit subdirectories
1101 def traverse(work, alreadynormed):
1101 def traverse(work, alreadynormed):
1102 wadd = work.append
1102 wadd = work.append
1103 while work:
1103 while work:
1104 tracing.counter('dirstate.walk work', len(work))
1104 tracing.counter('dirstate.walk work', len(work))
1105 nd = work.pop()
1105 nd = work.pop()
1106 visitentries = match.visitchildrenset(nd)
1106 visitentries = match.visitchildrenset(nd)
1107 if not visitentries:
1107 if not visitentries:
1108 continue
1108 continue
1109 if visitentries == b'this' or visitentries == b'all':
1109 if visitentries == b'this' or visitentries == b'all':
1110 visitentries = None
1110 visitentries = None
1111 skip = None
1111 skip = None
1112 if nd != b'':
1112 if nd != b'':
1113 skip = b'.hg'
1113 skip = b'.hg'
1114 try:
1114 try:
1115 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1115 with tracing.log('dirstate.walk.traverse listdir %s', nd):
1116 entries = listdir(join(nd), stat=True, skip=skip)
1116 entries = listdir(join(nd), stat=True, skip=skip)
1117 except (PermissionError, FileNotFoundError) as inst:
1117 except (PermissionError, FileNotFoundError) as inst:
1118 match.bad(
1118 match.bad(
1119 self.pathto(nd), encoding.strtolocal(inst.strerror)
1119 self.pathto(nd), encoding.strtolocal(inst.strerror)
1120 )
1120 )
1121 continue
1121 continue
1122 for f, kind, st in entries:
1122 for f, kind, st in entries:
1123 # Some matchers may return files in the visitentries set,
1123 # Some matchers may return files in the visitentries set,
1124 # instead of 'this', if the matcher explicitly mentions them
1124 # instead of 'this', if the matcher explicitly mentions them
1125 # and is not an exactmatcher. This is acceptable; we do not
1125 # and is not an exactmatcher. This is acceptable; we do not
1126 # make any hard assumptions about file-or-directory below
1126 # make any hard assumptions about file-or-directory below
1127 # based on the presence of `f` in visitentries. If
1127 # based on the presence of `f` in visitentries. If
1128 # visitchildrenset returned a set, we can always skip the
1128 # visitchildrenset returned a set, we can always skip the
1129 # entries *not* in the set it provided regardless of whether
1129 # entries *not* in the set it provided regardless of whether
1130 # they're actually a file or a directory.
1130 # they're actually a file or a directory.
1131 if visitentries and f not in visitentries:
1131 if visitentries and f not in visitentries:
1132 continue
1132 continue
1133 if normalizefile:
1133 if normalizefile:
1134 # even though f might be a directory, we're only
1134 # even though f might be a directory, we're only
1135 # interested in comparing it to files currently in the
1135 # interested in comparing it to files currently in the
1136 # dmap -- therefore normalizefile is enough
1136 # dmap -- therefore normalizefile is enough
1137 nf = normalizefile(
1137 nf = normalizefile(
1138 nd and (nd + b"/" + f) or f, True, True
1138 nd and (nd + b"/" + f) or f, True, True
1139 )
1139 )
1140 else:
1140 else:
1141 nf = nd and (nd + b"/" + f) or f
1141 nf = nd and (nd + b"/" + f) or f
1142 if nf not in results:
1142 if nf not in results:
1143 if kind == dirkind:
1143 if kind == dirkind:
1144 if not ignore(nf):
1144 if not ignore(nf):
1145 if matchtdir:
1145 if matchtdir:
1146 matchtdir(nf)
1146 matchtdir(nf)
1147 wadd(nf)
1147 wadd(nf)
1148 if nf in dmap and (matchalways or matchfn(nf)):
1148 if nf in dmap and (matchalways or matchfn(nf)):
1149 results[nf] = None
1149 results[nf] = None
1150 elif kind == regkind or kind == lnkkind:
1150 elif kind == regkind or kind == lnkkind:
1151 if nf in dmap:
1151 if nf in dmap:
1152 if matchalways or matchfn(nf):
1152 if matchalways or matchfn(nf):
1153 results[nf] = st
1153 results[nf] = st
1154 elif (matchalways or matchfn(nf)) and not ignore(
1154 elif (matchalways or matchfn(nf)) and not ignore(
1155 nf
1155 nf
1156 ):
1156 ):
1157 # unknown file -- normalize if necessary
1157 # unknown file -- normalize if necessary
1158 if not alreadynormed:
1158 if not alreadynormed:
1159 nf = normalize(nf, False, True)
1159 nf = normalize(nf, False, True)
1160 results[nf] = st
1160 results[nf] = st
1161 elif nf in dmap and (matchalways or matchfn(nf)):
1161 elif nf in dmap and (matchalways or matchfn(nf)):
1162 results[nf] = None
1162 results[nf] = None
1163
1163
1164 for nd, d in work:
1164 for nd, d in work:
1165 # alreadynormed means that processwork doesn't have to do any
1165 # alreadynormed means that processwork doesn't have to do any
1166 # expensive directory normalization
1166 # expensive directory normalization
1167 alreadynormed = not normalize or nd == d
1167 alreadynormed = not normalize or nd == d
1168 traverse([d], alreadynormed)
1168 traverse([d], alreadynormed)
1169
1169
1170 for s in subrepos:
1170 for s in subrepos:
1171 del results[s]
1171 del results[s]
1172 del results[b'.hg']
1172 del results[b'.hg']
1173
1173
1174 # step 3: visit remaining files from dmap
1174 # step 3: visit remaining files from dmap
1175 if not skipstep3 and not exact:
1175 if not skipstep3 and not exact:
1176 # If a dmap file is not in results yet, it was either
1176 # If a dmap file is not in results yet, it was either
1177 # a) not matching matchfn b) ignored, c) missing, or d) under a
1177 # a) not matching matchfn b) ignored, c) missing, or d) under a
1178 # symlink directory.
1178 # symlink directory.
1179 if not results and matchalways:
1179 if not results and matchalways:
1180 visit = [f for f in dmap]
1180 visit = [f for f in dmap]
1181 else:
1181 else:
1182 visit = [f for f in dmap if f not in results and matchfn(f)]
1182 visit = [f for f in dmap if f not in results and matchfn(f)]
1183 visit.sort()
1183 visit.sort()
1184
1184
1185 if unknown:
1185 if unknown:
1186 # unknown == True means we walked all dirs under the roots
1186 # unknown == True means we walked all dirs under the roots
1187 # that wasn't ignored, and everything that matched was stat'ed
1187 # that wasn't ignored, and everything that matched was stat'ed
1188 # and is already in results.
1188 # and is already in results.
1189 # The rest must thus be ignored or under a symlink.
1189 # The rest must thus be ignored or under a symlink.
1190 audit_path = pathutil.pathauditor(self._root, cached=True)
1190 audit_path = pathutil.pathauditor(self._root, cached=True)
1191
1191
1192 for nf in iter(visit):
1192 for nf in iter(visit):
1193 # If a stat for the same file was already added with a
1193 # If a stat for the same file was already added with a
1194 # different case, don't add one for this, since that would
1194 # different case, don't add one for this, since that would
1195 # make it appear as if the file exists under both names
1195 # make it appear as if the file exists under both names
1196 # on disk.
1196 # on disk.
1197 if (
1197 if (
1198 normalizefile
1198 normalizefile
1199 and normalizefile(nf, True, True) in results
1199 and normalizefile(nf, True, True) in results
1200 ):
1200 ):
1201 results[nf] = None
1201 results[nf] = None
1202 # Report ignored items in the dmap as long as they are not
1202 # Report ignored items in the dmap as long as they are not
1203 # under a symlink directory.
1203 # under a symlink directory.
1204 elif audit_path.check(nf):
1204 elif audit_path.check(nf):
1205 try:
1205 try:
1206 results[nf] = lstat(join(nf))
1206 results[nf] = lstat(join(nf))
1207 # file was just ignored, no links, and exists
1207 # file was just ignored, no links, and exists
1208 except OSError:
1208 except OSError:
1209 # file doesn't exist
1209 # file doesn't exist
1210 results[nf] = None
1210 results[nf] = None
1211 else:
1211 else:
1212 # It's either missing or under a symlink directory
1212 # It's either missing or under a symlink directory
1213 # which we in this case report as missing
1213 # which we in this case report as missing
1214 results[nf] = None
1214 results[nf] = None
1215 else:
1215 else:
1216 # We may not have walked the full directory tree above,
1216 # We may not have walked the full directory tree above,
1217 # so stat and check everything we missed.
1217 # so stat and check everything we missed.
1218 iv = iter(visit)
1218 iv = iter(visit)
1219 for st in util.statfiles([join(i) for i in visit]):
1219 for st in util.statfiles([join(i) for i in visit]):
1220 results[next(iv)] = st
1220 results[next(iv)] = st
1221 return results
1221 return results
1222
1222
1223 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1223 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1224 if self._sparsematchfn is not None:
1224 if self._sparsematchfn is not None:
1225 em = matchmod.exact(matcher.files())
1225 em = matchmod.exact(matcher.files())
1226 sm = matchmod.unionmatcher([self._sparsematcher, em])
1226 sm = matchmod.unionmatcher([self._sparsematcher, em])
1227 matcher = matchmod.intersectmatchers(matcher, sm)
1227 matcher = matchmod.intersectmatchers(matcher, sm)
1228 # Force Rayon (Rust parallelism library) to respect the number of
1228 # Force Rayon (Rust parallelism library) to respect the number of
1229 # workers. This is a temporary workaround until Rust code knows
1229 # workers. This is a temporary workaround until Rust code knows
1230 # how to read the config file.
1230 # how to read the config file.
1231 numcpus = self._ui.configint(b"worker", b"numcpus")
1231 numcpus = self._ui.configint(b"worker", b"numcpus")
1232 if numcpus is not None:
1232 if numcpus is not None:
1233 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1233 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1234
1234
1235 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1235 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1236 if not workers_enabled:
1236 if not workers_enabled:
1237 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1237 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1238
1238
1239 (
1239 (
1240 lookup,
1240 lookup,
1241 modified,
1241 modified,
1242 added,
1242 added,
1243 removed,
1243 removed,
1244 deleted,
1244 deleted,
1245 clean,
1245 clean,
1246 ignored,
1246 ignored,
1247 unknown,
1247 unknown,
1248 warnings,
1248 warnings,
1249 bad,
1249 bad,
1250 traversed,
1250 traversed,
1251 dirty,
1251 dirty,
1252 ) = rustmod.status(
1252 ) = rustmod.status(
1253 self._map._map,
1253 self._map._map,
1254 matcher,
1254 matcher,
1255 self._rootdir,
1255 self._rootdir,
1256 self._ignorefiles(),
1256 self._ignorefiles(),
1257 self._checkexec,
1257 self._checkexec,
1258 bool(list_clean),
1258 bool(list_clean),
1259 bool(list_ignored),
1259 bool(list_ignored),
1260 bool(list_unknown),
1260 bool(list_unknown),
1261 bool(matcher.traversedir),
1261 bool(matcher.traversedir),
1262 )
1262 )
1263
1263
1264 self._dirty |= dirty
1264 self._dirty |= dirty
1265
1265
1266 if matcher.traversedir:
1266 if matcher.traversedir:
1267 for dir in traversed:
1267 for dir in traversed:
1268 matcher.traversedir(dir)
1268 matcher.traversedir(dir)
1269
1269
1270 if self._ui.warn:
1270 if self._ui.warn:
1271 for item in warnings:
1271 for item in warnings:
1272 if isinstance(item, tuple):
1272 if isinstance(item, tuple):
1273 file_path, syntax = item
1273 file_path, syntax = item
1274 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1274 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1275 file_path,
1275 file_path,
1276 syntax,
1276 syntax,
1277 )
1277 )
1278 self._ui.warn(msg)
1278 self._ui.warn(msg)
1279 else:
1279 else:
1280 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1280 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1281 self._ui.warn(
1281 self._ui.warn(
1282 msg
1282 msg
1283 % (
1283 % (
1284 pathutil.canonpath(
1284 pathutil.canonpath(
1285 self._rootdir, self._rootdir, item
1285 self._rootdir, self._rootdir, item
1286 ),
1286 ),
1287 b"No such file or directory",
1287 b"No such file or directory",
1288 )
1288 )
1289 )
1289 )
1290
1290
1291 for (fn, message) in bad:
1291 for (fn, message) in bad:
1292 matcher.bad(fn, encoding.strtolocal(message))
1292 matcher.bad(fn, encoding.strtolocal(message))
1293
1293
1294 status = scmutil.status(
1294 status = scmutil.status(
1295 modified=modified,
1295 modified=modified,
1296 added=added,
1296 added=added,
1297 removed=removed,
1297 removed=removed,
1298 deleted=deleted,
1298 deleted=deleted,
1299 unknown=unknown,
1299 unknown=unknown,
1300 ignored=ignored,
1300 ignored=ignored,
1301 clean=clean,
1301 clean=clean,
1302 )
1302 )
1303 return (lookup, status)
1303 return (lookup, status)
1304
1304
1305 def status(self, match, subrepos, ignored, clean, unknown):
1305 def status(self, match, subrepos, ignored, clean, unknown):
1306 """Determine the status of the working copy relative to the
1306 """Determine the status of the working copy relative to the
1307 dirstate and return a pair of (unsure, status), where status is of type
1307 dirstate and return a pair of (unsure, status), where status is of type
1308 scmutil.status and:
1308 scmutil.status and:
1309
1309
1310 unsure:
1310 unsure:
1311 files that might have been modified since the dirstate was
1311 files that might have been modified since the dirstate was
1312 written, but need to be read to be sure (size is the same
1312 written, but need to be read to be sure (size is the same
1313 but mtime differs)
1313 but mtime differs)
1314 status.modified:
1314 status.modified:
1315 files that have definitely been modified since the dirstate
1315 files that have definitely been modified since the dirstate
1316 was written (different size or mode)
1316 was written (different size or mode)
1317 status.clean:
1317 status.clean:
1318 files that have definitely not been modified since the
1318 files that have definitely not been modified since the
1319 dirstate was written
1319 dirstate was written
1320 """
1320 """
1321 listignored, listclean, listunknown = ignored, clean, unknown
1321 listignored, listclean, listunknown = ignored, clean, unknown
1322 lookup, modified, added, unknown, ignored = [], [], [], [], []
1322 lookup, modified, added, unknown, ignored = [], [], [], [], []
1323 removed, deleted, clean = [], [], []
1323 removed, deleted, clean = [], [], []
1324
1324
1325 dmap = self._map
1325 dmap = self._map
1326 dmap.preload()
1326 dmap.preload()
1327
1327
1328 use_rust = True
1328 use_rust = True
1329
1329
1330 allowed_matchers = (
1330 allowed_matchers = (
1331 matchmod.alwaysmatcher,
1331 matchmod.alwaysmatcher,
1332 matchmod.differencematcher,
1332 matchmod.differencematcher,
1333 matchmod.exactmatcher,
1333 matchmod.exactmatcher,
1334 matchmod.includematcher,
1334 matchmod.includematcher,
1335 matchmod.intersectionmatcher,
1335 matchmod.intersectionmatcher,
1336 matchmod.nevermatcher,
1336 matchmod.nevermatcher,
1337 matchmod.unionmatcher,
1337 matchmod.unionmatcher,
1338 )
1338 )
1339
1339
1340 if rustmod is None:
1340 if rustmod is None:
1341 use_rust = False
1341 use_rust = False
1342 elif self._checkcase:
1342 elif self._checkcase:
1343 # Case-insensitive filesystems are not handled yet
1343 # Case-insensitive filesystems are not handled yet
1344 use_rust = False
1344 use_rust = False
1345 elif subrepos:
1345 elif subrepos:
1346 use_rust = False
1346 use_rust = False
1347 elif not isinstance(match, allowed_matchers):
1347 elif not isinstance(match, allowed_matchers):
1348 # Some matchers have yet to be implemented
1348 # Some matchers have yet to be implemented
1349 use_rust = False
1349 use_rust = False
1350
1350
1351 # Get the time from the filesystem so we can disambiguate files that
1351 # Get the time from the filesystem so we can disambiguate files that
1352 # appear modified in the present or future.
1352 # appear modified in the present or future.
1353 try:
1353 try:
1354 mtime_boundary = timestamp.get_fs_now(self._opener)
1354 mtime_boundary = timestamp.get_fs_now(self._opener)
1355 except OSError:
1355 except OSError:
1356 # In largefiles or readonly context
1356 # In largefiles or readonly context
1357 mtime_boundary = None
1357 mtime_boundary = None
1358
1358
1359 if use_rust:
1359 if use_rust:
1360 try:
1360 try:
1361 res = self._rust_status(
1361 res = self._rust_status(
1362 match, listclean, listignored, listunknown
1362 match, listclean, listignored, listunknown
1363 )
1363 )
1364 return res + (mtime_boundary,)
1364 return res + (mtime_boundary,)
1365 except rustmod.FallbackError:
1365 except rustmod.FallbackError:
1366 pass
1366 pass
1367
1367
1368 def noop(f):
1368 def noop(f):
1369 pass
1369 pass
1370
1370
1371 dcontains = dmap.__contains__
1371 dcontains = dmap.__contains__
1372 dget = dmap.__getitem__
1372 dget = dmap.__getitem__
1373 ladd = lookup.append # aka "unsure"
1373 ladd = lookup.append # aka "unsure"
1374 madd = modified.append
1374 madd = modified.append
1375 aadd = added.append
1375 aadd = added.append
1376 uadd = unknown.append if listunknown else noop
1376 uadd = unknown.append if listunknown else noop
1377 iadd = ignored.append if listignored else noop
1377 iadd = ignored.append if listignored else noop
1378 radd = removed.append
1378 radd = removed.append
1379 dadd = deleted.append
1379 dadd = deleted.append
1380 cadd = clean.append if listclean else noop
1380 cadd = clean.append if listclean else noop
1381 mexact = match.exact
1381 mexact = match.exact
1382 dirignore = self._dirignore
1382 dirignore = self._dirignore
1383 checkexec = self._checkexec
1383 checkexec = self._checkexec
1384 checklink = self._checklink
1384 checklink = self._checklink
1385 copymap = self._map.copymap
1385 copymap = self._map.copymap
1386
1386
1387 # We need to do full walks when either
1387 # We need to do full walks when either
1388 # - we're listing all clean files, or
1388 # - we're listing all clean files, or
1389 # - match.traversedir does something, because match.traversedir should
1389 # - match.traversedir does something, because match.traversedir should
1390 # be called for every dir in the working dir
1390 # be called for every dir in the working dir
1391 full = listclean or match.traversedir is not None
1391 full = listclean or match.traversedir is not None
1392 for fn, st in self.walk(
1392 for fn, st in self.walk(
1393 match, subrepos, listunknown, listignored, full=full
1393 match, subrepos, listunknown, listignored, full=full
1394 ).items():
1394 ).items():
1395 if not dcontains(fn):
1395 if not dcontains(fn):
1396 if (listignored or mexact(fn)) and dirignore(fn):
1396 if (listignored or mexact(fn)) and dirignore(fn):
1397 if listignored:
1397 if listignored:
1398 iadd(fn)
1398 iadd(fn)
1399 else:
1399 else:
1400 uadd(fn)
1400 uadd(fn)
1401 continue
1401 continue
1402
1402
1403 t = dget(fn)
1403 t = dget(fn)
1404 mode = t.mode
1404 mode = t.mode
1405 size = t.size
1405 size = t.size
1406
1406
1407 if not st and t.tracked:
1407 if not st and t.tracked:
1408 dadd(fn)
1408 dadd(fn)
1409 elif t.p2_info:
1409 elif t.p2_info:
1410 madd(fn)
1410 madd(fn)
1411 elif t.added:
1411 elif t.added:
1412 aadd(fn)
1412 aadd(fn)
1413 elif t.removed:
1413 elif t.removed:
1414 radd(fn)
1414 radd(fn)
1415 elif t.tracked:
1415 elif t.tracked:
1416 if not checklink and t.has_fallback_symlink:
1416 if not checklink and t.has_fallback_symlink:
1417 # If the file system does not support symlink, the mode
1417 # If the file system does not support symlink, the mode
1418 # might not be correctly stored in the dirstate, so do not
1418 # might not be correctly stored in the dirstate, so do not
1419 # trust it.
1419 # trust it.
1420 ladd(fn)
1420 ladd(fn)
1421 elif not checkexec and t.has_fallback_exec:
1421 elif not checkexec and t.has_fallback_exec:
1422 # If the file system does not support exec bits, the mode
1422 # If the file system does not support exec bits, the mode
1423 # might not be correctly stored in the dirstate, so do not
1423 # might not be correctly stored in the dirstate, so do not
1424 # trust it.
1424 # trust it.
1425 ladd(fn)
1425 ladd(fn)
1426 elif (
1426 elif (
1427 size >= 0
1427 size >= 0
1428 and (
1428 and (
1429 (size != st.st_size and size != st.st_size & _rangemask)
1429 (size != st.st_size and size != st.st_size & _rangemask)
1430 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1430 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1431 )
1431 )
1432 or fn in copymap
1432 or fn in copymap
1433 ):
1433 ):
1434 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1434 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1435 # issue6456: Size returned may be longer due to
1435 # issue6456: Size returned may be longer due to
1436 # encryption on EXT-4 fscrypt, undecided.
1436 # encryption on EXT-4 fscrypt, undecided.
1437 ladd(fn)
1437 ladd(fn)
1438 else:
1438 else:
1439 madd(fn)
1439 madd(fn)
1440 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1440 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1441 # There might be a change in the future if for example the
1441 # There might be a change in the future if for example the
1442 # internal clock is off, but this is a case where the issues
1442 # internal clock is off, but this is a case where the issues
1443 # the user would face would be a lot worse and there is
1443 # the user would face would be a lot worse and there is
1444 # nothing we can really do.
1444 # nothing we can really do.
1445 ladd(fn)
1445 ladd(fn)
1446 elif listclean:
1446 elif listclean:
1447 cadd(fn)
1447 cadd(fn)
1448 status = scmutil.status(
1448 status = scmutil.status(
1449 modified, added, removed, deleted, unknown, ignored, clean
1449 modified, added, removed, deleted, unknown, ignored, clean
1450 )
1450 )
1451 return (lookup, status, mtime_boundary)
1451 return (lookup, status, mtime_boundary)
1452
1452
1453 def matches(self, match):
1453 def matches(self, match):
1454 """
1454 """
1455 return files in the dirstate (in whatever state) filtered by match
1455 return files in the dirstate (in whatever state) filtered by match
1456 """
1456 """
1457 dmap = self._map
1457 dmap = self._map
1458 if rustmod is not None:
1458 if rustmod is not None:
1459 dmap = self._map._map
1459 dmap = self._map._map
1460
1460
1461 if match.always():
1461 if match.always():
1462 return dmap.keys()
1462 return dmap.keys()
1463 files = match.files()
1463 files = match.files()
1464 if match.isexact():
1464 if match.isexact():
1465 # fast path -- filter the other way around, since typically files is
1465 # fast path -- filter the other way around, since typically files is
1466 # much smaller than dmap
1466 # much smaller than dmap
1467 return [f for f in files if f in dmap]
1467 return [f for f in files if f in dmap]
1468 if match.prefix() and all(fn in dmap for fn in files):
1468 if match.prefix() and all(fn in dmap for fn in files):
1469 # fast path -- all the values are known to be files, so just return
1469 # fast path -- all the values are known to be files, so just return
1470 # that
1470 # that
1471 return list(files)
1471 return list(files)
1472 return [f for f in dmap if match(f)]
1472 return [f for f in dmap if match(f)]
1473
1473
1474 def _actualfilename(self, tr):
1474 def _actualfilename(self, tr):
1475 if tr:
1475 if tr:
1476 return self._pendingfilename
1476 return self._pendingfilename
1477 else:
1477 else:
1478 return self._filename
1478 return self._filename
1479
1479
1480 def data_backup_filename(self, backupname):
1480 def data_backup_filename(self, backupname):
1481 if not self._use_dirstate_v2:
1481 if not self._use_dirstate_v2:
1482 return None
1482 return None
1483 return backupname + b'.v2-data'
1483 return backupname + b'.v2-data'
1484
1484
1485 def _new_backup_data_filename(self, backupname):
1485 def _new_backup_data_filename(self, backupname):
1486 """return a filename to backup a data-file or None"""
1486 """return a filename to backup a data-file or None"""
1487 if not self._use_dirstate_v2:
1487 if not self._use_dirstate_v2:
1488 return None
1488 return None
1489 if self._map.docket.uuid is None:
1490 # not created yet, nothing to backup
1491 return None
1489 data_filename = self._map.docket.data_filename()
1492 data_filename = self._map.docket.data_filename()
1490 return data_filename, self.data_backup_filename(backupname)
1493 return data_filename, self.data_backup_filename(backupname)
1491
1494
1492 def backup_data_file(self, backupname):
1495 def backup_data_file(self, backupname):
1493 if not self._use_dirstate_v2:
1496 if not self._use_dirstate_v2:
1494 return None
1497 return None
1495 docket = docketmod.DirstateDocket.parse(
1498 docket = docketmod.DirstateDocket.parse(
1496 self._opener.read(backupname),
1499 self._opener.read(backupname),
1497 self._nodeconstants,
1500 self._nodeconstants,
1498 )
1501 )
1499 return self.data_backup_filename(backupname), docket.data_filename()
1502 return self.data_backup_filename(backupname), docket.data_filename()
1500
1503
1501 def savebackup(self, tr, backupname):
1504 def savebackup(self, tr, backupname):
1502 '''Save current dirstate into backup file'''
1505 '''Save current dirstate into backup file'''
1503 filename = self._actualfilename(tr)
1506 filename = self._actualfilename(tr)
1504 assert backupname != filename
1507 assert backupname != filename
1505
1508
1506 # use '_writedirstate' instead of 'write' to write changes certainly,
1509 # use '_writedirstate' instead of 'write' to write changes certainly,
1507 # because the latter omits writing out if transaction is running.
1510 # because the latter omits writing out if transaction is running.
1508 # output file will be used to create backup of dirstate at this point.
1511 # output file will be used to create backup of dirstate at this point.
1509 if self._dirty or not self._opener.exists(filename):
1512 if self._dirty:
1510 self._writedirstate(
1513 self._writedirstate(
1511 tr,
1514 tr,
1512 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1515 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1513 )
1516 )
1514
1517
1515 if tr:
1518 if tr:
1516 # ensure that subsequent tr.writepending returns True for
1519 # ensure that subsequent tr.writepending returns True for
1517 # changes written out above, even if dirstate is never
1520 # changes written out above, even if dirstate is never
1518 # changed after this
1521 # changed after this
1519 tr.addfilegenerator(
1522 tr.addfilegenerator(
1520 b'dirstate-1-main',
1523 b'dirstate-1-main',
1521 (self._filename,),
1524 (self._filename,),
1522 lambda f: self._writedirstate(tr, f),
1525 lambda f: self._writedirstate(tr, f),
1523 location=b'plain',
1526 location=b'plain',
1524 post_finalize=True,
1527 post_finalize=True,
1525 )
1528 )
1526
1529
1527 # ensure that pending file written above is unlinked at
1530 # ensure that pending file written above is unlinked at
1528 # failure, even if tr.writepending isn't invoked until the
1531 # failure, even if tr.writepending isn't invoked until the
1529 # end of this transaction
1532 # end of this transaction
1530 tr.registertmp(filename, location=b'plain')
1533 tr.registertmp(filename, location=b'plain')
1531
1534
1532 self._opener.tryunlink(backupname)
1535 self._opener.tryunlink(backupname)
1533 if True:
1536 if self._opener.exists(filename):
1534 # hardlink backup is okay because _writedirstate is always called
1537 # hardlink backup is okay because _writedirstate is always called
1535 # with an "atomictemp=True" file.
1538 # with an "atomictemp=True" file.
1536 util.copyfile(
1539 util.copyfile(
1537 self._opener.join(filename),
1540 self._opener.join(filename),
1538 self._opener.join(backupname),
1541 self._opener.join(backupname),
1539 hardlink=True,
1542 hardlink=True,
1540 )
1543 )
1541 data_pair = self._new_backup_data_filename(backupname)
1544 data_pair = self._new_backup_data_filename(backupname)
1542 if data_pair is not None:
1545 if data_pair is not None:
1543 data_filename, bck_data_filename = data_pair
1546 data_filename, bck_data_filename = data_pair
1544 util.copyfile(
1547 util.copyfile(
1545 self._opener.join(data_filename),
1548 self._opener.join(data_filename),
1546 self._opener.join(bck_data_filename),
1549 self._opener.join(bck_data_filename),
1547 hardlink=True,
1550 hardlink=True,
1548 )
1551 )
1549 if tr is not None:
1552 if tr is not None:
1550 # ensure that pending file written above is unlinked at
1553 # ensure that pending file written above is unlinked at
1551 # failure, even if tr.writepending isn't invoked until the
1554 # failure, even if tr.writepending isn't invoked until the
1552 # end of this transaction
1555 # end of this transaction
1553 tr.registertmp(bck_data_filename, location=b'plain')
1556 tr.registertmp(bck_data_filename, location=b'plain')
1554
1557
1555 def restorebackup(self, tr, backupname):
1558 def restorebackup(self, tr, backupname):
1556 '''Restore dirstate by backup file'''
1559 '''Restore dirstate by backup file'''
1557 # this "invalidate()" prevents "wlock.release()" from writing
1560 # this "invalidate()" prevents "wlock.release()" from writing
1558 # changes of dirstate out after restoring from backup file
1561 # changes of dirstate out after restoring from backup file
1559 self.invalidate()
1562 self.invalidate()
1563 o = self._opener
1564 if not o.exists(backupname):
1565 # there was no file backup, delete existing files
1566 filename = self._actualfilename(tr)
1567 data_file = None
1568 if self._use_dirstate_v2:
1569 data_file = self._map.docket.data_filename()
1570 if o.exists(filename):
1571 o.unlink(filename)
1572 if data_file is not None and o.exists(data_file):
1573 o.unlink(data_file)
1574 return
1560 filename = self._actualfilename(tr)
1575 filename = self._actualfilename(tr)
1561 o = self._opener
1562 data_pair = self.backup_data_file(backupname)
1576 data_pair = self.backup_data_file(backupname)
1563 if util.samefile(o.join(backupname), o.join(filename)):
1577 if o.exists(filename) and util.samefile(
1578 o.join(backupname), o.join(filename)
1579 ):
1564 o.unlink(backupname)
1580 o.unlink(backupname)
1565 else:
1581 else:
1566 o.rename(backupname, filename, checkambig=True)
1582 o.rename(backupname, filename, checkambig=True)
1567
1583
1568 if data_pair is not None:
1584 if data_pair is not None:
1569 data_backup, target = data_pair
1585 data_backup, target = data_pair
1570 if o.exists(target) and util.samefile(
1586 if o.exists(target) and util.samefile(
1571 o.join(data_backup), o.join(target)
1587 o.join(data_backup), o.join(target)
1572 ):
1588 ):
1573 o.unlink(data_backup)
1589 o.unlink(data_backup)
1574 else:
1590 else:
1575 o.rename(data_backup, target, checkambig=True)
1591 o.rename(data_backup, target, checkambig=True)
1576
1592
1577 def clearbackup(self, tr, backupname):
1593 def clearbackup(self, tr, backupname):
1578 '''Clear backup file'''
1594 '''Clear backup file'''
1579 o = self._opener
1595 o = self._opener
1580 data_backup = self.backup_data_file(backupname)
1596 if o.exists(backupname):
1581 o.unlink(backupname)
1597 data_backup = self.backup_data_file(backupname)
1582
1598 o.unlink(backupname)
1583 if data_backup is not None:
1599 if data_backup is not None:
1584 o.unlink(data_backup[0])
1600 o.unlink(data_backup[0])
1585
1601
1586 def verify(self, m1, m2, p1, narrow_matcher=None):
1602 def verify(self, m1, m2, p1, narrow_matcher=None):
1587 """
1603 """
1588 check the dirstate contents against the parent manifest and yield errors
1604 check the dirstate contents against the parent manifest and yield errors
1589 """
1605 """
1590 missing_from_p1 = _(
1606 missing_from_p1 = _(
1591 b"%s marked as tracked in p1 (%s) but not in manifest1\n"
1607 b"%s marked as tracked in p1 (%s) but not in manifest1\n"
1592 )
1608 )
1593 unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
1609 unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
1594 missing_from_ps = _(
1610 missing_from_ps = _(
1595 b"%s marked as modified, but not in either manifest\n"
1611 b"%s marked as modified, but not in either manifest\n"
1596 )
1612 )
1597 missing_from_ds = _(
1613 missing_from_ds = _(
1598 b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
1614 b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
1599 )
1615 )
1600 for f, entry in self.items():
1616 for f, entry in self.items():
1601 if entry.p1_tracked:
1617 if entry.p1_tracked:
1602 if entry.modified and f not in m1 and f not in m2:
1618 if entry.modified and f not in m1 and f not in m2:
1603 yield missing_from_ps % f
1619 yield missing_from_ps % f
1604 elif f not in m1:
1620 elif f not in m1:
1605 yield missing_from_p1 % (f, node.short(p1))
1621 yield missing_from_p1 % (f, node.short(p1))
1606 if entry.added and f in m1:
1622 if entry.added and f in m1:
1607 yield unexpected_in_p1 % f
1623 yield unexpected_in_p1 % f
1608 for f in m1:
1624 for f in m1:
1609 if narrow_matcher is not None and not narrow_matcher(f):
1625 if narrow_matcher is not None and not narrow_matcher(f):
1610 continue
1626 continue
1611 entry = self.get_entry(f)
1627 entry = self.get_entry(f)
1612 if not entry.p1_tracked:
1628 if not entry.p1_tracked:
1613 yield missing_from_ds % (f, node.short(p1))
1629 yield missing_from_ds % (f, node.short(p1))
@@ -1,50 +1,49 b''
1 Create an empty repo:
1 Create an empty repo:
2
2
3 $ hg init a
3 $ hg init a
4 $ cd a
4 $ cd a
5
5
6 Try some commands:
6 Try some commands:
7
7
8 $ hg log
8 $ hg log
9 $ hg grep wah
9 $ hg grep wah
10 [1]
10 [1]
11 $ hg manifest
11 $ hg manifest
12 $ hg verify -q
12 $ hg verify -q
13
13
14 Check the basic files created:
14 Check the basic files created:
15
15
16 $ ls .hg
16 $ ls .hg
17 00changelog.i
17 00changelog.i
18 cache
18 cache
19 requires
19 requires
20 store
20 store
21 wcache
21 wcache
22
22
23 Should be empty (except for the "basic" requires):
23 Should be empty (except for the "basic" requires):
24
24
25 $ ls .hg/store
25 $ ls .hg/store
26 requires
26 requires
27
27
28 Poke at a clone:
28 Poke at a clone:
29
29
30 $ cd ..
30 $ cd ..
31 $ hg clone a b
31 $ hg clone a b
32 updating to branch default
32 updating to branch default
33 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
33 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
34 $ cd b
34 $ cd b
35 $ hg verify -q
35 $ hg verify -q
36 $ ls .hg
36 $ ls .hg
37 00changelog.i
37 00changelog.i
38 cache
38 cache
39 dirstate
40 hgrc
39 hgrc
41 requires
40 requires
42 store
41 store
43 wcache
42 wcache
44
43
45 Should be empty (except for the "basic" requires):
44 Should be empty (except for the "basic" requires):
46
45
47 $ ls .hg/store
46 $ ls .hg/store
48 requires
47 requires
49
48
50 $ cd ..
49 $ cd ..
@@ -1,186 +1,184 b''
1 #require unix-permissions
1 #require unix-permissions
2
2
3 test that new files created in .hg inherit the permissions from .hg/store
3 test that new files created in .hg inherit the permissions from .hg/store
4
4
5 $ mkdir dir
5 $ mkdir dir
6
6
7 just in case somebody has a strange $TMPDIR
7 just in case somebody has a strange $TMPDIR
8
8
9 $ chmod g-s dir
9 $ chmod g-s dir
10 $ cd dir
10 $ cd dir
11
11
12 $ cat >printmodes.py <<EOF
12 $ cat >printmodes.py <<EOF
13 > import os
13 > import os
14 > import sys
14 > import sys
15 >
15 >
16 > allnames = []
16 > allnames = []
17 > isdir = {}
17 > isdir = {}
18 > for root, dirs, files in os.walk(sys.argv[1]):
18 > for root, dirs, files in os.walk(sys.argv[1]):
19 > for d in dirs:
19 > for d in dirs:
20 > name = os.path.join(root, d)
20 > name = os.path.join(root, d)
21 > isdir[name] = 1
21 > isdir[name] = 1
22 > allnames.append(name)
22 > allnames.append(name)
23 > for f in files:
23 > for f in files:
24 > name = os.path.join(root, f)
24 > name = os.path.join(root, f)
25 > allnames.append(name)
25 > allnames.append(name)
26 > allnames.sort()
26 > allnames.sort()
27 > for name in allnames:
27 > for name in allnames:
28 > suffix = name in isdir and '/' or ''
28 > suffix = name in isdir and '/' or ''
29 > print('%05o %s%s' % (os.lstat(name).st_mode & 0o7777, name, suffix))
29 > print('%05o %s%s' % (os.lstat(name).st_mode & 0o7777, name, suffix))
30 > EOF
30 > EOF
31
31
32 $ cat >mode.py <<EOF
32 $ cat >mode.py <<EOF
33 > import os
33 > import os
34 > import sys
34 > import sys
35 > print('%05o' % os.lstat(sys.argv[1]).st_mode)
35 > print('%05o' % os.lstat(sys.argv[1]).st_mode)
36 > EOF
36 > EOF
37
37
38 $ umask 077
38 $ umask 077
39
39
40 $ hg init repo
40 $ hg init repo
41 $ cd repo
41 $ cd repo
42
42
43 $ chmod 0770 .hg/store .hg/cache .hg/wcache
43 $ chmod 0770 .hg/store .hg/cache .hg/wcache
44
44
45 before commit
45 before commit
46 store can be written by the group, other files cannot
46 store can be written by the group, other files cannot
47 store is setgid
47 store is setgid
48
48
49 $ "$PYTHON" ../printmodes.py .
49 $ "$PYTHON" ../printmodes.py .
50 00700 ./.hg/
50 00700 ./.hg/
51 00600 ./.hg/00changelog.i
51 00600 ./.hg/00changelog.i
52 00770 ./.hg/cache/
52 00770 ./.hg/cache/
53 00600 ./.hg/requires
53 00600 ./.hg/requires
54 00770 ./.hg/store/
54 00770 ./.hg/store/
55 00600 ./.hg/store/requires
55 00600 ./.hg/store/requires
56 00770 ./.hg/wcache/
56 00770 ./.hg/wcache/
57
57
58 $ mkdir dir
58 $ mkdir dir
59 $ touch foo dir/bar
59 $ touch foo dir/bar
60 $ hg ci -qAm 'add files'
60 $ hg ci -qAm 'add files'
61
61
62 after commit
62 after commit
63 working dir files can only be written by the owner
63 working dir files can only be written by the owner
64 files created in .hg can be written by the group
64 files created in .hg can be written by the group
65 (in particular, store/**, dirstate, branch cache file, undo files)
65 (in particular, store/**, dirstate, branch cache file, undo files)
66 new directories are setgid
66 new directories are setgid
67
67
68 $ "$PYTHON" ../printmodes.py .
68 $ "$PYTHON" ../printmodes.py .
69 00700 ./.hg/
69 00700 ./.hg/
70 00600 ./.hg/00changelog.i
70 00600 ./.hg/00changelog.i
71 00770 ./.hg/cache/
71 00770 ./.hg/cache/
72 00660 ./.hg/cache/branch2-served
72 00660 ./.hg/cache/branch2-served
73 00660 ./.hg/cache/rbc-names-v1
73 00660 ./.hg/cache/rbc-names-v1
74 00660 ./.hg/cache/rbc-revs-v1
74 00660 ./.hg/cache/rbc-revs-v1
75 00660 ./.hg/dirstate
75 00660 ./.hg/dirstate
76 00660 ./.hg/fsmonitor.state (fsmonitor !)
76 00660 ./.hg/fsmonitor.state (fsmonitor !)
77 00660 ./.hg/last-message.txt
77 00660 ./.hg/last-message.txt
78 00600 ./.hg/requires
78 00600 ./.hg/requires
79 00770 ./.hg/store/
79 00770 ./.hg/store/
80 00660 ./.hg/store/00changelog.i
80 00660 ./.hg/store/00changelog.i
81 00660 ./.hg/store/00manifest.i
81 00660 ./.hg/store/00manifest.i
82 00770 ./.hg/store/data/
82 00770 ./.hg/store/data/
83 00770 ./.hg/store/data/dir/
83 00770 ./.hg/store/data/dir/
84 00660 ./.hg/store/data/dir/bar.i (reporevlogstore !)
84 00660 ./.hg/store/data/dir/bar.i (reporevlogstore !)
85 00660 ./.hg/store/data/foo.i (reporevlogstore !)
85 00660 ./.hg/store/data/foo.i (reporevlogstore !)
86 00770 ./.hg/store/data/dir/bar/ (reposimplestore !)
86 00770 ./.hg/store/data/dir/bar/ (reposimplestore !)
87 00660 ./.hg/store/data/dir/bar/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
87 00660 ./.hg/store/data/dir/bar/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
88 00660 ./.hg/store/data/dir/bar/index (reposimplestore !)
88 00660 ./.hg/store/data/dir/bar/index (reposimplestore !)
89 00770 ./.hg/store/data/foo/ (reposimplestore !)
89 00770 ./.hg/store/data/foo/ (reposimplestore !)
90 00660 ./.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
90 00660 ./.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
91 00660 ./.hg/store/data/foo/index (reposimplestore !)
91 00660 ./.hg/store/data/foo/index (reposimplestore !)
92 00660 ./.hg/store/fncache (repofncache !)
92 00660 ./.hg/store/fncache (repofncache !)
93 00660 ./.hg/store/phaseroots
93 00660 ./.hg/store/phaseroots
94 00600 ./.hg/store/requires
94 00600 ./.hg/store/requires
95 00660 ./.hg/store/undo
95 00660 ./.hg/store/undo
96 00660 ./.hg/store/undo.backupfiles
96 00660 ./.hg/store/undo.backupfiles
97 00660 ./.hg/store/undo.phaseroots
97 00660 ./.hg/store/undo.phaseroots
98 00660 ./.hg/undo.backup.dirstate
98 00660 ./.hg/undo.backup.dirstate
99 00660 ./.hg/undo.bookmarks
99 00660 ./.hg/undo.bookmarks
100 00660 ./.hg/undo.branch
100 00660 ./.hg/undo.branch
101 00660 ./.hg/undo.desc
101 00660 ./.hg/undo.desc
102 00660 ./.hg/undo.dirstate
102 00660 ./.hg/undo.dirstate
103 00770 ./.hg/wcache/
103 00770 ./.hg/wcache/
104 00711 ./.hg/wcache/checkisexec
104 00711 ./.hg/wcache/checkisexec
105 007.. ./.hg/wcache/checklink (re)
105 007.. ./.hg/wcache/checklink (re)
106 00600 ./.hg/wcache/checklink-target
106 00600 ./.hg/wcache/checklink-target
107 00660 ./.hg/wcache/manifestfulltextcache (reporevlogstore !)
107 00660 ./.hg/wcache/manifestfulltextcache (reporevlogstore !)
108 00700 ./dir/
108 00700 ./dir/
109 00600 ./dir/bar
109 00600 ./dir/bar
110 00600 ./foo
110 00600 ./foo
111
111
112 $ umask 007
112 $ umask 007
113 $ hg init ../push
113 $ hg init ../push
114
114
115 before push
115 before push
116 group can write everything
116 group can write everything
117
117
118 $ "$PYTHON" ../printmodes.py ../push
118 $ "$PYTHON" ../printmodes.py ../push
119 00770 ../push/.hg/
119 00770 ../push/.hg/
120 00660 ../push/.hg/00changelog.i
120 00660 ../push/.hg/00changelog.i
121 00770 ../push/.hg/cache/
121 00770 ../push/.hg/cache/
122 00660 ../push/.hg/requires
122 00660 ../push/.hg/requires
123 00770 ../push/.hg/store/
123 00770 ../push/.hg/store/
124 00660 ../push/.hg/store/requires
124 00660 ../push/.hg/store/requires
125 00770 ../push/.hg/wcache/
125 00770 ../push/.hg/wcache/
126
126
127 $ umask 077
127 $ umask 077
128 $ hg -q push ../push
128 $ hg -q push ../push
129
129
130 after push
130 after push
131 group can still write everything
131 group can still write everything
132
132
133 $ "$PYTHON" ../printmodes.py ../push
133 $ "$PYTHON" ../printmodes.py ../push
134 00770 ../push/.hg/
134 00770 ../push/.hg/
135 00660 ../push/.hg/00changelog.i
135 00660 ../push/.hg/00changelog.i
136 00770 ../push/.hg/cache/
136 00770 ../push/.hg/cache/
137 00660 ../push/.hg/cache/branch2-base
137 00660 ../push/.hg/cache/branch2-base
138 00660 ../push/.hg/cache/rbc-names-v1
138 00660 ../push/.hg/cache/rbc-names-v1
139 00660 ../push/.hg/cache/rbc-revs-v1
139 00660 ../push/.hg/cache/rbc-revs-v1
140 00660 ../push/.hg/dirstate
141 00660 ../push/.hg/requires
140 00660 ../push/.hg/requires
142 00770 ../push/.hg/store/
141 00770 ../push/.hg/store/
143 00660 ../push/.hg/store/00changelog.i
142 00660 ../push/.hg/store/00changelog.i
144 00660 ../push/.hg/store/00manifest.i
143 00660 ../push/.hg/store/00manifest.i
145 00770 ../push/.hg/store/data/
144 00770 ../push/.hg/store/data/
146 00770 ../push/.hg/store/data/dir/
145 00770 ../push/.hg/store/data/dir/
147 00660 ../push/.hg/store/data/dir/bar.i (reporevlogstore !)
146 00660 ../push/.hg/store/data/dir/bar.i (reporevlogstore !)
148 00660 ../push/.hg/store/data/foo.i (reporevlogstore !)
147 00660 ../push/.hg/store/data/foo.i (reporevlogstore !)
149 00770 ../push/.hg/store/data/dir/bar/ (reposimplestore !)
148 00770 ../push/.hg/store/data/dir/bar/ (reposimplestore !)
150 00660 ../push/.hg/store/data/dir/bar/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
149 00660 ../push/.hg/store/data/dir/bar/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
151 00660 ../push/.hg/store/data/dir/bar/index (reposimplestore !)
150 00660 ../push/.hg/store/data/dir/bar/index (reposimplestore !)
152 00770 ../push/.hg/store/data/foo/ (reposimplestore !)
151 00770 ../push/.hg/store/data/foo/ (reposimplestore !)
153 00660 ../push/.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
152 00660 ../push/.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
154 00660 ../push/.hg/store/data/foo/index (reposimplestore !)
153 00660 ../push/.hg/store/data/foo/index (reposimplestore !)
155 00660 ../push/.hg/store/fncache (repofncache !)
154 00660 ../push/.hg/store/fncache (repofncache !)
156 00660 ../push/.hg/store/requires
155 00660 ../push/.hg/store/requires
157 00660 ../push/.hg/store/undo
156 00660 ../push/.hg/store/undo
158 00660 ../push/.hg/store/undo.backupfiles
157 00660 ../push/.hg/store/undo.backupfiles
159 00660 ../push/.hg/store/undo.phaseroots
158 00660 ../push/.hg/store/undo.phaseroots
160 00660 ../push/.hg/undo.bookmarks
159 00660 ../push/.hg/undo.bookmarks
161 00660 ../push/.hg/undo.branch
160 00660 ../push/.hg/undo.branch
162 00660 ../push/.hg/undo.desc
161 00660 ../push/.hg/undo.desc
163 00660 ../push/.hg/undo.dirstate
164 00770 ../push/.hg/wcache/
162 00770 ../push/.hg/wcache/
165
163
166
164
167 Test that we don't lose the setgid bit when we call chmod.
165 Test that we don't lose the setgid bit when we call chmod.
168 Not all systems support setgid directories (e.g. HFS+), so
166 Not all systems support setgid directories (e.g. HFS+), so
169 just check that directories have the same mode.
167 just check that directories have the same mode.
170
168
171 $ cd ..
169 $ cd ..
172 $ hg init setgid
170 $ hg init setgid
173 $ cd setgid
171 $ cd setgid
174 $ chmod g+rwx .hg/store
172 $ chmod g+rwx .hg/store
175 $ chmod g+s .hg/store 2> /dev/null || true
173 $ chmod g+s .hg/store 2> /dev/null || true
176 $ mkdir dir
174 $ mkdir dir
177 $ touch dir/file
175 $ touch dir/file
178 $ hg ci -qAm 'add dir/file'
176 $ hg ci -qAm 'add dir/file'
179 $ storemode=`"$PYTHON" ../mode.py .hg/store`
177 $ storemode=`"$PYTHON" ../mode.py .hg/store`
180 $ dirmode=`"$PYTHON" ../mode.py .hg/store/data/dir`
178 $ dirmode=`"$PYTHON" ../mode.py .hg/store/data/dir`
181 $ if [ "$storemode" != "$dirmode" ]; then
179 $ if [ "$storemode" != "$dirmode" ]; then
182 > echo "$storemode != $dirmode"
180 > echo "$storemode != $dirmode"
183 > fi
181 > fi
184 $ cd ..
182 $ cd ..
185
183
186 $ cd .. # g-s dir
184 $ cd .. # g-s dir
General Comments 0
You need to be logged in to leave comments. Login now