dirstate: rename the filegenerator used for writing...
marmoute
r49532:111f5a0c default
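
The substance of the commit is a one-line change in dirstate.write(): the file generator that delays the dirstate write into the transaction is registered under a more specific key. A minimal sketch of the affected call (names taken directly from the diff below):

    # In dirstate.write(self, tr) -- only the generator key is renamed;
    # everything else about the registration is unchanged.
    tr.addfilegenerator(
        b'dirstate-1-main',  # previously b'dirstate'
        (self._filename,),
        lambda f: self._writedirstate(tr, f),
        location=b'plain',
    )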
@@ -1,1430 +1,1430 @@
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import contextlib
import errno
import os
import stat

from .i18n import _
from .pycompat import delattr

from hgdemandimport import tracing

from . import (
    dirstatemap,
    encoding,
    error,
    match as matchmod,
    pathutil,
    policy,
    pycompat,
    scmutil,
    sparse,
    util,
)

from .dirstateutils import (
    timestamp,
)

from .interfaces import (
    dirstate as intdirstate,
    util as interfaceutil,
)

parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

HAS_FAST_DIRSTATE_V2 = rustmod is not None

propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = dirstatemap.DirstateItem


class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        return obj._opener.join(fname)


class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        return obj._join(fname)


def requires_parents_change(func):
    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap


def requires_no_parents_change(func):
    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap


@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd

    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0

    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()

    @repocache(b'branch')
    def _branch(self):
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b"default"

    @property
    def _pl(self):
        return self._map.parents()

    def hasdir(self, d):
        return self._map.hastrackeddir(d)

    @rootcache(b'.hgignore')
    def _ignore(self):
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, buildfallback):
        """build a callable that returns flags associated with a filename

        The information is extracted from three possible layers:
        1. the file system if it supports the information
        2. the "fallback" information stored in the dirstate if any
        3. a more expensive mechanism inferring the flags from the parents.
        """

        # small hack to cache the result of buildfallback()
        fallback_func = []

        def get_flags(x):
            entry = None
            fallback_value = None
            try:
                st = os.lstat(self._join(x))
            except OSError:
                return b''

            if self._checklink:
                if util.statislink(st):
                    return b'l'
            else:
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                else:
                    if not fallback_func:
                        fallback_func.append(buildfallback())
                    fallback_value = fallback_func[0](x)
                    if b'l' in fallback_value:
                        return b'l'

            if self._checkexec:
                if util.statisexec(st):
                    return b'x'
            else:
                if entry is None:
                    entry = self.get_entry(x)
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                else:
                    if fallback_value is None:
                        if not fallback_func:
                            fallback_func.append(buildfallback())
                        fallback_value = fallback_func[0](x)
                    if b'x' in fallback_value:
                        return b'x'
            return b''

        return get_flags

    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()

    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path

    def get_entry(self, path):
        """return a DirstateItem for the associated path"""
        entry = self._map.get(path)
        if entry is None:
            return DirstateItem()
        return entry

    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        return iter(sorted(self._map))

    def items(self):
        return pycompat.iteritems(self._map)

    iteritems = items

    def parents(self):
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        return encoding.tolocal(self._branch)

    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal, and previous copy records are discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)

    def setbranch(self, branch):
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise

    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._dirty = False
        self._parentwriters = 0
        self._origpl = None

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
        else:
            self._map.copymap.pop(dest, None)

    def copied(self, file):
        return self._map.copymap.get(file, None)

    def copies(self):
        return self._map.copymap

    @requires_no_parents_change
    def set_tracked(self, filename, reset_copy=False):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        if reset_copy is set, any existing copy information will be dropped.

        return True if the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            self._check_new_tracked_filename(filename)
        pre_tracked = self._map.set_tracked(filename)
        if reset_copy:
            self._map.copymap.pop(filename, None)
        return pre_tracked

    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True if the file was previously tracked, False otherwise.
        """
        ret = self._map.set_untracked(filename)
        if ret:
            self._dirty = True
        return ret

    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        (mode, size, mtime) = parentfiledata
        self._map.set_clean(filename, mode, size, mtime)

    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._map.set_possibly_dirty(filename)

    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
        )

    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parent changes, to keep track
        of the file's situation with regard to the working copy and its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending on what information ends up being relevant and useful to
        other processing.
        """

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )

    def _check_new_tracked_filename(self, filename):
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)

    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded

    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded

    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing paths are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path

    def clear(self):
        self._map.clear()
        self._dirty = True

    def rebuild(self, parent, allfiles, changedfiles=None):
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            self.clear()
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:

            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True

    def identity(self):
        """Return the identity of the dirstate itself, to detect changes in storage

        If the identity of the previous dirstate is equal to this one, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity

    def write(self, tr):
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # delay writing in-memory changes out
            tr.addfilegenerator(
-                b'dirstate',
+                b'dirstate-1-main',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
            )
            return

        file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
        with file(self._filename) as f:
            self._writedirstate(tr, f)

    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

    def _writedirstate(self, tr, st):
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None

        self._map.write(tr, st)
        self._dirty = False

    def _dirignore(self, f):
        if self._ignore(f):
            return True
        for p in pathutil.finddirs(f):
            if self._ignore(p):
                return True
        return False

    def _ignorefiles(self):
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files

    def _ignorefileandline(self, f):
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")

    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
1044 results[nf] = st
1044 results[nf] = st
1045 elif nf in dmap and (matchalways or matchfn(nf)):
1045 elif nf in dmap and (matchalways or matchfn(nf)):
1046 results[nf] = None
1046 results[nf] = None
1047
1047
1048 for nd, d in work:
1048 for nd, d in work:
1049 # alreadynormed means that traverse doesn't have to do any
1049 # alreadynormed means that traverse doesn't have to do any
1050 # expensive directory normalization
1050 # expensive directory normalization
1051 alreadynormed = not normalize or nd == d
1051 alreadynormed = not normalize or nd == d
1052 traverse([d], alreadynormed)
1052 traverse([d], alreadynormed)
1053
1053
1054 for s in subrepos:
1054 for s in subrepos:
1055 del results[s]
1055 del results[s]
1056 del results[b'.hg']
1056 del results[b'.hg']
1057
1057
1058 # step 3: visit remaining files from dmap
1058 # step 3: visit remaining files from dmap
1059 if not skipstep3 and not exact:
1059 if not skipstep3 and not exact:
1060 # If a dmap file is not in results yet, it was either
1060 # If a dmap file is not in results yet, it was either
1061 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1061 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1062 # symlink directory.
1062 # symlink directory.
1063 if not results and matchalways:
1063 if not results and matchalways:
1064 visit = [f for f in dmap]
1064 visit = [f for f in dmap]
1065 else:
1065 else:
1066 visit = [f for f in dmap if f not in results and matchfn(f)]
1066 visit = [f for f in dmap if f not in results and matchfn(f)]
1067 visit.sort()
1067 visit.sort()
1068
1068
1069 if unknown:
1069 if unknown:
1070 # unknown == True means we walked all dirs under the roots
1070 # unknown == True means we walked all dirs under the roots
1071 # that weren't ignored, and everything that matched was stat'ed
1071 # that weren't ignored, and everything that matched was stat'ed
1072 # and is already in results.
1072 # and is already in results.
1073 # The rest must thus be ignored or under a symlink.
1073 # The rest must thus be ignored or under a symlink.
1074 audit_path = pathutil.pathauditor(self._root, cached=True)
1074 audit_path = pathutil.pathauditor(self._root, cached=True)
1075
1075
1076 for nf in iter(visit):
1076 for nf in iter(visit):
1077 # If a stat for the same file was already added with a
1077 # If a stat for the same file was already added with a
1078 # different case, don't add one for this, since that would
1078 # different case, don't add one for this, since that would
1079 # make it appear as if the file exists under both names
1079 # make it appear as if the file exists under both names
1080 # on disk.
1080 # on disk.
1081 if (
1081 if (
1082 normalizefile
1082 normalizefile
1083 and normalizefile(nf, True, True) in results
1083 and normalizefile(nf, True, True) in results
1084 ):
1084 ):
1085 results[nf] = None
1085 results[nf] = None
1086 # Report ignored items in the dmap as long as they are not
1086 # Report ignored items in the dmap as long as they are not
1087 # under a symlink directory.
1087 # under a symlink directory.
1088 elif audit_path.check(nf):
1088 elif audit_path.check(nf):
1089 try:
1089 try:
1090 results[nf] = lstat(join(nf))
1090 results[nf] = lstat(join(nf))
1091 # file was just ignored, no links, and exists
1091 # file was just ignored, no links, and exists
1092 except OSError:
1092 except OSError:
1093 # file doesn't exist
1093 # file doesn't exist
1094 results[nf] = None
1094 results[nf] = None
1095 else:
1095 else:
1096 # It's either missing or under a symlink directory,
1096 # It's either missing or under a symlink directory,
1097 # which in this case we report as missing
1097 # which in this case we report as missing
1098 results[nf] = None
1098 results[nf] = None
1099 else:
1099 else:
1100 # We may not have walked the full directory tree above,
1100 # We may not have walked the full directory tree above,
1101 # so stat and check everything we missed.
1101 # so stat and check everything we missed.
1102 iv = iter(visit)
1102 iv = iter(visit)
1103 for st in util.statfiles([join(i) for i in visit]):
1103 for st in util.statfiles([join(i) for i in visit]):
1104 results[next(iv)] = st
1104 results[next(iv)] = st
1105 return results
1105 return results
1106
1106
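
The walk above proceeds in three passes: explicit files, directory traversal, then the dirstate entries never reached on disk. The shape of that algorithm can be shown with a self-contained toy; `tracked` and `ignored` below are stand-ins for the dirstate map and the ignore matcher, not Mercurial APIs.

import os
import stat

def tiny_walk(root, tracked, ignored):
    """Toy walk(): return {path: stat result, or None if missing}."""
    results = {}
    work = ['']  # step 1 analogue: start from the matched roots
    while work:  # step 2: recurse into directories that are not ignored
        nd = work.pop()
        for name in os.listdir(os.path.join(root, nd)):
            nf = nd + '/' + name if nd else name
            st = os.lstat(os.path.join(root, nf))
            if stat.S_ISDIR(st.st_mode):
                if not ignored(nf):
                    work.append(nf)
            elif nf in tracked or not ignored(nf):
                results[nf] = st
    for nf in tracked:  # step 3: tracked files never seen are missing
        results.setdefault(nf, None)
    return results
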
1107 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1107 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1108 # Force Rayon (Rust parallelism library) to respect the number of
1108 # Force Rayon (Rust parallelism library) to respect the number of
1109 # workers. This is a temporary workaround until Rust code knows
1109 # workers. This is a temporary workaround until Rust code knows
1110 # how to read the config file.
1110 # how to read the config file.
1111 numcpus = self._ui.configint(b"worker", b"numcpus")
1111 numcpus = self._ui.configint(b"worker", b"numcpus")
1112 if numcpus is not None:
1112 if numcpus is not None:
1113 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1113 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1114
1114
1115 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1115 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1116 if not workers_enabled:
1116 if not workers_enabled:
1117 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1117 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1118
1118
1119 (
1119 (
1120 lookup,
1120 lookup,
1121 modified,
1121 modified,
1122 added,
1122 added,
1123 removed,
1123 removed,
1124 deleted,
1124 deleted,
1125 clean,
1125 clean,
1126 ignored,
1126 ignored,
1127 unknown,
1127 unknown,
1128 warnings,
1128 warnings,
1129 bad,
1129 bad,
1130 traversed,
1130 traversed,
1131 dirty,
1131 dirty,
1132 ) = rustmod.status(
1132 ) = rustmod.status(
1133 self._map._map,
1133 self._map._map,
1134 matcher,
1134 matcher,
1135 self._rootdir,
1135 self._rootdir,
1136 self._ignorefiles(),
1136 self._ignorefiles(),
1137 self._checkexec,
1137 self._checkexec,
1138 bool(list_clean),
1138 bool(list_clean),
1139 bool(list_ignored),
1139 bool(list_ignored),
1140 bool(list_unknown),
1140 bool(list_unknown),
1141 bool(matcher.traversedir),
1141 bool(matcher.traversedir),
1142 )
1142 )
1143
1143
1144 self._dirty |= dirty
1144 self._dirty |= dirty
1145
1145
1146 if matcher.traversedir:
1146 if matcher.traversedir:
1147 for dir in traversed:
1147 for dir in traversed:
1148 matcher.traversedir(dir)
1148 matcher.traversedir(dir)
1149
1149
1150 if self._ui.warn:
1150 if self._ui.warn:
1151 for item in warnings:
1151 for item in warnings:
1152 if isinstance(item, tuple):
1152 if isinstance(item, tuple):
1153 file_path, syntax = item
1153 file_path, syntax = item
1154 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1154 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1155 file_path,
1155 file_path,
1156 syntax,
1156 syntax,
1157 )
1157 )
1158 self._ui.warn(msg)
1158 self._ui.warn(msg)
1159 else:
1159 else:
1160 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1160 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1161 self._ui.warn(
1161 self._ui.warn(
1162 msg
1162 msg
1163 % (
1163 % (
1164 pathutil.canonpath(
1164 pathutil.canonpath(
1165 self._rootdir, self._rootdir, item
1165 self._rootdir, self._rootdir, item
1166 ),
1166 ),
1167 b"No such file or directory",
1167 b"No such file or directory",
1168 )
1168 )
1169 )
1169 )
1170
1170
1171 for (fn, message) in bad:
1171 for (fn, message) in bad:
1172 matcher.bad(fn, encoding.strtolocal(message))
1172 matcher.bad(fn, encoding.strtolocal(message))
1173
1173
1174 status = scmutil.status(
1174 status = scmutil.status(
1175 modified=modified,
1175 modified=modified,
1176 added=added,
1176 added=added,
1177 removed=removed,
1177 removed=removed,
1178 deleted=deleted,
1178 deleted=deleted,
1179 unknown=unknown,
1179 unknown=unknown,
1180 ignored=ignored,
1180 ignored=ignored,
1181 clean=clean,
1181 clean=clean,
1182 )
1182 )
1183 return (lookup, status)
1183 return (lookup, status)
1184
1184
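
The environment-variable workaround above can be shown standalone. A minimal sketch, assuming a plain dict of config values (the real code reads them through ui.configint/ui.configbool):

import os

def configure_rayon(config):
    # mirror the configured worker count into Rayon's env var, but never
    # override a value the user already exported
    numcpus = config.get('worker.numcpus')
    if numcpus is not None:
        os.environ.setdefault('RAYON_NUM_THREADS', '%d' % numcpus)
    # disabled workers force the Rust status code to run single-threaded
    if not config.get('worker.enabled', True):
        os.environ['RAYON_NUM_THREADS'] = '1'
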
1185 def status(self, match, subrepos, ignored, clean, unknown):
1185 def status(self, match, subrepos, ignored, clean, unknown):
1186 """Determine the status of the working copy relative to the
1186 """Determine the status of the working copy relative to the
1187 dirstate and return a pair of (unsure, status), where status is of type
1187 dirstate and return a pair of (unsure, status), where status is of type
1188 scmutil.status and:
1188 scmutil.status and:
1189
1189
1190 unsure:
1190 unsure:
1191 files that might have been modified since the dirstate was
1191 files that might have been modified since the dirstate was
1192 written, but need to be read to be sure (size is the same
1192 written, but need to be read to be sure (size is the same
1193 but mtime differs)
1193 but mtime differs)
1194 status.modified:
1194 status.modified:
1195 files that have definitely been modified since the dirstate
1195 files that have definitely been modified since the dirstate
1196 was written (different size or mode)
1196 was written (different size or mode)
1197 status.clean:
1197 status.clean:
1198 files that have definitely not been modified since the
1198 files that have definitely not been modified since the
1199 dirstate was written
1199 dirstate was written
1200 """
1200 """
1201 listignored, listclean, listunknown = ignored, clean, unknown
1201 listignored, listclean, listunknown = ignored, clean, unknown
1202 lookup, modified, added, unknown, ignored = [], [], [], [], []
1202 lookup, modified, added, unknown, ignored = [], [], [], [], []
1203 removed, deleted, clean = [], [], []
1203 removed, deleted, clean = [], [], []
1204
1204
1205 dmap = self._map
1205 dmap = self._map
1206 dmap.preload()
1206 dmap.preload()
1207
1207
1208 use_rust = True
1208 use_rust = True
1209
1209
1210 allowed_matchers = (
1210 allowed_matchers = (
1211 matchmod.alwaysmatcher,
1211 matchmod.alwaysmatcher,
1212 matchmod.exactmatcher,
1212 matchmod.exactmatcher,
1213 matchmod.includematcher,
1213 matchmod.includematcher,
1214 )
1214 )
1215
1215
1216 if rustmod is None:
1216 if rustmod is None:
1217 use_rust = False
1217 use_rust = False
1218 elif self._checkcase:
1218 elif self._checkcase:
1219 # Case-insensitive filesystems are not handled yet
1219 # Case-insensitive filesystems are not handled yet
1220 use_rust = False
1220 use_rust = False
1221 elif subrepos:
1221 elif subrepos:
1222 use_rust = False
1222 use_rust = False
1223 elif sparse.enabled:
1223 elif sparse.enabled:
1224 use_rust = False
1224 use_rust = False
1225 elif not isinstance(match, allowed_matchers):
1225 elif not isinstance(match, allowed_matchers):
1226 # Some matchers have yet to be implemented
1226 # Some matchers have yet to be implemented
1227 use_rust = False
1227 use_rust = False
1228
1228
1229 # Get the time from the filesystem so we can disambiguate files that
1229 # Get the time from the filesystem so we can disambiguate files that
1230 # appear modified in the present or future.
1230 # appear modified in the present or future.
1231 try:
1231 try:
1232 mtime_boundary = timestamp.get_fs_now(self._opener)
1232 mtime_boundary = timestamp.get_fs_now(self._opener)
1233 except OSError:
1233 except OSError:
1234 # In largefiles or readonly context
1234 # In largefiles or readonly context
1235 mtime_boundary = None
1235 mtime_boundary = None
1236
1236
1237 if use_rust:
1237 if use_rust:
1238 try:
1238 try:
1239 res = self._rust_status(
1239 res = self._rust_status(
1240 match, listclean, listignored, listunknown
1240 match, listclean, listignored, listunknown
1241 )
1241 )
1242 return res + (mtime_boundary,)
1242 return res + (mtime_boundary,)
1243 except rustmod.FallbackError:
1243 except rustmod.FallbackError:
1244 pass
1244 pass
1245
1245
1246 def noop(f):
1246 def noop(f):
1247 pass
1247 pass
1248
1248
1249 dcontains = dmap.__contains__
1249 dcontains = dmap.__contains__
1250 dget = dmap.__getitem__
1250 dget = dmap.__getitem__
1251 ladd = lookup.append # aka "unsure"
1251 ladd = lookup.append # aka "unsure"
1252 madd = modified.append
1252 madd = modified.append
1253 aadd = added.append
1253 aadd = added.append
1254 uadd = unknown.append if listunknown else noop
1254 uadd = unknown.append if listunknown else noop
1255 iadd = ignored.append if listignored else noop
1255 iadd = ignored.append if listignored else noop
1256 radd = removed.append
1256 radd = removed.append
1257 dadd = deleted.append
1257 dadd = deleted.append
1258 cadd = clean.append if listclean else noop
1258 cadd = clean.append if listclean else noop
1259 mexact = match.exact
1259 mexact = match.exact
1260 dirignore = self._dirignore
1260 dirignore = self._dirignore
1261 checkexec = self._checkexec
1261 checkexec = self._checkexec
1262 checklink = self._checklink
1262 checklink = self._checklink
1263 copymap = self._map.copymap
1263 copymap = self._map.copymap
1264
1264
1265 # We need to do full walks when either
1265 # We need to do full walks when either
1266 # - we're listing all clean files, or
1266 # - we're listing all clean files, or
1267 # - match.traversedir does something, because match.traversedir should
1267 # - match.traversedir does something, because match.traversedir should
1268 # be called for every dir in the working dir
1268 # be called for every dir in the working dir
1269 full = listclean or match.traversedir is not None
1269 full = listclean or match.traversedir is not None
1270 for fn, st in pycompat.iteritems(
1270 for fn, st in pycompat.iteritems(
1271 self.walk(match, subrepos, listunknown, listignored, full=full)
1271 self.walk(match, subrepos, listunknown, listignored, full=full)
1272 ):
1272 ):
1273 if not dcontains(fn):
1273 if not dcontains(fn):
1274 if (listignored or mexact(fn)) and dirignore(fn):
1274 if (listignored or mexact(fn)) and dirignore(fn):
1275 if listignored:
1275 if listignored:
1276 iadd(fn)
1276 iadd(fn)
1277 else:
1277 else:
1278 uadd(fn)
1278 uadd(fn)
1279 continue
1279 continue
1280
1280
1281 t = dget(fn)
1281 t = dget(fn)
1282 mode = t.mode
1282 mode = t.mode
1283 size = t.size
1283 size = t.size
1284
1284
1285 if not st and t.tracked:
1285 if not st and t.tracked:
1286 dadd(fn)
1286 dadd(fn)
1287 elif t.p2_info:
1287 elif t.p2_info:
1288 madd(fn)
1288 madd(fn)
1289 elif t.added:
1289 elif t.added:
1290 aadd(fn)
1290 aadd(fn)
1291 elif t.removed:
1291 elif t.removed:
1292 radd(fn)
1292 radd(fn)
1293 elif t.tracked:
1293 elif t.tracked:
1294 if not checklink and t.has_fallback_symlink:
1294 if not checklink and t.has_fallback_symlink:
1295 # If the file system does not support symlink, the mode
1295 # If the file system does not support symlink, the mode
1296 # might not be correctly stored in the dirstate, so do not
1296 # might not be correctly stored in the dirstate, so do not
1297 # trust it.
1297 # trust it.
1298 ladd(fn)
1298 ladd(fn)
1299 elif not checkexec and t.has_fallback_exec:
1299 elif not checkexec and t.has_fallback_exec:
1300 # If the file system does not support exec bits, the mode
1300 # If the file system does not support exec bits, the mode
1301 # might not be correctly stored in the dirstate, so do not
1301 # might not be correctly stored in the dirstate, so do not
1302 # trust it.
1302 # trust it.
1303 ladd(fn)
1303 ladd(fn)
1304 elif (
1304 elif (
1305 size >= 0
1305 size >= 0
1306 and (
1306 and (
1307 (size != st.st_size and size != st.st_size & _rangemask)
1307 (size != st.st_size and size != st.st_size & _rangemask)
1308 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1308 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1309 )
1309 )
1310 or fn in copymap
1310 or fn in copymap
1311 ):
1311 ):
1312 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1312 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1313 # issue6456: Size returned may be longer due to
1313 # issue6456: Size returned may be longer due to
1314 # encryption on EXT-4 fscrypt, undecided.
1314 # encryption on EXT-4 fscrypt, undecided.
1315 ladd(fn)
1315 ladd(fn)
1316 else:
1316 else:
1317 madd(fn)
1317 madd(fn)
1318 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1318 elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
1319 # There might be a change in the future if, for example, the
1319 # There might be a change in the future if, for example, the
1320 # internal clock is off, but in that case the issues the user
1320 # internal clock is off, but in that case the issues the user
1321 # would face would be a lot worse and there is
1321 # would face would be a lot worse and there is
1322 # nothing we can really do.
1322 # nothing we can really do.
1323 ladd(fn)
1323 ladd(fn)
1324 elif listclean:
1324 elif listclean:
1325 cadd(fn)
1325 cadd(fn)
1326 status = scmutil.status(
1326 status = scmutil.status(
1327 modified, added, removed, deleted, unknown, ignored, clean
1327 modified, added, removed, deleted, unknown, ignored, clean
1328 )
1328 )
1329 return (lookup, status, mtime_boundary)
1329 return (lookup, status, mtime_boundary)
1330
1330
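
The unsure/modified/clean contract in the docstring boils down to the size/mode/mtime comparison applied above. A minimal sketch of that decision, assuming plain ints for the recorded fields (the real code goes through DirstateItem and the timestamp helpers):

def classify(mode, size, mtime, st):
    """Return 'deleted', 'modified', 'unsure' or 'clean' for one file."""
    if st is None:                       # tracked but gone from disk
        return 'deleted'
    exec_changed = (mode ^ st.st_mode) & 0o100
    if size >= 0 and (size != st.st_size or exec_changed):
        return 'modified'                # definitely changed
    if int(mtime) != int(st.st_mtime):
        return 'unsure'                  # same size; content must be read
    return 'clean'
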
1331 def matches(self, match):
1331 def matches(self, match):
1332 """
1332 """
1333 return files in the dirstate (in whatever state) filtered by match
1333 return files in the dirstate (in whatever state) filtered by match
1334 """
1334 """
1335 dmap = self._map
1335 dmap = self._map
1336 if rustmod is not None:
1336 if rustmod is not None:
1337 dmap = self._map._map
1337 dmap = self._map._map
1338
1338
1339 if match.always():
1339 if match.always():
1340 return dmap.keys()
1340 return dmap.keys()
1341 files = match.files()
1341 files = match.files()
1342 if match.isexact():
1342 if match.isexact():
1343 # fast path -- filter the other way around, since typically files is
1343 # fast path -- filter the other way around, since typically files is
1344 # much smaller than dmap
1344 # much smaller than dmap
1345 return [f for f in files if f in dmap]
1345 return [f for f in files if f in dmap]
1346 if match.prefix() and all(fn in dmap for fn in files):
1346 if match.prefix() and all(fn in dmap for fn in files):
1347 # fast path -- all the values are known to be files, so just return
1347 # fast path -- all the values are known to be files, so just return
1348 # that
1348 # that
1349 return list(files)
1349 return list(files)
1350 return [f for f in dmap if match(f)]
1350 return [f for f in dmap if match(f)]
1351
1351
1352 def _actualfilename(self, tr):
1352 def _actualfilename(self, tr):
1353 if tr:
1353 if tr:
1354 return self._pendingfilename
1354 return self._pendingfilename
1355 else:
1355 else:
1356 return self._filename
1356 return self._filename
1357
1357
1358 def savebackup(self, tr, backupname):
1358 def savebackup(self, tr, backupname):
1359 '''Save current dirstate into backup file'''
1359 '''Save current dirstate into backup file'''
1360 filename = self._actualfilename(tr)
1360 filename = self._actualfilename(tr)
1361 assert backupname != filename
1361 assert backupname != filename
1362
1362
1363 # use '_writedirstate' instead of 'write' to make sure changes are written,
1363 # use '_writedirstate' instead of 'write' to make sure changes are written,
1364 # because the latter skips writing out while a transaction is running.
1364 # because the latter skips writing out while a transaction is running.
1365 # the output file will be used to create a backup of the dirstate at this point.
1365 # the output file will be used to create a backup of the dirstate at this point.
1366 if self._dirty or not self._opener.exists(filename):
1366 if self._dirty or not self._opener.exists(filename):
1367 self._writedirstate(
1367 self._writedirstate(
1368 tr,
1368 tr,
1369 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1369 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1370 )
1370 )
1371
1371
1372 if tr:
1372 if tr:
1373 # ensure that subsequent tr.writepending returns True for
1373 # ensure that subsequent tr.writepending returns True for
1374 # changes written out above, even if dirstate is never
1374 # changes written out above, even if dirstate is never
1375 # changed after this
1375 # changed after this
1376 tr.addfilegenerator(
1376 tr.addfilegenerator(
1377 b'dirstate',
1377 b'dirstate-1-main',
1378 (self._filename,),
1378 (self._filename,),
1379 lambda f: self._writedirstate(tr, f),
1379 lambda f: self._writedirstate(tr, f),
1380 location=b'plain',
1380 location=b'plain',
1381 )
1381 )
1382
1382
1383 # ensure that pending file written above is unlinked at
1383 # ensure that pending file written above is unlinked at
1384 # failure, even if tr.writepending isn't invoked until the
1384 # failure, even if tr.writepending isn't invoked until the
1385 # end of this transaction
1385 # end of this transaction
1386 tr.registertmp(filename, location=b'plain')
1386 tr.registertmp(filename, location=b'plain')
1387
1387
1388 self._opener.tryunlink(backupname)
1388 self._opener.tryunlink(backupname)
1389 # hardlink backup is okay because _writedirstate is always called
1389 # hardlink backup is okay because _writedirstate is always called
1390 # with an "atomictemp=True" file.
1390 # with an "atomictemp=True" file.
1391 util.copyfile(
1391 util.copyfile(
1392 self._opener.join(filename),
1392 self._opener.join(filename),
1393 self._opener.join(backupname),
1393 self._opener.join(backupname),
1394 hardlink=True,
1394 hardlink=True,
1395 )
1395 )
1396
1396
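
The hardlink backup above is safe only because the dirstate is always replaced atomically (written to a temp file, then renamed), so a pre-existing hardlink keeps pointing at the old content. A minimal sketch of that pattern, assuming a POSIX filesystem:

import os
import tempfile

def atomic_write(path, data):
    # write to a temp file in the same directory, then rename over the
    # target; a hardlink made beforehand still points at the old inode
    d = os.path.dirname(path) or '.'
    fd, tmp = tempfile.mkstemp(dir=d)
    try:
        with os.fdopen(fd, 'wb') as fp:
            fp.write(data)
        os.rename(tmp, path)
    except Exception:
        os.unlink(tmp)
        raise

def hardlink_backup(path, backupname):
    # cheap backup: share the inode, since atomic_write never truncates in place
    if os.path.exists(backupname):
        os.unlink(backupname)
    os.link(path, backupname)
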
1397 def restorebackup(self, tr, backupname):
1397 def restorebackup(self, tr, backupname):
1398 '''Restore dirstate by backup file'''
1398 '''Restore dirstate by backup file'''
1399 # this "invalidate()" prevents "wlock.release()" from writing
1399 # this "invalidate()" prevents "wlock.release()" from writing
1400 # changes of dirstate out after restoring from backup file
1400 # changes of dirstate out after restoring from backup file
1401 self.invalidate()
1401 self.invalidate()
1402 filename = self._actualfilename(tr)
1402 filename = self._actualfilename(tr)
1403 o = self._opener
1403 o = self._opener
1404 if util.samefile(o.join(backupname), o.join(filename)):
1404 if util.samefile(o.join(backupname), o.join(filename)):
1405 o.unlink(backupname)
1405 o.unlink(backupname)
1406 else:
1406 else:
1407 o.rename(backupname, filename, checkambig=True)
1407 o.rename(backupname, filename, checkambig=True)
1408
1408
1409 def clearbackup(self, tr, backupname):
1409 def clearbackup(self, tr, backupname):
1410 '''Clear backup file'''
1410 '''Clear backup file'''
1411 self._opener.unlink(backupname)
1411 self._opener.unlink(backupname)
1412
1412
1413 def verify(self, m1, m2):
1413 def verify(self, m1, m2):
1414 """check the dirstate content again the parent manifest and yield errors"""
1414 """check the dirstate content again the parent manifest and yield errors"""
1415 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1415 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1416 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1416 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1417 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1417 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1418 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1418 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1419 for f, entry in self.items():
1419 for f, entry in self.items():
1420 state = entry.state
1420 state = entry.state
1421 if state in b"nr" and f not in m1:
1421 if state in b"nr" and f not in m1:
1422 yield (missing_from_p1, f, state)
1422 yield (missing_from_p1, f, state)
1423 if state in b"a" and f in m1:
1423 if state in b"a" and f in m1:
1424 yield (unexpected_in_p1, f, state)
1424 yield (unexpected_in_p1, f, state)
1425 if state in b"m" and f not in m1 and f not in m2:
1425 if state in b"m" and f not in m1 and f not in m2:
1426 yield (missing_from_ps, f, state)
1426 yield (missing_from_ps, f, state)
1427 for f in m1:
1427 for f in m1:
1428 state = self.get_entry(f).state
1428 state = self.get_entry(f).state
1429 if state not in b"nrm":
1429 if state not in b"nrm":
1430 yield (missing_from_ds, f, state)
1430 yield (missing_from_ds, f, state)
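
A hedged sketch of consuming the generator above: each yielded tuple is a message template plus the filename and state to interpolate into it (`write` here is any bytes-printing callable, an assumption for illustration):

def report_dirstate_errors(dirstate, m1, m2, write):
    # count and print every inconsistency verify() finds
    errors = 0
    for template, f, state in dirstate.verify(m1, m2):
        write(template % (f, state))
        errors += 1
    return errors
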
@@ -1,768 +1,771 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 pycompat,
21 pycompat,
22 util,
22 util,
23 )
23 )
24 from .utils import stringutil
24 from .utils import stringutil
25
25
26 version = 2
26 version = 2
27
27
28 # These are the file generators that should only be executed after the
28 # These are the file generators that should only be executed after the
29 # finalizers are done, since they rely on the output of the finalizers (like
29 # finalizers are done, since they rely on the output of the finalizers (like
30 # the changelog having been written).
30 # the changelog having been written).
31 postfinalizegenerators = {b'bookmarks', b'dirstate'}
31 postfinalizegenerators = {
32 b'bookmarks',
33 b'dirstate-1-main',
34 }
32
35
33 GEN_GROUP_ALL = b'all'
36 GEN_GROUP_ALL = b'all'
34 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
37 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
35 GEN_GROUP_POST_FINALIZE = b'postfinalize'
38 GEN_GROUP_POST_FINALIZE = b'postfinalize'
36
39
37
40
38 def active(func):
41 def active(func):
39 def _active(self, *args, **kwds):
42 def _active(self, *args, **kwds):
40 if self._count == 0:
43 if self._count == 0:
41 raise error.ProgrammingError(
44 raise error.ProgrammingError(
42 b'cannot use transaction when it is already committed/aborted'
45 b'cannot use transaction when it is already committed/aborted'
43 )
46 )
44 return func(self, *args, **kwds)
47 return func(self, *args, **kwds)
45
48
46 return _active
49 return _active
47
50
48
51
49 def _playback(
52 def _playback(
50 journal,
53 journal,
51 report,
54 report,
52 opener,
55 opener,
53 vfsmap,
56 vfsmap,
54 entries,
57 entries,
55 backupentries,
58 backupentries,
56 unlink=True,
59 unlink=True,
57 checkambigfiles=None,
60 checkambigfiles=None,
58 ):
61 ):
59 for f, o in sorted(dict(entries).items()):
62 for f, o in sorted(dict(entries).items()):
60 if o or not unlink:
63 if o or not unlink:
61 checkambig = checkambigfiles and (f, b'') in checkambigfiles
64 checkambig = checkambigfiles and (f, b'') in checkambigfiles
62 try:
65 try:
63 fp = opener(f, b'a', checkambig=checkambig)
66 fp = opener(f, b'a', checkambig=checkambig)
64 if fp.tell() < o:
67 if fp.tell() < o:
65 raise error.Abort(
68 raise error.Abort(
66 _(
69 _(
67 b"attempted to truncate %s to %d bytes, but it was "
70 b"attempted to truncate %s to %d bytes, but it was "
68 b"already %d bytes\n"
71 b"already %d bytes\n"
69 )
72 )
70 % (f, o, fp.tell())
73 % (f, o, fp.tell())
71 )
74 )
72 fp.truncate(o)
75 fp.truncate(o)
73 fp.close()
76 fp.close()
74 except IOError:
77 except IOError:
75 report(_(b"failed to truncate %s\n") % f)
78 report(_(b"failed to truncate %s\n") % f)
76 raise
79 raise
77 else:
80 else:
78 try:
81 try:
79 opener.unlink(f)
82 opener.unlink(f)
80 except (IOError, OSError) as inst:
83 except (IOError, OSError) as inst:
81 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
82 raise
85 raise
83
86
84 backupfiles = []
87 backupfiles = []
85 for l, f, b, c in backupentries:
88 for l, f, b, c in backupentries:
86 if l not in vfsmap and c:
89 if l not in vfsmap and c:
87 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
90 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
88 vfs = vfsmap[l]
91 vfs = vfsmap[l]
89 try:
92 try:
90 if f and b:
93 if f and b:
91 filepath = vfs.join(f)
94 filepath = vfs.join(f)
92 backuppath = vfs.join(b)
95 backuppath = vfs.join(b)
93 checkambig = checkambigfiles and (f, l) in checkambigfiles
96 checkambig = checkambigfiles and (f, l) in checkambigfiles
94 try:
97 try:
95 util.copyfile(backuppath, filepath, checkambig=checkambig)
98 util.copyfile(backuppath, filepath, checkambig=checkambig)
96 backupfiles.append(b)
99 backupfiles.append(b)
97 except IOError as exc:
100 except IOError as exc:
98 e_msg = stringutil.forcebytestr(exc)
101 e_msg = stringutil.forcebytestr(exc)
99 report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
102 report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
100 else:
103 else:
101 target = f or b
104 target = f or b
102 try:
105 try:
103 vfs.unlink(target)
106 vfs.unlink(target)
104 except (IOError, OSError) as inst:
107 except (IOError, OSError) as inst:
105 if inst.errno != errno.ENOENT:
108 if inst.errno != errno.ENOENT:
106 raise
109 raise
107 except (IOError, OSError, error.Abort):
110 except (IOError, OSError, error.Abort):
108 if not c:
111 if not c:
109 raise
112 raise
110
113
111 backuppath = b"%s.backupfiles" % journal
114 backuppath = b"%s.backupfiles" % journal
112 if opener.exists(backuppath):
115 if opener.exists(backuppath):
113 opener.unlink(backuppath)
116 opener.unlink(backuppath)
114 opener.unlink(journal)
117 opener.unlink(journal)
115 try:
118 try:
116 for f in backupfiles:
119 for f in backupfiles:
117 if opener.exists(f):
120 if opener.exists(f):
118 opener.unlink(f)
121 opener.unlink(f)
119 except (IOError, OSError, error.Abort):
122 except (IOError, OSError, error.Abort):
120 # only the pure backup file remains; it is safe to ignore any error
123 # only the pure backup file remains; it is safe to ignore any error
121 pass
124 pass
122
125
123
126
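
The journal consumed by _playback is the record format written by add()/_addentry() further down: one "<file>\0<offset>\n" line per touched file, with offset 0 marking a file the transaction created. A toy reader and rollback, with `open_file` and `unlink` standing in for the vfs:

def read_journal(data):
    # parse the journal bytes into (name, offset) pairs
    entries = []
    for line in data.splitlines():
        name, offset = line.split(b'\0')
        entries.append((name, int(offset)))
    return entries

def rollback(entries, open_file, unlink):
    # toy _playback(): truncate appended data, remove created files
    for name, offset in sorted(dict(entries).items()):
        if offset:
            with open_file(name) as fp:
                fp.truncate(offset)
        else:
            unlink(name)
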
124 class transaction(util.transactional):
127 class transaction(util.transactional):
125 def __init__(
128 def __init__(
126 self,
129 self,
127 report,
130 report,
128 opener,
131 opener,
129 vfsmap,
132 vfsmap,
130 journalname,
133 journalname,
131 undoname=None,
134 undoname=None,
132 after=None,
135 after=None,
133 createmode=None,
136 createmode=None,
134 validator=None,
137 validator=None,
135 releasefn=None,
138 releasefn=None,
136 checkambigfiles=None,
139 checkambigfiles=None,
137 name='<unnamed>',
140 name='<unnamed>',
138 ):
141 ):
139 """Begin a new transaction
142 """Begin a new transaction
140
143
141 Begins a new transaction that allows rolling back writes in the event of
144 Begins a new transaction that allows rolling back writes in the event of
142 an exception.
145 an exception.
143
146
144 * `after`: called after the transaction has been committed
147 * `after`: called after the transaction has been committed
145 * `createmode`: the mode of the journal file that will be created
148 * `createmode`: the mode of the journal file that will be created
146 * `releasefn`: called after releasing (with transaction and result)
149 * `releasefn`: called after releasing (with transaction and result)
147
150
148 `checkambigfiles` is a set of (path, vfs-location) tuples,
151 `checkambigfiles` is a set of (path, vfs-location) tuples,
149 which determine whether file stat ambiguity should be avoided
152 which determine whether file stat ambiguity should be avoided
150 for the corresponding files.
153 for the corresponding files.
151 """
154 """
152 self._count = 1
155 self._count = 1
153 self._usages = 1
156 self._usages = 1
154 self._report = report
157 self._report = report
155 # a vfs to the store content
158 # a vfs to the store content
156 self._opener = opener
159 self._opener = opener
157 # a map to access file in various {location -> vfs}
160 # a map to access file in various {location -> vfs}
158 vfsmap = vfsmap.copy()
161 vfsmap = vfsmap.copy()
159 vfsmap[b''] = opener # set default value
162 vfsmap[b''] = opener # set default value
160 self._vfsmap = vfsmap
163 self._vfsmap = vfsmap
161 self._after = after
164 self._after = after
162 self._offsetmap = {}
165 self._offsetmap = {}
163 self._newfiles = set()
166 self._newfiles = set()
164 self._journal = journalname
167 self._journal = journalname
165 self._undoname = undoname
168 self._undoname = undoname
166 self._queue = []
169 self._queue = []
167 # A callback to do something just after releasing transaction.
170 # A callback to do something just after releasing transaction.
168 if releasefn is None:
171 if releasefn is None:
169 releasefn = lambda tr, success: None
172 releasefn = lambda tr, success: None
170 self._releasefn = releasefn
173 self._releasefn = releasefn
171
174
172 self._checkambigfiles = set()
175 self._checkambigfiles = set()
173 if checkambigfiles:
176 if checkambigfiles:
174 self._checkambigfiles.update(checkambigfiles)
177 self._checkambigfiles.update(checkambigfiles)
175
178
176 self._names = [name]
179 self._names = [name]
177
180
178 # A dict dedicated to precisely tracking the changes introduced in the
181 # A dict dedicated to precisely tracking the changes introduced in the
179 # transaction.
182 # transaction.
180 self.changes = {}
183 self.changes = {}
181
184
182 # a dict of arguments to be passed to hooks
185 # a dict of arguments to be passed to hooks
183 self.hookargs = {}
186 self.hookargs = {}
184 self._file = opener.open(self._journal, b"w+")
187 self._file = opener.open(self._journal, b"w+")
185
188
186 # a list of ('location', 'path', 'backuppath', cache) entries.
189 # a list of ('location', 'path', 'backuppath', cache) entries.
187 # - if 'backuppath' is empty, no file existed at backup time
190 # - if 'backuppath' is empty, no file existed at backup time
188 # - if 'path' is empty, this is a temporary transaction file
191 # - if 'path' is empty, this is a temporary transaction file
189 # - if 'location' is not empty, the path is outside main opener reach.
192 # - if 'location' is not empty, the path is outside main opener reach.
190 # use 'location' value as a key in a vfsmap to find the right 'vfs'
193 # use 'location' value as a key in a vfsmap to find the right 'vfs'
191 # (cache is currently unused)
194 # (cache is currently unused)
192 self._backupentries = []
195 self._backupentries = []
193 self._backupmap = {}
196 self._backupmap = {}
194 self._backupjournal = b"%s.backupfiles" % self._journal
197 self._backupjournal = b"%s.backupfiles" % self._journal
195 self._backupsfile = opener.open(self._backupjournal, b'w')
198 self._backupsfile = opener.open(self._backupjournal, b'w')
196 self._backupsfile.write(b'%d\n' % version)
199 self._backupsfile.write(b'%d\n' % version)
197
200
198 if createmode is not None:
201 if createmode is not None:
199 opener.chmod(self._journal, createmode & 0o666)
202 opener.chmod(self._journal, createmode & 0o666)
200 opener.chmod(self._backupjournal, createmode & 0o666)
203 opener.chmod(self._backupjournal, createmode & 0o666)
201
204
202 # hold file generations to be performed on commit
205 # hold file generations to be performed on commit
203 self._filegenerators = {}
206 self._filegenerators = {}
204 # hold callback to write pending data for hooks
207 # hold callback to write pending data for hooks
205 self._pendingcallback = {}
208 self._pendingcallback = {}
206 # True if any pending data has ever been written
209 # True if any pending data has ever been written
207 self._anypending = False
210 self._anypending = False
208 # holds callback to call when writing the transaction
211 # holds callback to call when writing the transaction
209 self._finalizecallback = {}
212 self._finalizecallback = {}
210 # holds callback to call when validating the transaction
213 # holds callback to call when validating the transaction
211 # should raise exception if anything is wrong
214 # should raise exception if anything is wrong
212 self._validatecallback = {}
215 self._validatecallback = {}
213 if validator is not None:
216 if validator is not None:
214 self._validatecallback[b'001-userhooks'] = validator
217 self._validatecallback[b'001-userhooks'] = validator
215 # hold callback for post transaction close
218 # hold callback for post transaction close
216 self._postclosecallback = {}
219 self._postclosecallback = {}
217 # holds callbacks to call during abort
220 # holds callbacks to call during abort
218 self._abortcallback = {}
221 self._abortcallback = {}
219
222
220 def __repr__(self):
223 def __repr__(self):
221 name = '/'.join(self._names)
224 name = '/'.join(self._names)
222 return '<transaction name=%s, count=%d, usages=%d>' % (
225 return '<transaction name=%s, count=%d, usages=%d>' % (
223 name,
226 name,
224 self._count,
227 self._count,
225 self._usages,
228 self._usages,
226 )
229 )
227
230
228 def __del__(self):
231 def __del__(self):
229 if self._journal:
232 if self._journal:
230 self._abort()
233 self._abort()
231
234
232 @property
235 @property
233 def finalized(self):
236 def finalized(self):
234 return self._finalizecallback is None
237 return self._finalizecallback is None
235
238
236 @active
239 @active
237 def startgroup(self):
240 def startgroup(self):
238 """delay registration of file entry
241 """delay registration of file entry
239
242
240 This is used by strip to delay vision of strip offset. The transaction
243 This is used by strip to delay vision of strip offset. The transaction
241 sees either none or all of the strip actions to be done."""
244 sees either none or all of the strip actions to be done."""
242 self._queue.append([])
245 self._queue.append([])
243
246
244 @active
247 @active
245 def endgroup(self):
248 def endgroup(self):
246 """apply delayed registration of file entry.
249 """apply delayed registration of file entry.
247
250
248 This is used by strip to delay vision of strip offset. The transaction
251 This is used by strip to delay vision of strip offset. The transaction
249 sees either none or all of the strip actions to be done."""
252 sees either none or all of the strip actions to be done."""
250 q = self._queue.pop()
253 q = self._queue.pop()
251 for f, o in q:
254 for f, o in q:
252 self._addentry(f, o)
255 self._addentry(f, o)
253
256
254 @active
257 @active
255 def add(self, file, offset):
258 def add(self, file, offset):
256 """record the state of an append-only file before update"""
259 """record the state of an append-only file before update"""
257 if (
260 if (
258 file in self._newfiles
261 file in self._newfiles
259 or file in self._offsetmap
262 or file in self._offsetmap
260 or file in self._backupmap
263 or file in self._backupmap
261 ):
264 ):
262 return
265 return
263 if self._queue:
266 if self._queue:
264 self._queue[-1].append((file, offset))
267 self._queue[-1].append((file, offset))
265 return
268 return
266
269
267 self._addentry(file, offset)
270 self._addentry(file, offset)
268
271
269 def _addentry(self, file, offset):
272 def _addentry(self, file, offset):
270 """add a append-only entry to memory and on-disk state"""
273 """add a append-only entry to memory and on-disk state"""
271 if (
274 if (
272 file in self._newfiles
275 file in self._newfiles
273 or file in self._offsetmap
276 or file in self._offsetmap
274 or file in self._backupmap
277 or file in self._backupmap
275 ):
278 ):
276 return
279 return
277 if offset:
280 if offset:
278 self._offsetmap[file] = offset
281 self._offsetmap[file] = offset
279 else:
282 else:
280 self._newfiles.add(file)
283 self._newfiles.add(file)
281 # add enough data to the journal to do the truncate
284 # add enough data to the journal to do the truncate
282 self._file.write(b"%s\0%d\n" % (file, offset))
285 self._file.write(b"%s\0%d\n" % (file, offset))
283 self._file.flush()
286 self._file.flush()
284
287
285 @active
288 @active
286 def addbackup(self, file, hardlink=True, location=b''):
289 def addbackup(self, file, hardlink=True, location=b''):
287 """Adds a backup of the file to the transaction
290 """Adds a backup of the file to the transaction
288
291
289 Calling addbackup() creates a hardlink backup of the specified file
292 Calling addbackup() creates a hardlink backup of the specified file
290 that is used to recover the file in the event of the transaction
293 that is used to recover the file in the event of the transaction
291 aborting.
294 aborting.
292
295
293 * `file`: the file path, relative to .hg/store
296 * `file`: the file path, relative to .hg/store
294 * `hardlink`: use a hardlink to quickly create the backup
297 * `hardlink`: use a hardlink to quickly create the backup
295 """
298 """
296 if self._queue:
299 if self._queue:
297 msg = b'cannot use transaction.addbackup inside "group"'
300 msg = b'cannot use transaction.addbackup inside "group"'
298 raise error.ProgrammingError(msg)
301 raise error.ProgrammingError(msg)
299
302
300 if (
303 if (
301 file in self._newfiles
304 file in self._newfiles
302 or file in self._offsetmap
305 or file in self._offsetmap
303 or file in self._backupmap
306 or file in self._backupmap
304 ):
307 ):
305 return
308 return
306 vfs = self._vfsmap[location]
309 vfs = self._vfsmap[location]
307 dirname, filename = vfs.split(file)
310 dirname, filename = vfs.split(file)
308 backupfilename = b"%s.backup.%s" % (self._journal, filename)
311 backupfilename = b"%s.backup.%s" % (self._journal, filename)
309 backupfile = vfs.reljoin(dirname, backupfilename)
312 backupfile = vfs.reljoin(dirname, backupfilename)
310 if vfs.exists(file):
313 if vfs.exists(file):
311 filepath = vfs.join(file)
314 filepath = vfs.join(file)
312 backuppath = vfs.join(backupfile)
315 backuppath = vfs.join(backupfile)
313 util.copyfile(filepath, backuppath, hardlink=hardlink)
316 util.copyfile(filepath, backuppath, hardlink=hardlink)
314 else:
317 else:
315 backupfile = b''
318 backupfile = b''
316
319
317 self._addbackupentry((location, file, backupfile, False))
320 self._addbackupentry((location, file, backupfile, False))
318
321
319 def _addbackupentry(self, entry):
322 def _addbackupentry(self, entry):
320 """register a new backup entry and write it to disk"""
323 """register a new backup entry and write it to disk"""
321 self._backupentries.append(entry)
324 self._backupentries.append(entry)
322 self._backupmap[entry[1]] = len(self._backupentries) - 1
325 self._backupmap[entry[1]] = len(self._backupentries) - 1
323 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
326 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
324 self._backupsfile.flush()
327 self._backupsfile.flush()
325
328
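
A standalone sketch of the backup decision above: hardlink when possible, fall back to a copy, and record an empty name when the file did not exist yet. Paths and helpers here are illustrative, not the vfs API:

import os
import shutil

def add_backup(path, journal):
    # toy addbackup(): put the backup next to the journal
    backup = '%s.backup.%s' % (journal, os.path.basename(path))
    if os.path.exists(path):
        try:
            os.link(path, backup)          # fast hardlink backup
        except OSError:
            shutil.copyfile(path, backup)  # cross-device fallback
    else:
        backup = ''                        # "file did not exist yet"
    return backup
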
326 @active
329 @active
327 def registertmp(self, tmpfile, location=b''):
330 def registertmp(self, tmpfile, location=b''):
328 """register a temporary transaction file
331 """register a temporary transaction file
329
332
330 Such files will be deleted when the transaction exits (on both
333 Such files will be deleted when the transaction exits (on both
331 failure and success).
334 failure and success).
332 """
335 """
333 self._addbackupentry((location, b'', tmpfile, False))
336 self._addbackupentry((location, b'', tmpfile, False))
334
337
335 @active
338 @active
336 def addfilegenerator(
339 def addfilegenerator(
337 self, genid, filenames, genfunc, order=0, location=b''
340 self, genid, filenames, genfunc, order=0, location=b''
338 ):
341 ):
339 """add a function to generates some files at transaction commit
342 """add a function to generates some files at transaction commit
340
343
341 The `genfunc` argument is a function capable of generating proper
344 The `genfunc` argument is a function capable of generating proper
342 content of each entry in the `filenames` tuple.
345 content of each entry in the `filenames` tuple.
343
346
344 At transaction close time, `genfunc` will be called with one file
347 At transaction close time, `genfunc` will be called with one file
345 object argument per entry in `filenames`.
348 object argument per entry in `filenames`.
346
349
347 The transaction itself is responsible for the backup, creation and
350 The transaction itself is responsible for the backup, creation and
348 final write of such files.
351 final write of such files.
349
352
350 The `genid` argument is used to ensure the same set of files is only
353 The `genid` argument is used to ensure the same set of files is only
351 generated once. A call to `addfilegenerator` for a `genid` already
354 generated once. A call to `addfilegenerator` for a `genid` already
352 present will overwrite the old entry.
355 present will overwrite the old entry.
353
356
354 The `order` argument may be used to control the order in which multiple
357 The `order` argument may be used to control the order in which multiple
355 generators will be executed.
358 generators will be executed.
356
359
357 The `location` argument may be used to indicate the files are located
360 The `location` argument may be used to indicate the files are located
358 outside of the standard transaction directory. It should match
361 outside of the standard transaction directory. It should match
359 one of the keys of the `transaction.vfsmap` dictionary.
362 one of the keys of the `transaction.vfsmap` dictionary.
360 """
363 """
361 # For now, we are unable to do proper backup and restore on a custom vfs,
364 # For now, we are unable to do proper backup and restore on a custom vfs,
362 # except for bookmarks, which are handled outside this mechanism.
365 # except for bookmarks, which are handled outside this mechanism.
363 self._filegenerators[genid] = (order, filenames, genfunc, location)
366 self._filegenerators[genid] = (order, filenames, genfunc, location)
364
367
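
A usage sketch modeled on the dirstate call earlier in this changeset; `tr` is an open transaction, and the comments restate the docstring:

tr.addfilegenerator(
    b'dirstate-1-main',         # genid: re-registering replaces the entry
    (b'dirstate',),             # one open file object per name at close time
    lambda f: f.write(b'...'),  # genfunc, called with those file objects
    location=b'plain',          # looked up in transaction.vfsmap
)
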
365 @active
368 @active
366 def removefilegenerator(self, genid):
369 def removefilegenerator(self, genid):
367 """reverse of addfilegenerator, remove a file generator function"""
370 """reverse of addfilegenerator, remove a file generator function"""
368 if genid in self._filegenerators:
371 if genid in self._filegenerators:
369 del self._filegenerators[genid]
372 del self._filegenerators[genid]
370
373
371 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
374 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
372 # write files registered for generation
375 # write files registered for generation
373 any = False
376 any = False
374
377
375 if group == GEN_GROUP_ALL:
378 if group == GEN_GROUP_ALL:
376 skip_post = skip_pre = False
379 skip_post = skip_pre = False
377 else:
380 else:
378 skip_pre = group == GEN_GROUP_POST_FINALIZE
381 skip_pre = group == GEN_GROUP_POST_FINALIZE
379 skip_post = group == GEN_GROUP_PRE_FINALIZE
382 skip_post = group == GEN_GROUP_PRE_FINALIZE
380
383
381 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
384 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
382 any = True
385 any = True
383 order, filenames, genfunc, location = entry
386 order, filenames, genfunc, location = entry
384
387
385 # for generation at closing, check if it's before or after finalize
388 # for generation at closing, check if it's before or after finalize
386 is_post = id in postfinalizegenerators
389 is_post = id in postfinalizegenerators
387 if skip_post and is_post:
390 if skip_post and is_post:
388 continue
391 continue
389 elif skip_pre and not is_post:
392 elif skip_pre and not is_post:
390 continue
393 continue
391
394
392 vfs = self._vfsmap[location]
395 vfs = self._vfsmap[location]
393 files = []
396 files = []
394 try:
397 try:
395 for name in filenames:
398 for name in filenames:
396 name += suffix
399 name += suffix
397 if suffix:
400 if suffix:
398 self.registertmp(name, location=location)
401 self.registertmp(name, location=location)
399 checkambig = False
402 checkambig = False
400 else:
403 else:
401 self.addbackup(name, location=location)
404 self.addbackup(name, location=location)
402 checkambig = (name, location) in self._checkambigfiles
405 checkambig = (name, location) in self._checkambigfiles
403 files.append(
406 files.append(
404 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
407 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
405 )
408 )
406 genfunc(*files)
409 genfunc(*files)
407 for f in files:
410 for f in files:
408 f.close()
411 f.close()
409 # skip discard() loop since we're sure no open file remains
412 # skip discard() loop since we're sure no open file remains
410 del files[:]
413 del files[:]
411 finally:
414 finally:
412 for f in files:
415 for f in files:
413 f.discard()
416 f.discard()
414 return any
417 return any
415
418
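
The pre/post-finalize split above reduces to a small filter. A sketch, assuming `generators` maps genid to its entry and `postfinalize_ids` mirrors `postfinalizegenerators`:

def select_generators(generators, postfinalize_ids, group):
    # 'prefinalize' runs everything except the post-finalize ids;
    # 'postfinalize' runs only those ids; anything else runs all
    for genid, entry in sorted(generators.items()):
        is_post = genid in postfinalize_ids
        if group == 'prefinalize' and is_post:
            continue
        if group == 'postfinalize' and not is_post:
            continue
        yield genid, entry
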
416 @active
419 @active
417 def findoffset(self, file):
420 def findoffset(self, file):
418 if file in self._newfiles:
421 if file in self._newfiles:
419 return 0
422 return 0
420 return self._offsetmap.get(file)
423 return self._offsetmap.get(file)
421
424
422 @active
425 @active
423 def readjournal(self):
426 def readjournal(self):
424 self._file.seek(0)
427 self._file.seek(0)
425 entries = []
428 entries = []
426 for l in self._file.readlines():
429 for l in self._file.readlines():
427 file, troffset = l.split(b'\0')
430 file, troffset = l.split(b'\0')
428 entries.append((file, int(troffset)))
431 entries.append((file, int(troffset)))
429 return entries
432 return entries
430
433
431 @active
434 @active
432 def replace(self, file, offset):
435 def replace(self, file, offset):
433 """
436 """
434 replace can only replace already committed entries
437 replace can only replace already committed entries
435 that are not pending in the queue
438 that are not pending in the queue
436 """
439 """
437 if file in self._newfiles:
440 if file in self._newfiles:
438 if not offset:
441 if not offset:
439 return
442 return
440 self._newfiles.remove(file)
443 self._newfiles.remove(file)
441 self._offsetmap[file] = offset
444 self._offsetmap[file] = offset
442 elif file in self._offsetmap:
445 elif file in self._offsetmap:
443 if not offset:
446 if not offset:
444 del self._offsetmap[file]
447 del self._offsetmap[file]
445 self._newfiles.add(file)
448 self._newfiles.add(file)
446 else:
449 else:
447 self._offsetmap[file] = offset
450 self._offsetmap[file] = offset
448 else:
451 else:
449 raise KeyError(file)
452 raise KeyError(file)
450 self._file.write(b"%s\0%d\n" % (file, offset))
453 self._file.write(b"%s\0%d\n" % (file, offset))
451 self._file.flush()
454 self._file.flush()
452
455
453 @active
456 @active
454 def nest(self, name='<unnamed>'):
457 def nest(self, name='<unnamed>'):
455 self._count += 1
458 self._count += 1
456 self._usages += 1
459 self._usages += 1
457 self._names.append(name)
460 self._names.append(name)
458 return self
461 return self
459
462
460 def release(self):
463 def release(self):
461 if self._count > 0:
464 if self._count > 0:
462 self._usages -= 1
465 self._usages -= 1
463 if self._names:
466 if self._names:
464 self._names.pop()
467 self._names.pop()
465 # if the transaction scopes are left without being closed, fail
468 # if the transaction scopes are left without being closed, fail
466 if self._count > 0 and self._usages == 0:
469 if self._count > 0 and self._usages == 0:
467 self._abort()
470 self._abort()
468
471
469 def running(self):
472 def running(self):
470 return self._count > 0
473 return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        """write pending files to a temporary version

        This is used to allow hooks to view a transaction before commit"""
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending
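
    # Editor's sketch: registering a pending callback so that pre-close
    # hooks can see in-progress data via the '.pending' files written by
    # writepending(). `tr` and `flush_my_cache` are hypothetical names.
    #
    #   def flush_my_cache(tr):
    #       ...  # write data to a '<file>.pending' style location
    #       return True  # report that something is pending
    #
    #   tr.addpending(b'my-cache', flush_my_cache)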

    @active
    def hasfinalize(self, category):
        """check if a callback already exists for a category"""
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback
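
    # Editor's model (standalone, runnable): every add*() method stores its
    # callback in a per-category dict, so a later registration under the
    # same category silently replaces the earlier one:
    #
    #   callbacks = {}
    #   callbacks[b'dirstate'] = lambda tr: b'v1'
    #   callbacks[b'dirstate'] = lambda tr: b'v2'  # replaces v1
    #   assert [callbacks[c](None) for c in sorted(callbacks)] == [b'v2']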

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction has
        closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def addvalidator(self, category, callback):
        """add a callback to be called when validating the transaction.

        The transaction will be given as the first argument to the callback.

        The callback should raise an exception to abort the transaction."""
        self._validatecallback[category] = callback
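
    # Editor's sketch: a validator vetoes the transaction by raising; the
    # exception propagates out of close() before finalization starts. `tr`
    # and the size check below are hypothetical.
    #
    #   def refuse_when_too_big(tr):
    #       if getattr(tr, 'hypothetical_size', 0) > 10 ** 9:
    #           raise error.Abort(b'transaction too large')
    #
    #   tr.addvalidator(b'size-guard', refuse_when_too_big)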

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            for category in sorted(self._validatecallback):
                self._validatecallback[category](self)
            self._validatecallback = None  # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._offsetmap = {}
        self._newfiles = set()
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None
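
    # Editor's model (standalone, runnable) of the finalize loop in close():
    # draining the dict in rounds lets a finalize callback register further
    # finalizers, which then run in the next round.
    #
    #   pending, ran = {b'a': None}, []
    #   while pending:
    #       batch, pending = pending, {}
    #       for cat in sorted(batch):
    #           ran.append(cat)
    #           if cat == b'a':
    #               pending[b'b'] = None  # added during finalization
    #   assert ran == [b'a', b'b']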

    @active
    def abort(self):
        """abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)"""
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return

        undo_backup_path = b"%s.backupfiles" % self._undoname
        undobackupfile = self._opener.open(undo_backup_path, b'w')
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location %s\n"
                        % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
            util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
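
    # Editor's sketch (runnable): how _writeundo() derives undo backup names
    # from journal backup names, assuming the standard names b'journal' and
    # b'undo':
    #
    #   name = b'journal.backup.00changelog.i'
    #   uname = name.replace(b'journal', b'undo', 1)
    #   assert uname == b'undo.backup.00changelog.i'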

    def _abort(self):
        entries = self.readjournal()
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            if not entries and not self._backupentries:
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_(b"transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(
                    self._journal,
                    self._report,
                    self._opener,
                    self._vfsmap,
                    entries,
                    self._backupentries,
                    False,
                    checkambigfiles=self._checkambigfiles,
                )
                self._report(_(b"rollback completed\n"))
            except BaseException as exc:
                self._report(_(b"rollback failed - please run hg recover\n"))
                self._report(
                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                )
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.


BAD_VERSION_MSG = _(
    b"journal was created by a different version of Mercurial\n"
)


def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
      to truncate each file. The file should contain a list of
      file\0offset pairs, delimited by newlines. The corresponding
      '*.backupfiles' file should contain a list of file\0backupfile
      pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided
    when restoring the corresponding files.
    """
    entries = []
    backupentries = []

    with opener.open(file) as fp:
        lines = fp.readlines()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o)))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            ver = lines[0][:-1]
            if ver != (b'%d' % version):
                report(BAD_VERSION_MSG)
            else:
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split(b'\0')
                        backupentries.append((l, f, b, bool(c)))

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
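

# Editor's sketch (standalone, runnable): parsing one '*.backupfiles' line
# as rollback() does above -- four NUL-separated fields per line: vfs
# location, original file, backup file, and a cache flag.
#
#   line = b'\x00data/foo.i\x00journal.backup.data/foo.i\x001\n'
#   l, f, b, c = line[:-1].split(b'\0')
#   assert (l, f, b, bool(c)) == (b'', b'data/foo.i',
#                                 b'journal.backup.data/foo.i', True)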