pathauditor: make _checkfs_exists a static method...
Arseniy Alekseyev
r50807:bc83ebe0 default
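
A minimal sketch of the calling convention this change introduces (the repository path below is illustrative, not part of the patch). `_checkfs_exists` no longer needs a `pathauditor` instance; the root and the optional nested-repo callback are passed explicitly:

    from mercurial import pathutil

    root = b'/illustrative/repo'  # hypothetical repository root

    # before this change, the check was an instance method:
    #     auditor = pathutil.pathauditor(root)
    #     auditor._checkfs_exists(b'foo', b'foo/bar')

    # after it, the check is a static method on the class:
    exists = pathutil.pathauditor._checkfs_exists(
        root, b'foo', b'foo/bar', callback=None
    )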
--- a/mercurial/pathutil.py
+++ b/mercurial/pathutil.py
@@ -1,383 +1,391 @@
 import contextlib
 import errno
 import os
 import posixpath
 import stat

 from typing import (
     Any,
     Callable,
     Iterator,
     Optional,
 )

 from .i18n import _
 from . import (
     encoding,
     error,
     policy,
     pycompat,
     util,
 )

 rustdirs = policy.importrust('dirstate', 'Dirs')
 parsers = policy.importmod('parsers')


 def _lowerclean(s):
     # type: (bytes) -> bytes
     return encoding.hfsignoreclean(s.lower())


 class pathauditor:
     """ensure that a filesystem path contains no banned components.
     the following properties of a path are checked:

     - ends with a directory separator
     - under top-level .hg
     - starts at the root of a windows drive
     - contains ".."

     More check are also done about the file system states:
     - traverses a symlink (e.g. a/symlink_here/b)
     - inside a nested repository (a callback can be used to approve
       some nested repositories, e.g., subrepositories)

     The file system checks are only done when 'realfs' is set to True (the
     default). They should be disable then we are auditing path for operation on
     stored history.

     If 'cached' is set to True, audited paths and sub-directories are cached.
     Be careful to not keep the cache of unmanaged directories for long because
     audited paths may be replaced with symlinks.
     """

     def __init__(self, root, callback=None, realfs=True, cached=False):
         self.audited = set()
         self.auditeddir = dict()
         self.root = root
         self._realfs = realfs
         self._cached = cached
         self.callback = callback
         if os.path.lexists(root) and not util.fscasesensitive(root):
             self.normcase = util.normcase
         else:
             self.normcase = lambda x: x

     def __call__(self, path, mode=None):
         # type: (bytes, Optional[Any]) -> None
         """Check the relative path.
         path may contain a pattern (e.g. foodir/**.txt)"""

         path = util.localpath(path)
         if path in self.audited:
             return
         # AIX ignores "/" at end of path, others raise EISDIR.
         if util.endswithsep(path):
             raise error.InputError(
                 _(b"path ends in directory separator: %s") % path
             )
         parts = util.splitpath(path)
         if (
             os.path.splitdrive(path)[0]
             or _lowerclean(parts[0]) in (b'.hg', b'.hg.', b'')
             or pycompat.ospardir in parts
         ):
             raise error.InputError(
                 _(b"path contains illegal component: %s") % path
             )
         # Windows shortname aliases
         if b"~" in path:
             for p in parts:
                 if b"~" in p:
                     first, last = p.split(b"~", 1)
                     if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
                         raise error.InputError(
                             _(b"path contains illegal component: %s") % path
                         )
         if b'.hg' in _lowerclean(path):
             lparts = [_lowerclean(p) for p in parts]
             for p in b'.hg', b'.hg.':
                 if p in lparts[1:]:
                     pos = lparts.index(p)
                     base = os.path.join(*parts[:pos])
                     raise error.InputError(
                         _(b"path '%s' is inside nested repo %r")
                         % (path, pycompat.bytestr(base))
                     )

         if self._realfs:
             # It's important that we check the path parts starting from the root.
             # We don't want to add "foo/bar/baz" to auditeddir before checking if
             # there's a "foo/.hg" directory. This also means we won't accidentally
             # traverse a symlink into some other filesystem (which is potentially
             # expensive to access).
             for prefix in finddirs_rev_noroot(path):
                 if prefix in self.auditeddir:
                     res = self.auditeddir[prefix]
                 else:
-                    res = self._checkfs_exists(prefix, path)
+                    res = pathauditor._checkfs_exists(
+                        self.root, prefix, path, self.callback
+                    )
                 if self._cached:
                     self.auditeddir[prefix] = res
                 if not res:
                     break

         if self._cached:
             self.audited.add(path)

-    def _checkfs_exists(self, prefix: bytes, path: bytes) -> bool:
+    @staticmethod
+    def _checkfs_exists(
+        root,
+        prefix: bytes,
+        path: bytes,
+        callback: Optional[Callable[[bytes], bool]] = None,
+    ):
         """raise exception if a file system backed check fails.

         Return a bool that indicates that the directory (or file) exists."""
-        curpath = os.path.join(self.root, prefix)
+        curpath = os.path.join(root, prefix)
         try:
             st = os.lstat(curpath)
         except OSError as err:
             if err.errno == errno.ENOENT:
                 return False
             # EINVAL can be raised as invalid path syntax under win32.
             # They must be ignored for patterns can be checked too.
             if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                 raise
         else:
             if stat.S_ISLNK(st.st_mode):
                 msg = _(b'path %r traverses symbolic link %r') % (
                     pycompat.bytestr(path),
                     pycompat.bytestr(prefix),
                 )
                 raise error.Abort(msg)
             elif stat.S_ISDIR(st.st_mode) and os.path.isdir(
                 os.path.join(curpath, b'.hg')
             ):
-                if not self.callback or not self.callback(curpath):
+                if not callback or not callback(curpath):
                     msg = _(b"path '%s' is inside nested repo %r")
                     raise error.Abort(msg % (path, pycompat.bytestr(prefix)))
         return True

     def check(self, path):
         # type: (bytes) -> bool
         try:
             self(path)
             return True
         except (OSError, error.Abort):
             return False

     @contextlib.contextmanager
     def cached(self):
         if self._cached:
             yield
         else:
             try:
                 self._cached = True
                 yield
             finally:
                 self.audited.clear()
                 self.auditeddir.clear()
                 self._cached = False


 def canonpath(root, cwd, myname, auditor=None):
     # type: (bytes, bytes, bytes, Optional[pathauditor]) -> bytes
     """return the canonical path of myname, given cwd and root

     >>> def check(root, cwd, myname):
     ...     a = pathauditor(root, realfs=False)
     ...     try:
     ...         return canonpath(root, cwd, myname, a)
     ...     except error.Abort:
     ...         return 'aborted'
     >>> def unixonly(root, cwd, myname, expected='aborted'):
     ...     if pycompat.iswindows:
     ...         return expected
     ...     return check(root, cwd, myname)
     >>> def winonly(root, cwd, myname, expected='aborted'):
     ...     if not pycompat.iswindows:
     ...         return expected
     ...     return check(root, cwd, myname)
     >>> winonly(b'd:\\\\repo', b'c:\\\\dir', b'filename')
     'aborted'
     >>> winonly(b'c:\\\\repo', b'c:\\\\dir', b'filename')
     'aborted'
     >>> winonly(b'c:\\\\repo', b'c:\\\\', b'filename')
     'aborted'
     >>> winonly(b'c:\\\\repo', b'c:\\\\', b'repo\\\\filename',
     ...         b'filename')
     'filename'
     >>> winonly(b'c:\\\\repo', b'c:\\\\repo', b'filename', b'filename')
     'filename'
     >>> winonly(b'c:\\\\repo', b'c:\\\\repo\\\\subdir', b'filename',
     ...         b'subdir/filename')
     'subdir/filename'
     >>> unixonly(b'/repo', b'/dir', b'filename')
     'aborted'
     >>> unixonly(b'/repo', b'/', b'filename')
     'aborted'
     >>> unixonly(b'/repo', b'/', b'repo/filename', b'filename')
     'filename'
     >>> unixonly(b'/repo', b'/repo', b'filename', b'filename')
     'filename'
     >>> unixonly(b'/repo', b'/repo/subdir', b'filename', b'subdir/filename')
     'subdir/filename'
     """
     if util.endswithsep(root):
         rootsep = root
     else:
         rootsep = root + pycompat.ossep
     name = myname
     if not os.path.isabs(name):
         name = os.path.join(root, cwd, name)
     name = os.path.normpath(name)
     if auditor is None:
         auditor = pathauditor(root)
     if name != rootsep and name.startswith(rootsep):
         name = name[len(rootsep) :]
         auditor(name)
         return util.pconvert(name)
     elif name == root:
         return b''
     else:
         # Determine whether `name' is in the hierarchy at or beneath `root',
         # by iterating name=dirname(name) until that causes no change (can't
         # check name == '/', because that doesn't work on windows). The list
         # `rel' holds the reversed list of components making up the relative
         # file name we want.
         rel = []
         while True:
             try:
                 s = util.samefile(name, root)
             except OSError:
                 s = False
             if s:
                 if not rel:
                     # name was actually the same as root (maybe a symlink)
                     return b''
                 rel.reverse()
                 name = os.path.join(*rel)
                 auditor(name)
                 return util.pconvert(name)
             dirname, basename = util.split(name)
             rel.append(basename)
             if dirname == name:
                 break
             name = dirname

         # A common mistake is to use -R, but specify a file relative to the repo
         # instead of cwd. Detect that case, and provide a hint to the user.
         hint = None
         try:
             if cwd != root:
                 canonpath(root, root, myname, auditor)
                 relpath = util.pathto(root, cwd, b'')
                 if relpath.endswith(pycompat.ossep):
                     relpath = relpath[:-1]
                 hint = _(b"consider using '--cwd %s'") % relpath
         except error.Abort:
             pass

         raise error.Abort(
             _(b"%s not under root '%s'") % (myname, root), hint=hint
         )


 def normasprefix(path):
     # type: (bytes) -> bytes
     """normalize the specified path as path prefix

     Returned value can be used safely for "p.startswith(prefix)",
     "p[len(prefix):]", and so on.

     For efficiency, this expects "path" argument to be already
     normalized by "os.path.normpath", "os.path.realpath", and so on.

     See also issue3033 for detail about need of this function.

     >>> normasprefix(b'/foo/bar').replace(pycompat.ossep, b'/')
     '/foo/bar/'
     >>> normasprefix(b'/').replace(pycompat.ossep, b'/')
     '/'
     """
     d, p = os.path.splitdrive(path)
     if len(p) != len(pycompat.ossep):
         return path + pycompat.ossep
     else:
         return path


 def finddirs(path):
     # type: (bytes) -> Iterator[bytes]
     pos = path.rfind(b'/')
     while pos != -1:
         yield path[:pos]
         pos = path.rfind(b'/', 0, pos)
     yield b''


 def finddirs_rev_noroot(path: bytes) -> Iterator[bytes]:
     pos = path.find(pycompat.ossep)
     while pos != -1:
         yield path[:pos]
         pos = path.find(pycompat.ossep, pos + 1)


 class dirs:
     '''a multiset of directory names from a set of file paths'''

     def __init__(self, map, only_tracked=False):
         """
         a dict map indicates a dirstate while a list indicates a manifest
         """
         self._dirs = {}
         addpath = self.addpath
         if isinstance(map, dict) and only_tracked:
             for f, s in map.items():
                 if s.state != b'r':
                     addpath(f)
         elif only_tracked:
             msg = b"`only_tracked` is only supported with a dict source"
             raise error.ProgrammingError(msg)
         else:
             for f in map:
                 addpath(f)

     def addpath(self, path):
         # type: (bytes) -> None
         dirs = self._dirs
         for base in finddirs(path):
             if base.endswith(b'/'):
                 raise ValueError(
                     "found invalid consecutive slashes in path: %r" % base
                 )
             if base in dirs:
                 dirs[base] += 1
                 return
             dirs[base] = 1

     def delpath(self, path):
         # type: (bytes) -> None
         dirs = self._dirs
         for base in finddirs(path):
             if dirs[base] > 1:
                 dirs[base] -= 1
                 return
             del dirs[base]

     def __iter__(self):
         return iter(self._dirs)

     def __contains__(self, d):
         # type: (bytes) -> bool
         return d in self._dirs


 if util.safehasattr(parsers, 'dirs'):
     dirs = parsers.dirs

 if rustdirs is not None:
     dirs = rustdirs


 # forward two methods from posixpath that do what we need, but we'd
 # rather not let our internals know that we're thinking in posix terms
 # - instead we'll let them be oblivious.
 join = posixpath.join
 dirname = posixpath.dirname  # type: Callable[[bytes], bytes]
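
The vfs.py hunk below is the consumer of this refactoring: `isfileorlink_checkdir` now calls `_checkfs_exists` on the class with `self.base` instead of reaching through `self.audit`, which is a real `pathauditor` only when the vfs was created with `audit=True` (otherwise it is a plain lambda). A sketch of how a caller might share one `dircache` across checks; the vfs base and file names are illustrative:

    from mercurial import vfs as vfsmod

    wvfs = vfsmod.vfs(b'/illustrative/repo')  # hypothetical working-directory vfs
    dircache = {}  # caches per-directory results across calls
    tracked = [
        f
        for f in (b'foo/a.txt', b'foo/b.txt')
        if wvfs.isfileorlink_checkdir(dircache, f)
    ]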
--- a/mercurial/vfs.py
+++ b/mercurial/vfs.py
@@ -1,807 +1,813 @@
 # vfs.py - Mercurial 'vfs' classes
 #
 # Copyright Olivia Mackall <olivia@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 import contextlib
 import os
 import shutil
 import stat
 import threading

 from typing import (
     Optional,
 )

 from .i18n import _
 from .pycompat import (
     delattr,
     getattr,
     setattr,
 )
 from . import (
     encoding,
     error,
     pathutil,
     pycompat,
     util,
 )


 def _avoidambig(path: bytes, oldstat):
     """Avoid file stat ambiguity forcibly

     This function causes copying ``path`` file, if it is owned by
     another (see issue5418 and issue5584 for detail).
     """

     def checkandavoid():
         newstat = util.filestat.frompath(path)
         # return whether file stat ambiguity is (already) avoided
         return not newstat.isambig(oldstat) or newstat.avoidambig(path, oldstat)

     if not checkandavoid():
         # simply copy to change owner of path to get privilege to
         # advance mtime (see issue5418)
         util.rename(util.mktempcopy(path), path)
         checkandavoid()


 class abstractvfs:
     """Abstract base class; cannot be instantiated"""

     # default directory separator for vfs
     #
     # Other vfs code always use `/` and this works fine because python file API
     # abstract the use of `/` and make it work transparently. For consistency
     # vfs will always use `/` when joining. This avoid some confusion in
     # encoded vfs (see issue6546)
     _dir_sep = b'/'

     def __init__(self, *args, **kwargs):
         '''Prevent instantiation; don't call this from subclasses.'''
         raise NotImplementedError('attempted instantiating ' + str(type(self)))

     # TODO: type return, which is util.posixfile wrapped by a proxy
     def __call__(self, path: bytes, mode: bytes = b'rb', **kwargs):
         raise NotImplementedError

     def _auditpath(self, path: bytes, mode: bytes):
         raise NotImplementedError

     def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
         raise NotImplementedError

     def tryread(self, path: bytes) -> bytes:
         '''gracefully return an empty string for missing files'''
         try:
             return self.read(path)
         except FileNotFoundError:
             pass
         return b""

     def tryreadlines(self, path: bytes, mode: bytes = b'rb'):
         '''gracefully return an empty array for missing files'''
         try:
             return self.readlines(path, mode=mode)
         except FileNotFoundError:
             pass
         return []

     @util.propertycache
     def open(self):
         """Open ``path`` file, which is relative to vfs root.

         Newly created directories are marked as "not to be indexed by
         the content indexing service", if ``notindexed`` is specified
         for "write" mode access.
         """
         return self.__call__

     def read(self, path: bytes) -> bytes:
         with self(path, b'rb') as fp:
             return fp.read()

     def readlines(self, path: bytes, mode: bytes = b'rb'):
         with self(path, mode=mode) as fp:
             return fp.readlines()

     def write(
         self, path: bytes, data: bytes, backgroundclose=False, **kwargs
     ) -> int:
         with self(path, b'wb', backgroundclose=backgroundclose, **kwargs) as fp:
             return fp.write(data)

     def writelines(
         self, path: bytes, data: bytes, mode: bytes = b'wb', notindexed=False
     ) -> None:
         with self(path, mode=mode, notindexed=notindexed) as fp:
             return fp.writelines(data)

     def append(self, path: bytes, data: bytes) -> int:
         with self(path, b'ab') as fp:
             return fp.write(data)

     def basename(self, path: bytes) -> bytes:
         """return base element of a path (as os.path.basename would do)

         This exists to allow handling of strange encoding if needed."""
         return os.path.basename(path)

     def chmod(self, path: bytes, mode: int) -> None:
         return os.chmod(self.join(path), mode)

     def dirname(self, path: bytes) -> bytes:
         """return dirname element of a path (as os.path.dirname would do)

         This exists to allow handling of strange encoding if needed."""
         return os.path.dirname(path)

     def exists(self, path: Optional[bytes] = None) -> bool:
         return os.path.exists(self.join(path))

     def fstat(self, fp):
         return util.fstat(fp)

     def isdir(self, path: Optional[bytes] = None) -> bool:
         return os.path.isdir(self.join(path))

     def isfile(self, path: Optional[bytes] = None) -> bool:
         return os.path.isfile(self.join(path))

     def islink(self, path: Optional[bytes] = None) -> bool:
         return os.path.islink(self.join(path))

     def isfileorlink(self, path: Optional[bytes] = None) -> bool:
         """return whether path is a regular file or a symlink

         Unlike isfile, this doesn't follow symlinks."""
         try:
             st = self.lstat(path)
         except OSError:
             return False
         mode = st.st_mode
         return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

     def _join(self, *paths: bytes) -> bytes:
         root_idx = 0
         for idx, p in enumerate(paths):
             if os.path.isabs(p) or p.startswith(self._dir_sep):
                 root_idx = idx
         if root_idx != 0:
             paths = paths[root_idx:]
         paths = [p for p in paths if p]
         return self._dir_sep.join(paths)

     def reljoin(self, *paths: bytes) -> bytes:
         """join various elements of a path together (as os.path.join would do)

         The vfs base is not injected so that path stay relative. This exists
         to allow handling of strange encoding if needed."""
         return self._join(*paths)

     def split(self, path: bytes):
         """split top-most element of a path (as os.path.split would do)

         This exists to allow handling of strange encoding if needed."""
         return os.path.split(path)

     def lexists(self, path: Optional[bytes] = None) -> bool:
         return os.path.lexists(self.join(path))

     def lstat(self, path: Optional[bytes] = None):
         return os.lstat(self.join(path))

     def listdir(self, path: Optional[bytes] = None):
         return os.listdir(self.join(path))

     def makedir(self, path: Optional[bytes] = None, notindexed=True):
         return util.makedir(self.join(path), notindexed)

     def makedirs(
         self, path: Optional[bytes] = None, mode: Optional[int] = None
     ):
         return util.makedirs(self.join(path), mode)

     def makelock(self, info, path: bytes):
         return util.makelock(info, self.join(path))

     def mkdir(self, path: Optional[bytes] = None):
         return os.mkdir(self.join(path))

     def mkstemp(
         self,
         suffix: bytes = b'',
         prefix: bytes = b'tmp',
         dir: Optional[bytes] = None,
     ):
         fd, name = pycompat.mkstemp(
             suffix=suffix, prefix=prefix, dir=self.join(dir)
         )
         dname, fname = util.split(name)
         if dir:
             return fd, os.path.join(dir, fname)
         else:
             return fd, fname

     def readdir(self, path: Optional[bytes] = None, stat=None, skip=None):
         return util.listdir(self.join(path), stat, skip)

     def readlock(self, path: bytes) -> bytes:
         return util.readlock(self.join(path))

     def rename(self, src: bytes, dst: bytes, checkambig=False):
         """Rename from src to dst

         checkambig argument is used with util.filestat, and is useful
         only if destination file is guarded by any lock
         (e.g. repo.lock or repo.wlock).

         To avoid file stat ambiguity forcibly, checkambig=True involves
         copying ``src`` file, if it is owned by another. Therefore, use
         checkambig=True only in limited cases (see also issue5418 and
         issue5584 for detail).
         """
         self._auditpath(dst, b'w')
         srcpath = self.join(src)
         dstpath = self.join(dst)
         oldstat = checkambig and util.filestat.frompath(dstpath)
         if oldstat and oldstat.stat:
             ret = util.rename(srcpath, dstpath)
             _avoidambig(dstpath, oldstat)
             return ret
         return util.rename(srcpath, dstpath)

     def readlink(self, path: bytes) -> bytes:
         return util.readlink(self.join(path))

     def removedirs(self, path: Optional[bytes] = None):
         """Remove a leaf directory and all empty intermediate ones"""
         return util.removedirs(self.join(path))

     def rmdir(self, path: Optional[bytes] = None):
         """Remove an empty directory."""
         return os.rmdir(self.join(path))

     def rmtree(
         self, path: Optional[bytes] = None, ignore_errors=False, forcibly=False
     ):
         """Remove a directory tree recursively

         If ``forcibly``, this tries to remove READ-ONLY files, too.
         """
         if forcibly:

             def onerror(function, path, excinfo):
                 if function is not os.remove:
                     raise
                 # read-only files cannot be unlinked under Windows
                 s = os.stat(path)
                 if (s.st_mode & stat.S_IWRITE) != 0:
                     raise
                 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                 os.remove(path)

         else:
             onerror = None
         return shutil.rmtree(
             self.join(path), ignore_errors=ignore_errors, onerror=onerror
         )

     def setflags(self, path: bytes, l: bool, x: bool):
         return util.setflags(self.join(path), l, x)

     def stat(self, path: Optional[bytes] = None):
         return os.stat(self.join(path))

     def unlink(self, path: Optional[bytes] = None):
         return util.unlink(self.join(path))

     def tryunlink(self, path: Optional[bytes] = None):
         """Attempt to remove a file, ignoring missing file errors."""
         util.tryunlink(self.join(path))

     def unlinkpath(
         self, path: Optional[bytes] = None, ignoremissing=False, rmdir=True
     ):
         return util.unlinkpath(
             self.join(path), ignoremissing=ignoremissing, rmdir=rmdir
         )

     def utime(self, path: Optional[bytes] = None, t=None):
         return os.utime(self.join(path), t)

     def walk(self, path: Optional[bytes] = None, onerror=None):
         """Yield (dirpath, dirs, files) tuple for each directories under path

         ``dirpath`` is relative one from the root of this vfs. This
         uses ``os.sep`` as path separator, even you specify POSIX
         style ``path``.

         "The root of this vfs" is represented as empty ``dirpath``.
         """
         root = os.path.normpath(self.join(None))
         # when dirpath == root, dirpath[prefixlen:] becomes empty
         # because len(dirpath) < prefixlen.
         prefixlen = len(pathutil.normasprefix(root))
         for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
             yield (dirpath[prefixlen:], dirs, files)

     @contextlib.contextmanager
     def backgroundclosing(self, ui, expectedcount=-1):
         """Allow files to be closed asynchronously.

         When this context manager is active, ``backgroundclose`` can be passed
         to ``__call__``/``open`` to result in the file possibly being closed
         asynchronously, on a background thread.
         """
         # Sharing backgroundfilecloser between threads is complex and using
         # multiple instances puts us at risk of running out of file descriptors
         # only allow to use backgroundfilecloser when in main thread.
         if not isinstance(
             threading.current_thread(),
             threading._MainThread,  # pytype: disable=module-attr
         ):
             yield
             return
         vfs = getattr(self, 'vfs', self)
         if getattr(vfs, '_backgroundfilecloser', None):
             raise error.Abort(
                 _(b'can only have 1 active background file closer')
             )

         with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
             try:
                 vfs._backgroundfilecloser = (
                     bfc  # pytype: disable=attribute-error
                 )
                 yield bfc
             finally:
                 vfs._backgroundfilecloser = (
                     None  # pytype: disable=attribute-error
                 )

     def register_file(self, path):
         """generic hook point to lets fncache steer its stew"""


 class vfs(abstractvfs):
     """Operate files relative to a base directory

     This class is used to hide the details of COW semantics and
     remote file access from higher level code.

     'cacheaudited' should be enabled only if (a) vfs object is short-lived, or
     (b) the base directory is managed by hg and considered sort-of append-only.
     See pathutil.pathauditor() for details.
     """

     def __init__(
         self,
         base: bytes,
         audit=True,
         cacheaudited=False,
         expandpath=False,
         realpath=False,
     ):
         if expandpath:
             base = util.expandpath(base)
         if realpath:
             base = os.path.realpath(base)
         self.base = base
         self._audit = audit
         if audit:
             self.audit = pathutil.pathauditor(self.base, cached=cacheaudited)
         else:
             self.audit = lambda path, mode=None: True
         self.createmode = None
         self._trustnlink = None
         self.options = {}

     @util.propertycache
     def _cansymlink(self) -> bool:
         return util.checklink(self.base)

     @util.propertycache
     def _chmod(self):
         return util.checkexec(self.base)

     def _fixfilemode(self, name):
         if self.createmode is None or not self._chmod:
             return
         os.chmod(name, self.createmode & 0o666)

     def _auditpath(self, path, mode) -> None:
         if self._audit:
             if os.path.isabs(path) and path.startswith(self.base):
                 path = os.path.relpath(path, self.base)
             r = util.checkosfilename(path)
             if r:
                 raise error.Abort(b"%s: %r" % (r, path))
             self.audit(path, mode=mode)

     def isfileorlink_checkdir(
         self, dircache, path: Optional[bytes] = None
     ) -> bool:
         """return True if the path is a regular file or a symlink and
         the directories along the path are "normal", that is
-        not symlinks or nested hg repositories."""
+        not symlinks or nested hg repositories.
+
+        Ignores the `_audit` setting, and checks the directories regardless.
+        `dircache` is used to cache the directory checks.
+        """
         try:
             for prefix in pathutil.finddirs_rev_noroot(util.localpath(path)):
                 if prefix in dircache:
                     res = dircache[prefix]
                 else:
-                    res = self.audit._checkfs_exists(prefix, path)
+                    res = pathutil.pathauditor._checkfs_exists(
+                        self.base, prefix, path
+                    )
                 dircache[prefix] = res
                 if not res:
                     return False
         except (OSError, error.Abort):
             return False
         return self.isfileorlink(path)

444 def __call__(
450 def __call__(
445 self,
451 self,
446 path: bytes,
452 path: bytes,
447 mode: bytes = b"rb",
453 mode: bytes = b"rb",
448 atomictemp=False,
454 atomictemp=False,
449 notindexed=False,
455 notindexed=False,
450 backgroundclose=False,
456 backgroundclose=False,
451 checkambig=False,
457 checkambig=False,
452 auditpath=True,
458 auditpath=True,
453 makeparentdirs=True,
459 makeparentdirs=True,
454 ):
460 ):
455 """Open ``path`` file, which is relative to vfs root.
461 """Open ``path`` file, which is relative to vfs root.
456
462
457 By default, parent directories are created as needed. Newly created
463 By default, parent directories are created as needed. Newly created
458 directories are marked as "not to be indexed by the content indexing
464 directories are marked as "not to be indexed by the content indexing
459 service", if ``notindexed`` is specified for "write" mode access.
465 service", if ``notindexed`` is specified for "write" mode access.
460 Set ``makeparentdirs=False`` to not create directories implicitly.
466 Set ``makeparentdirs=False`` to not create directories implicitly.
461
467
462 If ``backgroundclose`` is passed, the file may be closed asynchronously.
468 If ``backgroundclose`` is passed, the file may be closed asynchronously.
463 It can only be used if the ``self.backgroundclosing()`` context manager
469 It can only be used if the ``self.backgroundclosing()`` context manager
464 is active. This should only be specified if the following criteria hold:
470 is active. This should only be specified if the following criteria hold:
465
471
466 1. There is a potential for writing thousands of files. Unless you
472 1. There is a potential for writing thousands of files. Unless you
467 are writing thousands of files, the performance benefits of
473 are writing thousands of files, the performance benefits of
468 asynchronously closing files is not realized.
474 asynchronously closing files is not realized.
469 2. Files are opened exactly once for the ``backgroundclosing``
475 2. Files are opened exactly once for the ``backgroundclosing``
470 active duration and are therefore free of race conditions between
476 active duration and are therefore free of race conditions between
471 closing a file on a background thread and reopening it. (If the
477 closing a file on a background thread and reopening it. (If the
472 file were opened multiple times, there could be unflushed data
478 file were opened multiple times, there could be unflushed data
473 because the original file handle hasn't been flushed/closed yet.)
479 because the original file handle hasn't been flushed/closed yet.)
474
480
475 ``checkambig`` argument is passed to atomictempfile (valid
481 ``checkambig`` argument is passed to atomictempfile (valid
476 only for writing), and is useful only if target file is
482 only for writing), and is useful only if target file is
477 guarded by any lock (e.g. repo.lock or repo.wlock).
483 guarded by any lock (e.g. repo.lock or repo.wlock).
478
484
479 To avoid file stat ambiguity forcibly, checkambig=True involves
485 To avoid file stat ambiguity forcibly, checkambig=True involves
480 copying ``path`` file opened in "append" mode (e.g. for
        copying ``path`` file opened in "append" mode (e.g. for
        truncation), if it is owned by another. Therefore, use the
        combination of append mode and checkambig=True only in limited
        cases (see also issue5418 and issue5584 for details).
        """
        if auditpath:
            self._auditpath(path, mode)
        f = self.join(path)

        if b"b" not in mode:
            mode += b"b"  # for that other OS

        nlink = -1
        if mode not in (b'r', b'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    if makeparentdirs:
                        util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(
                        f, mode, self.createmode, checkambig=checkambig
                    )
                try:
                    if b'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2  # force mktempcopy (issue1922)
                except FileNotFoundError:
                    nlink = 0
                if makeparentdirs:
                    util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)

        if checkambig:
            if mode in (b'r', b'rb'):
                raise error.Abort(
                    _(
                        b'implementation error: mode %s is not'
                        b' valid for checkambig=True'
                    )
                    % mode
                )
            fp = checkambigatclosing(fp)

        if backgroundclose and isinstance(
            threading.current_thread(),
            threading._MainThread,  # pytype: disable=module-attr
        ):
            if (
                not self._backgroundfilecloser  # pytype: disable=attribute-error
            ):
                raise error.Abort(
                    _(
                        b'backgroundclose can only be used when a '
                        b'backgroundclosing context manager is active'
                    )
                )

            fp = delayclosedfile(
                fp,
                self._backgroundfilecloser,  # pytype: disable=attribute-error
            )

        return fp

    def symlink(self, src: bytes, dst: bytes) -> None:
        self.audit(dst)
        linkname = self.join(dst)
        util.tryunlink(linkname)

        util.makedirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(
                    err.errno,
                    _(b'could not symlink to %r: %s')
                    % (src, encoding.strtolocal(err.strerror)),
                    linkname,
                )
        else:
            self.write(dst, src)

    def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
        if path:
            parts = [self.base, path]
            parts.extend(insidef)
            return self._join(*parts)
        else:
            return self.base


opener = vfs
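

# Illustrative sketch (not part of the module): how a caller might drive an
# opener/vfs instance once it exists. The instance is passed in rather than
# constructed here because the constructor arguments live elsewhere in this
# file, and the path is a made-up example.
def _example_opener_usage(vfs_obj):
    # Plain write: the hardlink bookkeeping above (nlink/mktempcopy) makes
    # sure a file shared with another hardlink is not modified in place.
    with vfs_obj(b'data/example.txt', b'wb') as fp:
        fp.write(b'hello\n')

    # Atomic write: data goes to a temporary file that is renamed into place
    # when the handle is closed.
    fp = vfs_obj(b'data/example.txt', b'wb', atomictemp=True)
    fp.write(b'replaced atomically\n')
    fp.close()

    # checkambig=True wraps the handle in checkambigatclosing (defined below);
    # per the docstring above, avoid combining it with append mode outside the
    # narrow cases it describes.
    with vfs_obj(b'data/example.txt', b'wb', checkambig=True) as fp:
        fp.write(b'stat-ambiguity-aware write\n')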


class proxyvfs(abstractvfs):
    def __init__(self, vfs: "vfs"):
        self.vfs = vfs

    def _auditpath(self, path, mode):
        return self.vfs._auditpath(path, mode)

    @property
    def options(self):
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value


class filtervfs(proxyvfs, abstractvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs: "vfs", filter):
        proxyvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path: bytes, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
        if path:
            return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
        else:
            return self.vfs.join(path)


filteropener = filtervfs
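

# Illustrative sketch (not part of the module): filtervfs applies its filter
# to every path before delegating to the wrapped vfs. The lowercasing filter
# and the path below are made-up examples; real callers pass e.g. a store
# path-encoding function.
def _example_filtervfs(inner_vfs):
    lowercased = filtervfs(inner_vfs, lambda p: p.lower())
    # Opens b'readme.txt' on the wrapped vfs even though the caller asked
    # for b'README.TXT'.
    with lowercased(b'README.TXT', b'rb') as fp:
        return fp.read()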


class readonlyvfs(proxyvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs: "vfs"):
        proxyvfs.__init__(self, vfs)

    def __call__(self, path: bytes, mode: bytes = b'rb', *args, **kw):
        if mode not in (b'r', b'rb'):
            raise error.Abort(_(b'this vfs is read only'))
        return self.vfs(path, mode, *args, **kw)

    def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
        return self.vfs.join(path, *insidef)
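

# Illustrative sketch (not part of the module): readonlyvfs passes read modes
# through and aborts on anything else. The path is a made-up example.
def _example_readonlyvfs(inner_vfs):
    ro = readonlyvfs(inner_vfs)
    with ro(b'data/example.txt') as fp:  # the default mode b'rb' is allowed
        data = fp.read()
    try:
        ro(b'data/example.txt', b'wb')
    except error.Abort:
        pass  # any write mode is rejected before reaching the inner vfs
    return data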


class closewrapbase:
    """Base class of wrapper, which hooks closing

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh):
        object.__setattr__(self, '_origfh', fh)

    def __getattr__(self, attr):
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        self._origfh.__enter__()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def close(self):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))


class delayclosedfile(closewrapbase):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh, closer):
        super(delayclosedfile, self).__init__(fh)
        object.__setattr__(self, '_closer', closer)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._closer.close(self._origfh)

    def close(self):
        self._closer.close(self._origfh)


class backgroundfilecloser:
    """Coordinates background closing of file handles on multiple threads."""

    def __init__(self, ui, expectedcount=-1):
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow it to be enabled elsewhere for testing.
        defaultenabled = pycompat.iswindows
        enabled = ui.configbool(b'worker', b'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint(b'worker', b'backgroundcloseminfilecount')
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        maxqueue = ui.configint(b'worker', b'backgroundclosemaxqueue')
        threadcount = ui.configint(b'worker', b'backgroundclosethreadcount')

        ui.debug(
            b'starting %d threads for background file closing\n' % threadcount
        )

        self._queue = pycompat.queue.Queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except pycompat.queue.Empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(
                _(b'can only call close() when context manager active')
            )

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
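

# Illustrative sketch (not part of the module): backgroundfilecloser is meant
# to be driven as a context manager, and close() only queues work while the
# worker threads are running. The ``ui`` object and the iterable of already
# opened file handles are assumptions for the sake of the example.
def _example_backgroundfilecloser(ui, filehandles):
    with backgroundfilecloser(ui, expectedcount=len(filehandles)) as bfc:
        for fh in filehandles:
            bfc.close(fh)  # returns quickly; a worker thread does the close
    # Leaving the ``with`` block joins the worker threads, so every handle
    # has been closed by this point.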


class checkambigatclosing(closewrapbase):
    """Proxy for a file object, to avoid ambiguity of file stat

    See also util.filestat for detail about "ambiguity of file stat".

    This proxy is useful only if the target file is guarded by any
    lock (e.g. repo.lock or repo.wlock)

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh):
        super(checkambigatclosing, self).__init__(fh)
        object.__setattr__(self, '_oldstat', util.filestat.frompath(fh.name))

    def _checkambig(self):
        oldstat = self._oldstat
        if oldstat.stat:
            _avoidambig(self._origfh.name, oldstat)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._origfh.__exit__(exc_type, exc_value, exc_tb)
        self._checkambig()

    def close(self):
        self._origfh.close()
        self._checkambig()
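

# Illustrative sketch (not part of the module): per the docstring above,
# checkambigatclosing is not constructed directly; callers opt in through
# checkambig=True on the opener while holding an appropriate lock. ``repo``
# and the path below are assumptions for the sake of the example.
def _example_checkambig(repo):
    with repo.lock():
        with repo.vfs(b'data/example.txt', b'wb', checkambig=True) as fp:
            fp.write(b'stat-ambiguity-aware write under the repo lock')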