##// END OF EJS Templates
vfs: fix proxyvfs inheritance...
Boris Feld -
r41125:6498f0e0 default
parent child Browse files
Show More
@@ -1,629 +1,629 b''
1 # store.py - repository store handling for Mercurial
1 # store.py - repository store handling for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from . import (
16 from . import (
17 error,
17 error,
18 node,
18 node,
19 policy,
19 policy,
20 pycompat,
20 pycompat,
21 util,
21 util,
22 vfs as vfsmod,
22 vfs as vfsmod,
23 )
23 )
24
24
25 parsers = policy.importmod(r'parsers')
25 parsers = policy.importmod(r'parsers')
26
26
def _matchtrackedpath(path, matcher):
    '''parses a fncache entry and returns whether the entry is tracking a path
    matched by matcher or not.

    If matcher is None, returns True'''

    if matcher is None:
        return True
    decoded = decodedir(path)
    if decoded.startswith('data/'):
        # strip the 'data/' prefix and the trailing '.i' before matching
        return matcher(decoded[len('data/'):-len('.i')])
    if decoded.startswith('meta/'):
        # strip the 'meta/' prefix and the trailing '/00manifest.i'; an
        # empty remainder denotes the repository root
        subdir = decoded[len('meta/'):-len('/00manifest.i')]
        return matcher.visitdir(subdir or '.')
    raise error.ProgrammingError("cannot decode path %s" % decoded)
42
42
43 # This avoids a collision between a file named foo and a dir named
43 # This avoids a collision between a file named foo and a dir named
44 # foo.i or foo.d
44 # foo.i or foo.d
45 def _encodedir(path):
45 def _encodedir(path):
46 '''
46 '''
47 >>> _encodedir(b'data/foo.i')
47 >>> _encodedir(b'data/foo.i')
48 'data/foo.i'
48 'data/foo.i'
49 >>> _encodedir(b'data/foo.i/bla.i')
49 >>> _encodedir(b'data/foo.i/bla.i')
50 'data/foo.i.hg/bla.i'
50 'data/foo.i.hg/bla.i'
51 >>> _encodedir(b'data/foo.i.hg/bla.i')
51 >>> _encodedir(b'data/foo.i.hg/bla.i')
52 'data/foo.i.hg.hg/bla.i'
52 'data/foo.i.hg.hg/bla.i'
53 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
53 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
54 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
54 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
55 '''
55 '''
56 return (path
56 return (path
57 .replace(".hg/", ".hg.hg/")
57 .replace(".hg/", ".hg.hg/")
58 .replace(".i/", ".i.hg/")
58 .replace(".i/", ".i.hg/")
59 .replace(".d/", ".d.hg/"))
59 .replace(".d/", ".d.hg/"))
60
60
# prefer the C implementation from the parsers module when it provides one
encodedir = getattr(parsers, 'encodedir', _encodedir)
62
62
def decodedir(path):
    '''
    >>> decodedir(b'data/foo.i')
    'data/foo.i'
    >>> decodedir(b'data/foo.i.hg/bla.i')
    'data/foo.i/bla.i'
    >>> decodedir(b'data/foo.i.hg.hg/bla.i')
    'data/foo.i.hg/bla.i'
    '''
    # fast path: every encoded directory marker contains '.hg/'
    if ".hg/" not in path:
        return path
    # undo _encodedir; '.hg.hg/' is stripped last so the intermediate
    # '.d.hg/' / '.i.hg/' forms are consumed first
    path = path.replace(".d.hg/", ".d/")
    path = path.replace(".i.hg/", ".i/")
    return path.replace(".hg.hg/", ".hg/")
78
78
79 def _reserved():
79 def _reserved():
80 ''' characters that are problematic for filesystems
80 ''' characters that are problematic for filesystems
81
81
82 * ascii escapes (0..31)
82 * ascii escapes (0..31)
83 * ascii hi (126..255)
83 * ascii hi (126..255)
84 * windows specials
84 * windows specials
85
85
86 these characters will be escaped by encodefunctions
86 these characters will be escaped by encodefunctions
87 '''
87 '''
88 winreserved = [ord(x) for x in u'\\:*?"<>|']
88 winreserved = [ord(x) for x in u'\\:*?"<>|']
89 for x in range(32):
89 for x in range(32):
90 yield x
90 yield x
91 for x in range(126, 256):
91 for x in range(126, 256):
92 yield x
92 yield x
93 for x in winreserved:
93 for x in winreserved:
94 yield x
94 yield x
95
95
def _buildencodefun():
    '''Build the reversible (encode, decode) pair used for filename encoding.

    Uppercase letters and '_' are escaped as '_' + lowercase letter; all
    bytes from _reserved() are escaped as '~%02x'.

    >>> enc, dec = _buildencodefun()

    >>> enc(b'nothing/special.txt')
    'nothing/special.txt'
    >>> dec(b'nothing/special.txt')
    'nothing/special.txt'

    >>> enc(b'HELLO')
    '_h_e_l_l_o'
    >>> dec(b'_h_e_l_l_o')
    'HELLO'

    >>> enc(b'hello:world?')
    'hello~3aworld~3f'
    >>> dec(b'hello~3aworld~3f')
    'hello:world?'

    >>> enc(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
    >>> dec(b'the~07quick~adshot')
    'the\\x07quick\\xadshot'
    '''
    e = '_'
    xchr = pycompat.bytechr
    asciistr = list(map(xchr, range(127)))
    capitals = list(range(ord("A"), ord("Z") + 1))

    # start from the identity mapping for plain ascii...
    cmap = dict((x, x) for x in asciistr)
    # ...escape problematic bytes as '~xx'...
    for x in _reserved():
        cmap[xchr(x)] = "~%02x" % x
    # ...and case-fold capitals (and the escape char itself) to '_x'
    for x in capitals + [ord(e)]:
        cmap[xchr(x)] = e + xchr(x).lower()

    # invert cmap for decoding; use items() instead of the Python-2-only
    # iteritems() so this also runs under Python 3
    dmap = {}
    for k, v in cmap.items():
        dmap[v] = k
    def decode(s):
        # greedily consume 1- to 3-byte escape sequences
        i = 0
        while i < len(s):
            for l in pycompat.xrange(1, 4):
                try:
                    yield dmap[s[i:i + l]]
                    i += l
                    break
                except KeyError:
                    pass
            else:
                raise KeyError
    return (lambda s: ''.join([cmap[s[c:c + 1]]
                               for c in pycompat.xrange(len(s))]),
            lambda s: ''.join(list(decode(s))))
149
149
# shared encoder/decoder pair used by encodefilename/decodefilename below
_encodefname, _decodefname = _buildencodefun()
151
151
def encodefilename(s):
    '''
    >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
    'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
    '''
    # escape directory markers first, then reserved/uppercase characters
    dirsafe = encodedir(s)
    return _encodefname(dirsafe)
158
158
def decodefilename(s):
    '''
    >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
    'foo.i/bar.d/bla.hg/hi:world?/HELLO'
    '''
    # invert encodefilename: character escapes first, then dir markers
    plain = _decodefname(s)
    return decodedir(plain)
165
165
def _buildlowerencodefun():
    '''
    >>> f = _buildlowerencodefun()
    >>> f(b'nothing/special.txt')
    'nothing/special.txt'
    >>> f(b'HELLO')
    'hello'
    >>> f(b'hello:world?')
    'hello~3aworld~3f'
    >>> f(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
    '''
    xchr = pycompat.bytechr
    # identity mapping for plain ascii
    cmap = {}
    for code in pycompat.xrange(127):
        cmap[xchr(code)] = xchr(code)
    # escape reserved bytes as '~xx'
    for code in _reserved():
        cmap[xchr(code)] = "~%02x" % code
    # fold capitals to lowercase (not reversible, unlike _buildencodefun)
    for code in range(ord("A"), ord("Z") + 1):
        cmap[xchr(code)] = xchr(code).lower()
    def lowerencode(s):
        return "".join([cmap[c] for c in pycompat.iterbytestr(s)])
    return lowerencode
187
187
# prefer the C implementation when the parsers module provides one
lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()

# Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
_winres3 = ('aux', 'con', 'prn', 'nul') # length 3
_winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
def _auxencode(path, dotencode):
    '''
    Encodes filenames containing names reserved by Windows or which end in
    period or space. Does not touch other single reserved characters c.
    Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
    Additionally encodes space or period at the beginning, if dotencode is
    True. Parameter path is assumed to be all lowercase.
    A segment only needs encoding if a reserved name appears as a
    basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
    doesn't need encoding.

    >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
    >>> _auxencode(s.split(b'/'), True)
    ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
    >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
    >>> _auxencode(s.split(b'/'), False)
    ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
    >>> _auxencode([b'foo. '], True)
    ['foo.~20']
    >>> _auxencode([b' .foo'], True)
    ['~20.foo']
    '''
    # NOTE: mutates and returns the passed-in list of path components
    for i, n in enumerate(path):
        if not n:
            continue
        if dotencode and n[0] in '. ':
            # encode leading period or space ('~2e' / '~20')
            n = "~%02x" % ord(n[0:1]) + n[1:]
            path[i] = n
        else:
            # only the part before the first '.' can be a reserved name
            l = n.find('.')
            if l == -1:
                l = len(n)
            if ((l == 3 and n[:3] in _winres3) or
                (l == 4 and n[3:4] <= '9' and n[3:4] >= '1'
                        and n[:3] in _winres4)):
                # encode third letter ('aux' -> 'au~78')
                ec = "~%02x" % ord(n[2:3])
                n = n[0:2] + ec + n[3:]
                path[i] = n
        if n[-1] in '. ':
            # encode last period or space ('foo...' -> 'foo..~2e')
            path[i] = n[:-1] + "~%02x" % ord(n[-1:])
    return path
236
236
# encoded store paths longer than this fall back to the hashed encoding
_maxstorepathlen = 120
# characters kept from each directory level in hashed paths
_dirprefixlen = 8
# upper bound on the shortened-directories part of a hashed path
_maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
240
240
def _hashencode(path, dotencode):
    '''Return the non-reversible hashed store path for an over-long path.

    The result is 'dh/' + shortened directory prefixes + as much of the
    basename as fits + the sha1 hex digest of the full path + the original
    extension (see _hybridencode's docstring for the full scheme).
    '''
    digest = node.hex(hashlib.sha1(path).digest())
    le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/'
    parts = _auxencode(le, dotencode)
    basename = parts[-1]
    _root, ext = os.path.splitext(basename)
    sdirs = []
    sdirslen = 0
    for p in parts[:-1]:
        # keep only the first _dirprefixlen chars of each directory level
        d = p[:_dirprefixlen]
        if d[-1] in '. ':
            # Windows can't access dirs ending in period or space
            d = d[:-1] + '_'
        if sdirslen == 0:
            t = len(d)
        else:
            # +1 accounts for the joining '/'
            t = sdirslen + 1 + len(d)
            if t > _maxshortdirslen:
                break
        sdirs.append(d)
        sdirslen = t
    dirs = '/'.join(sdirs)
    if len(dirs) > 0:
        dirs += '/'
    res = 'dh/' + dirs + digest + ext
    # pad with the start of the basename up to _maxstorepathlen
    spaceleft = _maxstorepathlen - len(res)
    if spaceleft > 0:
        filler = basename[:spaceleft]
        res = 'dh/' + dirs + filler + digest + ext
    return res
271
271
def _hybridencode(path, dotencode):
    '''encodes path with a length limit

    Encodes all paths that begin with 'data/', according to the following.

    Default encoding (reversible):

    Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
    characters are encoded as '~xx', where xx is the two digit hex code
    of the character (see encodefilename).
    Relevant path components consisting of Windows reserved filenames are
    masked by encoding the third character ('aux' -> 'au~78', see _auxencode).

    Hashed encoding (not reversible):

    If the default-encoded path is longer than _maxstorepathlen, a
    non-reversible hybrid hashing of the path is done instead.
    This encoding uses up to _dirprefixlen characters of all directory
    levels of the lowerencoded path, but not more levels than can fit into
    _maxshortdirslen.
    Then follows the filler followed by the sha digest of the full path.
    The filler is the beginning of the basename of the lowerencoded path
    (the basename is everything after the last path separator). The filler
    is as long as possible, filling in characters from the basename until
    the encoded path has _maxstorepathlen characters (or all chars of the
    basename have been taken).
    The extension (e.g. '.i' or '.d') is preserved.

    The string 'data/' at the beginning is replaced with 'dh/', if the hashed
    encoding was used.
    '''
    direncoded = encodedir(path)
    segments = _encodefname(direncoded).split('/')
    encoded = '/'.join(_auxencode(segments, dotencode))
    if len(encoded) > _maxstorepathlen:
        # too long for the reversible form: fall back to hashed encoding
        return _hashencode(direncoded, dotencode)
    return encoded
309
309
def _pathencode(path):
    # Python fallback for the dot-encoded hybrid path encoding; replaced
    # by the C version below when the parsers module provides one.
    direncoded = encodedir(path)
    # a path already over the limit cannot get shorter by encoding, so
    # skip straight to the hashed form
    if len(path) > _maxstorepathlen:
        return _hashencode(direncoded, True)
    segments = _encodefname(direncoded).split('/')
    encoded = '/'.join(_auxencode(segments, True))
    if len(encoded) > _maxstorepathlen:
        return _hashencode(direncoded, True)
    return encoded
319
319
# prefer the C implementation of pathencode when available
_pathencode = getattr(parsers, 'pathencode', _pathencode)
321
321
def _plainhybridencode(f):
    # hybrid encoding without dot-encoding of leading '.'/' ' segments
    return _hybridencode(f, False)
324
324
def _calcmode(vfs):
    '''Return the permission mode for newly created store files, or None
    when the umask-derived default already matches (or stat fails).'''
    try:
        # files in .hg/ will be created using this mode
        mode = vfs.stat().st_mode
        # avoid some useless chmods
        if (0o777 & ~util.umask) == (0o777 & mode):
            mode = None
    except OSError:
        mode = None
    return mode
335
335
# top-level store files and directories returned by the copylist() methods
_data = ('narrowspec data meta 00manifest.d 00manifest.i'
         ' 00changelog.d 00changelog.i phaseroots obsstore')
338
338
def isrevlog(f, kind, st):
    '''Return whether directory entry f (of type kind) is a revlog file.'''
    if kind != stat.S_IFREG:
        return False
    return f[-2:] in ('.i', '.d')
341
341
class basicstore(object):
    '''base class for local repository stores'''
    def __init__(self, path, vfstype):
        # vfstype is a vfs factory; the store keeps both the raw vfs and a
        # filtered one that dir-encodes paths on the way through
        vfs = vfstype(path)
        self.path = vfs.base
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        self.vfs = vfsmod.filtervfs(vfs, encodedir)
        self.opener = self.vfs

    def join(self, f):
        '''Return the full filesystem path of store file f (dir-encoded).'''
        return self.path + '/' + encodedir(f)

    def _walk(self, relpath, recurse, filefilter=isrevlog):
        '''yields (unencoded, encoded, size)'''
        path = self.path
        if relpath:
            path += '/' + relpath
        striplen = len(self.path) + 1
        l = []
        if self.rawvfs.isdir(path):
            # iterative depth-first traversal of the store directory
            visit = [path]
            readdir = self.rawvfs.readdir
            while visit:
                p = visit.pop()
                for f, kind, st in readdir(p, stat=True):
                    fp = p + '/' + f
                    if filefilter(f, kind, st):
                        n = util.pconvert(fp[striplen:])
                        l.append((decodedir(n), n, st.st_size))
                    elif kind == stat.S_IFDIR and recurse:
                        visit.append(fp)
        l.sort()
        return l

    def datafiles(self, matcher=None):
        # matcher is accepted for interface compatibility with subclasses
        # but not applied here
        return self._walk('data', True) + self._walk('meta', True)

    def topfiles(self):
        # yield manifest before changelog
        return reversed(self._walk('', False))

    def walk(self, matcher=None):
        '''yields (unencoded, encoded, size)

        if a matcher is passed, only storage files of tracked paths that
        match the matcher are yielded
        '''
        # yield data files first
        for x in self.datafiles(matcher):
            yield x
        for x in self.topfiles():
            yield x

    def copylist(self):
        # files/directories to copy when cloning via hardlink/copy
        return ['requires'] + _data.split()

    def write(self, tr):
        # no-op here; subclasses with caches (fncachestore) override
        pass

    def invalidatecaches(self):
        pass

    def markremoved(self, fn):
        pass

    def __contains__(self, path):
        '''Checks if the store contains path'''
        path = "/".join(("data", path))
        # file?
        if self.vfs.exists(path + ".i"):
            return True
        # dir?
        if not path.endswith("/"):
            path = path + "/"
        return self.vfs.exists(path)
419
419
class encodedstore(basicstore):
    '''store using the reversible filename encoding (encodefilename).'''
    def __init__(self, path, vfstype):
        vfs = vfstype(path + '/store')
        self.path = vfs.base
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        self.vfs = vfsmod.filtervfs(vfs, encodefilename)
        self.opener = self.vfs

    def datafiles(self, matcher=None):
        for a, b, size in super(encodedstore, self).datafiles():
            try:
                a = decodefilename(a)
            except KeyError:
                # undecodable entry: yield it with a None unencoded name
                a = None
            if a is not None and not _matchtrackedpath(a, matcher):
                continue
            yield a, b, size

    def join(self, f):
        return self.path + '/' + encodefilename(f)

    def copylist(self):
        return (['requires', '00changelog.i'] +
                ['store/' + f for f in _data.split()])
446
446
class fncache(object):
    '''Lazily-loaded cache of the store file names, backed by the
    '.hg/store/fncache' file.'''
    # the filename used to be partially encoded
    # hence the encodedir/decodedir dance
    def __init__(self, vfs):
        self.vfs = vfs
        # None until _load() runs; set of known entries afterwards
        self.entries = None
        # True when entries were removed and a full rewrite is needed
        self._dirty = False
        # set of new additions to fncache
        self.addls = set()

    def _load(self):
        '''fill the entries from the fncache file'''
        self._dirty = False
        try:
            fp = self.vfs('fncache', mode='rb')
        except IOError:
            # skip nonexistent file
            self.entries = set()
            return
        self.entries = set(decodedir(fp.read()).splitlines())
        if '' in self.entries:
            # an empty entry indicates corruption; report the line number
            fp.seek(0)
            for n, line in enumerate(util.iterfile(fp)):
                if not line.rstrip('\n'):
                    t = _('invalid entry in fncache, line %d') % (n + 1)
                    raise error.Abort(t)
        fp.close()

    def write(self, tr):
        # dirty (removals happened): rewrite the whole file, folding in
        # any pending additions
        if self._dirty:
            assert self.entries is not None
            self.entries = self.entries | self.addls
            self.addls = set()
            tr.addbackup('fncache')
            fp = self.vfs('fncache', mode='wb', atomictemp=True)
            if self.entries:
                fp.write(encodedir('\n'.join(self.entries) + '\n'))
            fp.close()
            self._dirty = False
        if self.addls:
            # if we have just new entries, let's append them to the fncache
            tr.addbackup('fncache')
            fp = self.vfs('fncache', mode='ab', atomictemp=True)
            if self.addls:
                fp.write(encodedir('\n'.join(self.addls) + '\n'))
            fp.close()
            # force a reload on next access so entries reflect the file
            self.entries = None
            self.addls = set()

    def add(self, fn):
        if self.entries is None:
            self._load()
        if fn not in self.entries:
            self.addls.add(fn)

    def remove(self, fn):
        if self.entries is None:
            self._load()
        if fn in self.addls:
            # not yet written out: just drop the pending addition
            self.addls.remove(fn)
            return
        try:
            self.entries.remove(fn)
            self._dirty = True
        except KeyError:
            pass

    def __contains__(self, fn):
        if fn in self.addls:
            return True
        if self.entries is None:
            self._load()
        return fn in self.entries

    def __iter__(self):
        if self.entries is None:
            self._load()
        return iter(self.entries | self.addls)
525
525
class _fncachevfs(vfsmod.proxyvfs):
    '''vfs proxy that records newly written store files in the fncache
    and encodes paths with the store encoding before delegating.'''
    def __init__(self, vfs, fnc, encode):
        vfsmod.proxyvfs.__init__(self, vfs)
        self.fncache = fnc
        self.encode = encode

    def __call__(self, path, mode='r', *args, **kw):
        encoded = self.encode(path)
        # only writes to revlog data/meta paths need fncache registration
        if mode not in ('r', 'rb') and (path.startswith('data/') or
                                        path.startswith('meta/')):
            # do not trigger a fncache load when adding a file that already is
            # known to exist.
            notload = self.fncache.entries is None and self.vfs.exists(encoded)
            if notload and 'a' in mode and not self.vfs.stat(encoded).st_size:
                # when appending to an existing file, if the file has size zero,
                # it should be considered as missing. Such zero-size files are
                # the result of truncation when a transaction is aborted.
                notload = False
            if not notload:
                self.fncache.add(path)
        return self.vfs(encoded, mode, *args, **kw)

    def join(self, path):
        if path:
            return self.vfs.join(self.encode(path))
        else:
            return self.vfs.join(path)
553
553
class fncachestore(basicstore):
    """Store flavor that keeps an index ('fncache') of tracked filenames.

    Filenames are passed through an encoder before touching the
    filesystem (hybrid encoding, optionally with dot-encoding), so the
    fncache is needed to enumerate the logical names back.
    """

    def __init__(self, path, vfstype, dotencode):
        # choose the on-disk name encoding; dotencode additionally
        # escapes leading '.'/' ' path components
        if dotencode:
            encode = _pathencode
        else:
            encode = _plainhybridencode
        self.encode = encode
        vfs = vfstype(path + '/store')
        self.path = vfs.base
        self.pathsep = self.path + '/'
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        # rawvfs bypasses name encoding; self.vfs records writes in fncache
        self.rawvfs = vfs
        fnc = fncache(vfs)
        self.fncache = fnc
        self.vfs = _fncachevfs(vfs, fnc, encode)
        self.opener = self.vfs

    def join(self, f):
        # filesystem path of logical name ``f`` (encoded)
        return self.pathsep + self.encode(f)

    def getsize(self, path):
        # ``path`` is already encoded; stat via the raw (unencoded) vfs
        return self.rawvfs.stat(path).st_size

    def datafiles(self, matcher=None):
        """Yield (logical name, encoded name, size) for tracked files,
        restricted to ``matcher`` when given; silently skip entries whose
        backing file no longer exists."""
        for f in sorted(self.fncache):
            if not _matchtrackedpath(f, matcher):
                continue
            ef = self.encode(f)
            try:
                yield f, ef, self.getsize(ef)
            except OSError as err:
                # a missing file is not an error here; anything else is
                if err.errno != errno.ENOENT:
                    raise

    def copylist(self):
        # files/directories a streaming clone must copy, relative to repo root
        d = ('narrowspec data meta dh fncache phaseroots obsstore'
             ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
        return (['requires', '00changelog.i'] +
                ['store/' + f for f in d.split()])

    def write(self, tr):
        # flush pending fncache additions within transaction ``tr``
        self.fncache.write(tr)

    def invalidatecaches(self):
        # drop the loaded entry set and pending additions; they will be
        # reloaded lazily from disk on next access
        self.fncache.entries = None
        self.fncache.addls = set()

    def markremoved(self, fn):
        self.fncache.remove(fn)

    def _exists(self, f):
        # True if logical name ``f`` has a backing file on disk
        ef = self.encode(f)
        try:
            self.getsize(ef)
            return True
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # nonexistent entry
            return False

    def __contains__(self, path):
        '''Checks if the store contains path'''
        path = "/".join(("data", path))
        # check for files (exact match)
        e = path + '.i'
        if e in self.fncache and self._exists(e):
            return True
        # now check for directories (prefix match)
        if not path.endswith('/'):
            path += '/'
        for e in self.fncache:
            if e.startswith(path) and self._exists(e):
                return True
        return False
# ---- second file in changeset r41125: mercurial/vfs.py (hunk @@ -1,671 +1,671 @@) ----
1 # vfs.py - Mercurial 'vfs' classes
1 # vfs.py - Mercurial 'vfs' classes
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import contextlib
9 import contextlib
10 import errno
10 import errno
11 import os
11 import os
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import threading
14 import threading
15
15
16 from .i18n import _
16 from .i18n import _
17 from . import (
17 from . import (
18 encoding,
18 encoding,
19 error,
19 error,
20 pathutil,
20 pathutil,
21 pycompat,
21 pycompat,
22 util,
22 util,
23 )
23 )
24
24
def _avoidambig(path, oldstat):
    """Avoid file stat ambiguity forcibly

    This function causes copying ``path`` file, if it is owned by
    another (see issue5418 and issue5584 for detail).
    """
    def _resolved():
        # return whether file stat ambiguity is (already) avoided
        current = util.filestat.frompath(path)
        return (not current.isambig(oldstat)
                or current.avoidambig(path, oldstat))

    if _resolved():
        return
    # simply copy to change owner of path to get privilege to
    # advance mtime (see issue5418)
    util.rename(util.mktempcopy(path), path)
    _resolved()
41
41
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def _auditpath(self, path, mode):
        # hook for subclasses to validate a path before use; no-op here
        pass

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return []

    @util.propertycache
    def open(self):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # alias for __call__, cached on first access
        return self.__call__

    def read(self, path):
        # whole-file binary read
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False, **kwargs):
        # whole-file binary write; extra kwargs are forwarded to __call__
        with self(path, 'wb', backgroundclose=backgroundclose, **kwargs) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    # the following thin wrappers resolve ``path`` (which may be None,
    # meaning the vfs root) through self.join() and delegate to os/util
    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None):
        """Create a temp file under ``dir`` (vfs-relative) and return
        (fd, name) with ``name`` kept relative to the vfs root."""
        fd, name = pycompat.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir))
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return util.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if destination file is guarded by any lock
        (e.g. repo.lock or repo.wlock).

        To avoid file stat ambiguity forcibly, checkambig=True involves
        copying ``src`` file, if it is owned by another. Therefore, use
        checkambig=True only in limited cases (see also issue5418 and
        issue5584 for detail).
        """
        self._auditpath(dst, 'w')
        srcpath = self.join(src)
        dstpath = self.join(dst)
        oldstat = checkambig and util.filestat.frompath(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(srcpath, dstpath)
            # destination existed: make sure its stat cannot be confused
            # with the pre-rename file's stat
            _avoidambig(dstpath, oldstat)
            return ret
        return util.rename(srcpath, dstpath)

    def readlink(self, path):
        return util.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmdir(self, path=None):
        """Remove an empty directory."""
        return os.rmdir(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        # l: symlink flag, x: executable flag
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def tryunlink(self, path=None):
        """Attempt to remove a file, ignoring missing file errors."""
        util.tryunlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False, rmdir=True):
        return util.unlinkpath(self.join(path), ignoremissing=ignoremissing,
                               rmdir=rmdir)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # Sharing backgroundfilecloser between threads is complex and using
        # multiple instances puts us at risk of running out of file descriptors
        # only allow to use backgroundfilecloser when in main thread.
        if not isinstance(threading.currentThread(), threading._MainThread):
            yield
            return
        # proxy vfs objects delegate the closer to the wrapped vfs
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort(
                _('can only have 1 active background file closer'))

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                # always detach so a later context manager can attach again
                vfs._backgroundfilecloser = None
305
305
306 class vfs(abstractvfs):
306 class vfs(abstractvfs):
307 '''Operate files relative to a base directory
307 '''Operate files relative to a base directory
308
308
309 This class is used to hide the details of COW semantics and
309 This class is used to hide the details of COW semantics and
310 remote file access from higher level code.
310 remote file access from higher level code.
311
311
312 'cacheaudited' should be enabled only if (a) vfs object is short-lived, or
312 'cacheaudited' should be enabled only if (a) vfs object is short-lived, or
313 (b) the base directory is managed by hg and considered sort-of append-only.
313 (b) the base directory is managed by hg and considered sort-of append-only.
314 See pathutil.pathauditor() for details.
314 See pathutil.pathauditor() for details.
315 '''
315 '''
316 def __init__(self, base, audit=True, cacheaudited=False, expandpath=False,
316 def __init__(self, base, audit=True, cacheaudited=False, expandpath=False,
317 realpath=False):
317 realpath=False):
318 if expandpath:
318 if expandpath:
319 base = util.expandpath(base)
319 base = util.expandpath(base)
320 if realpath:
320 if realpath:
321 base = os.path.realpath(base)
321 base = os.path.realpath(base)
322 self.base = base
322 self.base = base
323 self._audit = audit
323 self._audit = audit
324 if audit:
324 if audit:
325 self.audit = pathutil.pathauditor(self.base, cached=cacheaudited)
325 self.audit = pathutil.pathauditor(self.base, cached=cacheaudited)
326 else:
326 else:
327 self.audit = (lambda path, mode=None: True)
327 self.audit = (lambda path, mode=None: True)
328 self.createmode = None
328 self.createmode = None
329 self._trustnlink = None
329 self._trustnlink = None
330
330
331 @util.propertycache
331 @util.propertycache
332 def _cansymlink(self):
332 def _cansymlink(self):
333 return util.checklink(self.base)
333 return util.checklink(self.base)
334
334
335 @util.propertycache
335 @util.propertycache
336 def _chmod(self):
336 def _chmod(self):
337 return util.checkexec(self.base)
337 return util.checkexec(self.base)
338
338
339 def _fixfilemode(self, name):
339 def _fixfilemode(self, name):
340 if self.createmode is None or not self._chmod:
340 if self.createmode is None or not self._chmod:
341 return
341 return
342 os.chmod(name, self.createmode & 0o666)
342 os.chmod(name, self.createmode & 0o666)
343
343
344 def _auditpath(self, path, mode):
344 def _auditpath(self, path, mode):
345 if self._audit:
345 if self._audit:
346 if os.path.isabs(path) and path.startswith(self.base):
346 if os.path.isabs(path) and path.startswith(self.base):
347 path = os.path.relpath(path, self.base)
347 path = os.path.relpath(path, self.base)
348 r = util.checkosfilename(path)
348 r = util.checkosfilename(path)
349 if r:
349 if r:
350 raise error.Abort("%s: %r" % (r, path))
350 raise error.Abort("%s: %r" % (r, path))
351 self.audit(path, mode=mode)
351 self.audit(path, mode=mode)
352
352
353 def __call__(self, path, mode="r", atomictemp=False, notindexed=False,
353 def __call__(self, path, mode="r", atomictemp=False, notindexed=False,
354 backgroundclose=False, checkambig=False, auditpath=True,
354 backgroundclose=False, checkambig=False, auditpath=True,
355 makeparentdirs=True):
355 makeparentdirs=True):
356 '''Open ``path`` file, which is relative to vfs root.
356 '''Open ``path`` file, which is relative to vfs root.
357
357
358 By default, parent directories are created as needed. Newly created
358 By default, parent directories are created as needed. Newly created
359 directories are marked as "not to be indexed by the content indexing
359 directories are marked as "not to be indexed by the content indexing
360 service", if ``notindexed`` is specified for "write" mode access.
360 service", if ``notindexed`` is specified for "write" mode access.
361 Set ``makeparentdirs=False`` to not create directories implicitly.
361 Set ``makeparentdirs=False`` to not create directories implicitly.
362
362
363 If ``backgroundclose`` is passed, the file may be closed asynchronously.
363 If ``backgroundclose`` is passed, the file may be closed asynchronously.
364 It can only be used if the ``self.backgroundclosing()`` context manager
364 It can only be used if the ``self.backgroundclosing()`` context manager
365 is active. This should only be specified if the following criteria hold:
365 is active. This should only be specified if the following criteria hold:
366
366
367 1. There is a potential for writing thousands of files. Unless you
367 1. There is a potential for writing thousands of files. Unless you
368 are writing thousands of files, the performance benefits of
368 are writing thousands of files, the performance benefits of
369 asynchronously closing files is not realized.
369 asynchronously closing files is not realized.
370 2. Files are opened exactly once for the ``backgroundclosing``
370 2. Files are opened exactly once for the ``backgroundclosing``
371 active duration and are therefore free of race conditions between
371 active duration and are therefore free of race conditions between
372 closing a file on a background thread and reopening it. (If the
372 closing a file on a background thread and reopening it. (If the
373 file were opened multiple times, there could be unflushed data
373 file were opened multiple times, there could be unflushed data
374 because the original file handle hasn't been flushed/closed yet.)
374 because the original file handle hasn't been flushed/closed yet.)
375
375
376 ``checkambig`` argument is passed to atomictemplfile (valid
376 ``checkambig`` argument is passed to atomictemplfile (valid
377 only for writing), and is useful only if target file is
377 only for writing), and is useful only if target file is
378 guarded by any lock (e.g. repo.lock or repo.wlock).
378 guarded by any lock (e.g. repo.lock or repo.wlock).
379
379
380 To avoid file stat ambiguity forcibly, checkambig=True involves
380 To avoid file stat ambiguity forcibly, checkambig=True involves
381 copying ``path`` file opened in "append" mode (e.g. for
381 copying ``path`` file opened in "append" mode (e.g. for
382 truncation), if it is owned by another. Therefore, use
382 truncation), if it is owned by another. Therefore, use
383 combination of append mode and checkambig=True only in limited
383 combination of append mode and checkambig=True only in limited
384 cases (see also issue5418 and issue5584 for detail).
384 cases (see also issue5418 and issue5584 for detail).
385 '''
385 '''
386 if auditpath:
386 if auditpath:
387 self._auditpath(path, mode)
387 self._auditpath(path, mode)
388 f = self.join(path)
388 f = self.join(path)
389
389
390 if "b" not in mode:
390 if "b" not in mode:
391 mode += "b" # for that other OS
391 mode += "b" # for that other OS
392
392
393 nlink = -1
393 nlink = -1
394 if mode not in ('r', 'rb'):
394 if mode not in ('r', 'rb'):
395 dirname, basename = util.split(f)
395 dirname, basename = util.split(f)
396 # If basename is empty, then the path is malformed because it points
396 # If basename is empty, then the path is malformed because it points
397 # to a directory. Let the posixfile() call below raise IOError.
397 # to a directory. Let the posixfile() call below raise IOError.
398 if basename:
398 if basename:
399 if atomictemp:
399 if atomictemp:
400 if makeparentdirs:
400 if makeparentdirs:
401 util.makedirs(dirname, self.createmode, notindexed)
401 util.makedirs(dirname, self.createmode, notindexed)
402 return util.atomictempfile(f, mode, self.createmode,
402 return util.atomictempfile(f, mode, self.createmode,
403 checkambig=checkambig)
403 checkambig=checkambig)
404 try:
404 try:
405 if 'w' in mode:
405 if 'w' in mode:
406 util.unlink(f)
406 util.unlink(f)
407 nlink = 0
407 nlink = 0
408 else:
408 else:
409 # nlinks() may behave differently for files on Windows
409 # nlinks() may behave differently for files on Windows
410 # shares if the file is open.
410 # shares if the file is open.
411 with util.posixfile(f):
411 with util.posixfile(f):
412 nlink = util.nlinks(f)
412 nlink = util.nlinks(f)
413 if nlink < 1:
413 if nlink < 1:
414 nlink = 2 # force mktempcopy (issue1922)
414 nlink = 2 # force mktempcopy (issue1922)
415 except (OSError, IOError) as e:
415 except (OSError, IOError) as e:
416 if e.errno != errno.ENOENT:
416 if e.errno != errno.ENOENT:
417 raise
417 raise
418 nlink = 0
418 nlink = 0
419 if makeparentdirs:
419 if makeparentdirs:
420 util.makedirs(dirname, self.createmode, notindexed)
420 util.makedirs(dirname, self.createmode, notindexed)
421 if nlink > 0:
421 if nlink > 0:
422 if self._trustnlink is None:
422 if self._trustnlink is None:
423 self._trustnlink = nlink > 1 or util.checknlink(f)
423 self._trustnlink = nlink > 1 or util.checknlink(f)
424 if nlink > 1 or not self._trustnlink:
424 if nlink > 1 or not self._trustnlink:
425 util.rename(util.mktempcopy(f), f)
425 util.rename(util.mktempcopy(f), f)
426 fp = util.posixfile(f, mode)
426 fp = util.posixfile(f, mode)
427 if nlink == 0:
427 if nlink == 0:
428 self._fixfilemode(f)
428 self._fixfilemode(f)
429
429
430 if checkambig:
430 if checkambig:
431 if mode in ('r', 'rb'):
431 if mode in ('r', 'rb'):
432 raise error.Abort(_('implementation error: mode %s is not'
432 raise error.Abort(_('implementation error: mode %s is not'
433 ' valid for checkambig=True') % mode)
433 ' valid for checkambig=True') % mode)
434 fp = checkambigatclosing(fp)
434 fp = checkambigatclosing(fp)
435
435
436 if (backgroundclose and
436 if (backgroundclose and
437 isinstance(threading.currentThread(), threading._MainThread)):
437 isinstance(threading.currentThread(), threading._MainThread)):
438 if not self._backgroundfilecloser:
438 if not self._backgroundfilecloser:
439 raise error.Abort(_('backgroundclose can only be used when a '
439 raise error.Abort(_('backgroundclose can only be used when a '
440 'backgroundclosing context manager is active')
440 'backgroundclosing context manager is active')
441 )
441 )
442
442
443 fp = delayclosedfile(fp, self._backgroundfilecloser)
443 fp = delayclosedfile(fp, self._backgroundfilecloser)
444
444
445 return fp
445 return fp
446
446
447 def symlink(self, src, dst):
447 def symlink(self, src, dst):
448 self.audit(dst)
448 self.audit(dst)
449 linkname = self.join(dst)
449 linkname = self.join(dst)
450 util.tryunlink(linkname)
450 util.tryunlink(linkname)
451
451
452 util.makedirs(os.path.dirname(linkname), self.createmode)
452 util.makedirs(os.path.dirname(linkname), self.createmode)
453
453
454 if self._cansymlink:
454 if self._cansymlink:
455 try:
455 try:
456 os.symlink(src, linkname)
456 os.symlink(src, linkname)
457 except OSError as err:
457 except OSError as err:
458 raise OSError(err.errno, _('could not symlink to %r: %s') %
458 raise OSError(err.errno, _('could not symlink to %r: %s') %
459 (src, encoding.strtolocal(err.strerror)),
459 (src, encoding.strtolocal(err.strerror)),
460 linkname)
460 linkname)
461 else:
461 else:
462 self.write(dst, src)
462 self.write(dst, src)
463
463
464 def join(self, path, *insidef):
464 def join(self, path, *insidef):
465 if path:
465 if path:
466 return os.path.join(self.base, path, *insidef)
466 return os.path.join(self.base, path, *insidef)
467 else:
467 else:
468 return self.base
468 return self.base
469
469
opener = vfs  # historical alias; legacy callers still use the 'opener' name
471
471
class proxyvfs(abstractvfs):
    """Base class for vfs wrappers that delegate to another vfs instance.

    Subclasses store the wrapped vfs as ``self.vfs`` and forward calls to
    it.  The ``options`` attribute is always read from and written to the
    wrapped vfs so every proxy layer observes the same configuration.
    """

    def __init__(self, vfs):
        self.vfs = vfs

    def _getoptions(self):
        return self.vfs.options

    def _setoptions(self, value):
        self.vfs.options = value

    # property() call form instead of decorators: both accessors proxy
    # straight through to the wrapped vfs
    options = property(_getoptions, _setoptions)
483
483
class filtervfs(proxyvfs, abstractvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        proxyvfs.__init__(self, vfs)
        # callable mapping a relative path to the path actually used on
        # the wrapped vfs
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # open via the wrapped vfs, but under the filtered name
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        # combine the components first so the filter sees the full
        # relative path, then resolve against the wrapped vfs
        combined = self.vfs.reljoin(path, *insidef)
        return self.vfs.join(self._filter(combined))

filteropener = filtervfs  # historical alias for legacy callers
501
501
class readonlyvfs(proxyvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        proxyvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes may pass through; anything else aborts
        if mode in ('r', 'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise error.Abort(_('this vfs is read only'))

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
515
515
class closewrapbase(object):
    """Base class of wrapper, which hooks closing

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh):
        # go through object.__setattr__ so the assignment is NOT proxied
        # to the wrapped file by our own __setattr__ below
        object.__setattr__(self, r'_origfh', fh)

    def __getattr__(self, name):
        # any attribute we don't define ourselves comes from the wrapped
        # file handle
        return getattr(self._origfh, name)

    def __setattr__(self, name, value):
        return setattr(self._origfh, name, value)

    def __delattr__(self, name):
        return delattr(self._origfh, name)

    def __enter__(self):
        self._origfh.__enter__()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # subclasses must decide what "closing" means for them
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def close(self):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))
541 raise NotImplementedError('attempted instantiating ' + str(type(self)))
542
542
class delayclosedfile(closewrapbase):
    """Proxy for a file object whose close is delayed.

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh, closer):
        super(delayclosedfile, self).__init__(fh)
        # the backgroundfilecloser that will perform the real close;
        # set via object.__setattr__ to avoid proxying to the file
        object.__setattr__(self, r'_closer', closer)

    def __exit__(self, exc_type, exc_value, exc_tb):
        # context-manager exit and explicit close behave identically
        self.close()

    def close(self):
        # hand the real file handle to the closer instead of closing
        # synchronously
        self._closer.close(self._origfh)
557
557
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        # _running: worker threads should keep polling the queue
        self._running = False
        # _entered: the context manager is active; close() requires this
        self._entered = False
        self._threads = []
        # first exception raised by any worker, re-raised from close()
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = pycompat.iswindows
        enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount')
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue')
        threadcount = ui.configint('worker', 'backgroundclosethreadcount')

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        # bounded queue so producers block instead of queueing unbounded
        # numbers of open file descriptors
        self._queue = pycompat.queue.Queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        """Mark the closer active; close() may only be called while entered."""
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        """Stop the workers and wait for queued closes to finish."""
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                # short timeout so the loop can notice _running going False
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except pycompat.queue.Empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
645
645
class checkambigatclosing(closewrapbase):
    """Proxy for a file object, to avoid ambiguity of file stat

    See also util.filestat for detail about "ambiguity of file stat".

    This proxy is useful only if the target file is guarded by any
    lock (e.g. repo.lock or repo.wlock)

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh):
        super(checkambigatclosing, self).__init__(fh)
        # snapshot the stat before any writes happen, so ambiguity can be
        # detected at close time; set via object.__setattr__ to avoid
        # proxying the assignment to the wrapped file
        object.__setattr__(self, r'_oldstat', util.filestat.frompath(fh.name))

    def _checkambig(self):
        prev = self._oldstat
        # only meaningful if the file existed (had a stat) beforehand
        if prev.stat:
            _avoidambig(self._origfh.name, prev)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._origfh.__exit__(exc_type, exc_value, exc_tb)
        self._checkambig()

    def close(self):
        self._origfh.close()
        self._checkambig()
General Comments 0
You need to be logged in to leave comments. Login now