##// END OF EJS Templates
vfs: add a `register_file` method on the vfs class...
marmoute -
r48236:9ab54aa5 default
parent child Browse files
Show More
@@ -1,824 +1,829 b''
1 # store.py - repository store handling for Mercurial
1 # store.py - repository store handling for Mercurial
2 #
2 #
3 # Copyright 2008 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2008 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import functools
11 import functools
12 import os
12 import os
13 import re
13 import re
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import getattr
17 from .pycompat import getattr
18 from .node import hex
18 from .node import hex
19 from . import (
19 from . import (
20 changelog,
20 changelog,
21 error,
21 error,
22 manifest,
22 manifest,
23 policy,
23 policy,
24 pycompat,
24 pycompat,
25 util,
25 util,
26 vfs as vfsmod,
26 vfs as vfsmod,
27 )
27 )
28 from .utils import hashutil
28 from .utils import hashutil
29
29
# C implementation of the encoding helpers (falls back to the pure Python
# versions defined below when the extension is unavailable)
parsers = policy.importmod('parsers')
# how much bytes should be read from fncache in one read
# It is done to prevent loading large fncache files into memory
fncache_chunksize = 10 ** 6
34
34
35
35
36 def _matchtrackedpath(path, matcher):
36 def _matchtrackedpath(path, matcher):
37 """parses a fncache entry and returns whether the entry is tracking a path
37 """parses a fncache entry and returns whether the entry is tracking a path
38 matched by matcher or not.
38 matched by matcher or not.
39
39
40 If matcher is None, returns True"""
40 If matcher is None, returns True"""
41
41
42 if matcher is None:
42 if matcher is None:
43 return True
43 return True
44 path = decodedir(path)
44 path = decodedir(path)
45 if path.startswith(b'data/'):
45 if path.startswith(b'data/'):
46 return matcher(path[len(b'data/') : -len(b'.i')])
46 return matcher(path[len(b'data/') : -len(b'.i')])
47 elif path.startswith(b'meta/'):
47 elif path.startswith(b'meta/'):
48 return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')])
48 return matcher.visitdir(path[len(b'meta/') : -len(b'/00manifest.i')])
49
49
50 raise error.ProgrammingError(b"cannot decode path %s" % path)
50 raise error.ProgrammingError(b"cannot decode path %s" % path)
51
51
52
52
53 # This avoids a collision between a file named foo and a dir named
53 # This avoids a collision between a file named foo and a dir named
54 # foo.i or foo.d
54 # foo.i or foo.d
55 def _encodedir(path):
55 def _encodedir(path):
56 """
56 """
57 >>> _encodedir(b'data/foo.i')
57 >>> _encodedir(b'data/foo.i')
58 'data/foo.i'
58 'data/foo.i'
59 >>> _encodedir(b'data/foo.i/bla.i')
59 >>> _encodedir(b'data/foo.i/bla.i')
60 'data/foo.i.hg/bla.i'
60 'data/foo.i.hg/bla.i'
61 >>> _encodedir(b'data/foo.i.hg/bla.i')
61 >>> _encodedir(b'data/foo.i.hg/bla.i')
62 'data/foo.i.hg.hg/bla.i'
62 'data/foo.i.hg.hg/bla.i'
63 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
63 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
64 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
64 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
65 """
65 """
66 return (
66 return (
67 path.replace(b".hg/", b".hg.hg/")
67 path.replace(b".hg/", b".hg.hg/")
68 .replace(b".i/", b".i.hg/")
68 .replace(b".i/", b".i.hg/")
69 .replace(b".d/", b".d.hg/")
69 .replace(b".d/", b".d.hg/")
70 )
70 )
71
71
72
72
# prefer the C implementation when the parsers extension provides one
encodedir = getattr(parsers, 'encodedir', _encodedir)
74
74
75
75
def decodedir(path):
    """Reverse the directory escaping performed by ``encodedir``.

    >>> decodedir(b'data/foo.i')
    'data/foo.i'
    >>> decodedir(b'data/foo.i.hg/bla.i')
    'data/foo.i/bla.i'
    >>> decodedir(b'data/foo.i.hg.hg/bla.i')
    'data/foo.i.hg/bla.i'
    """
    # fast path: nothing was escaped in the first place
    if b".hg/" not in path:
        return path
    # order matters: undo '.d.hg/' and '.i.hg/' before '.hg.hg/'
    for escaped, plain in (
        (b".d.hg/", b".d/"),
        (b".i.hg/", b".i/"),
        (b".hg.hg/", b".hg/"),
    ):
        path = path.replace(escaped, plain)
    return path
92
92
93
93
94 def _reserved():
94 def _reserved():
95 """characters that are problematic for filesystems
95 """characters that are problematic for filesystems
96
96
97 * ascii escapes (0..31)
97 * ascii escapes (0..31)
98 * ascii hi (126..255)
98 * ascii hi (126..255)
99 * windows specials
99 * windows specials
100
100
101 these characters will be escaped by encodefunctions
101 these characters will be escaped by encodefunctions
102 """
102 """
103 winreserved = [ord(x) for x in u'\\:*?"<>|']
103 winreserved = [ord(x) for x in u'\\:*?"<>|']
104 for x in range(32):
104 for x in range(32):
105 yield x
105 yield x
106 for x in range(126, 256):
106 for x in range(126, 256):
107 yield x
107 yield x
108 for x in winreserved:
108 for x in winreserved:
109 yield x
109 yield x
110
110
111
111
def _buildencodefun():
    """Build the (encode, decode) pair used for store filename encoding.

    The two returned functions are exact inverses of each other: reserved
    bytes become '~xx' hex escapes and uppercase letters become '_'-prefixed
    lowercase letters ('_' itself is doubled to '__').

    >>> enc, dec = _buildencodefun()

    >>> enc(b'nothing/special.txt')
    'nothing/special.txt'
    >>> dec(b'nothing/special.txt')
    'nothing/special.txt'

    >>> enc(b'HELLO')
    '_h_e_l_l_o'
    >>> dec(b'_h_e_l_l_o')
    'HELLO'

    >>> enc(b'hello:world?')
    'hello~3aworld~3f'
    >>> dec(b'hello~3aworld~3f')
    'hello:world?'

    >>> enc(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
    >>> dec(b'the~07quick~adshot')
    'the\\x07quick\\xadshot'
    """
    e = b'_'  # escape prefix for uppercase letters
    xchr = pycompat.bytechr
    asciistr = list(map(xchr, range(127)))
    capitals = list(range(ord(b"A"), ord(b"Z") + 1))

    # start from the identity mapping for plain ascii ...
    cmap = {x: x for x in asciistr}
    # ... escape problematic bytes as '~xx' ...
    for x in _reserved():
        cmap[xchr(x)] = b"~%02x" % x
    # ... and fold capitals (and '_' itself) to '_' + lowercase
    for x in capitals + [ord(e)]:
        cmap[xchr(x)] = e + xchr(x).lower()

    # dmap is the exact inverse of cmap
    dmap = {}
    for k, v in pycompat.iteritems(cmap):
        dmap[v] = k

    def decode(s):
        # greedy-by-length decode: escape sequences are 1 to 3 bytes long
        i = 0
        while i < len(s):
            for l in pycompat.xrange(1, 4):
                try:
                    yield dmap[s[i : i + l]]
                    i += l
                    break
                except KeyError:
                    pass
            else:
                # no 1/2/3-byte prefix matched: the input is corrupt
                raise KeyError

    return (
        lambda s: b''.join(
            [cmap[s[c : c + 1]] for c in pycompat.xrange(len(s))]
        ),
        lambda s: b''.join(list(decode(s))),
    )
170
170
171
171
# module-level encode/decode pair; built once at import time
_encodefname, _decodefname = _buildencodefun()
173
173
174
174
def encodefilename(s):
    """Apply both directory and filename encoding to ``s``.

    >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
    'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
    """
    # directories first, then per-character filename escaping
    direncoded = encodedir(s)
    return _encodefname(direncoded)
181
181
182
182
def decodefilename(s):
    """Undo ``encodefilename`` (filename escaping, then directory escaping).

    >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
    'foo.i/bar.d/bla.hg/hi:world?/HELLO'
    """
    fndecoded = _decodefname(s)
    return decodedir(fndecoded)
189
189
190
190
def _buildlowerencodefun():
    """Build the case-folding variant of the filename encoder.

    >>> f = _buildlowerencodefun()
    >>> f(b'nothing/special.txt')
    'nothing/special.txt'
    >>> f(b'HELLO')
    'hello'
    >>> f(b'hello:world?')
    'hello~3aworld~3f'
    >>> f(b'the\\x07quick\\xADshot')
    'the~07quick~adshot'
    """
    xchr = pycompat.bytechr
    # identity mapping for the plain ascii range ...
    cmap = {}
    for code in pycompat.xrange(127):
        cmap[xchr(code)] = xchr(code)
    # ... problematic bytes become '~xx' escapes ...
    for code in _reserved():
        cmap[xchr(code)] = b"~%02x" % code
    # ... and capitals are simply lowercased (not reversible)
    for code in range(ord(b"A"), ord(b"Z") + 1):
        cmap[xchr(code)] = xchr(code).lower()

    def lowerencode(s):
        return b"".join([cmap[c] for c in pycompat.iterbytestr(s)])

    return lowerencode
214
214
215
215
# prefer the C implementation when available
lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
217
217
218 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
218 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
219 _winres3 = (b'aux', b'con', b'prn', b'nul') # length 3
219 _winres3 = (b'aux', b'con', b'prn', b'nul') # length 3
220 _winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9)
220 _winres4 = (b'com', b'lpt') # length 4 (with trailing 1..9)
221
221
222
222
223 def _auxencode(path, dotencode):
223 def _auxencode(path, dotencode):
224 """
224 """
225 Encodes filenames containing names reserved by Windows or which end in
225 Encodes filenames containing names reserved by Windows or which end in
226 period or space. Does not touch other single reserved characters c.
226 period or space. Does not touch other single reserved characters c.
227 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
227 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
228 Additionally encodes space or period at the beginning, if dotencode is
228 Additionally encodes space or period at the beginning, if dotencode is
229 True. Parameter path is assumed to be all lowercase.
229 True. Parameter path is assumed to be all lowercase.
230 A segment only needs encoding if a reserved name appears as a
230 A segment only needs encoding if a reserved name appears as a
231 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
231 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
232 doesn't need encoding.
232 doesn't need encoding.
233
233
234 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
234 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
235 >>> _auxencode(s.split(b'/'), True)
235 >>> _auxencode(s.split(b'/'), True)
236 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
236 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
237 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
237 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
238 >>> _auxencode(s.split(b'/'), False)
238 >>> _auxencode(s.split(b'/'), False)
239 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
239 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
240 >>> _auxencode([b'foo. '], True)
240 >>> _auxencode([b'foo. '], True)
241 ['foo.~20']
241 ['foo.~20']
242 >>> _auxencode([b' .foo'], True)
242 >>> _auxencode([b' .foo'], True)
243 ['~20.foo']
243 ['~20.foo']
244 """
244 """
245 for i, n in enumerate(path):
245 for i, n in enumerate(path):
246 if not n:
246 if not n:
247 continue
247 continue
248 if dotencode and n[0] in b'. ':
248 if dotencode and n[0] in b'. ':
249 n = b"~%02x" % ord(n[0:1]) + n[1:]
249 n = b"~%02x" % ord(n[0:1]) + n[1:]
250 path[i] = n
250 path[i] = n
251 else:
251 else:
252 l = n.find(b'.')
252 l = n.find(b'.')
253 if l == -1:
253 if l == -1:
254 l = len(n)
254 l = len(n)
255 if (l == 3 and n[:3] in _winres3) or (
255 if (l == 3 and n[:3] in _winres3) or (
256 l == 4
256 l == 4
257 and n[3:4] <= b'9'
257 and n[3:4] <= b'9'
258 and n[3:4] >= b'1'
258 and n[3:4] >= b'1'
259 and n[:3] in _winres4
259 and n[:3] in _winres4
260 ):
260 ):
261 # encode third letter ('aux' -> 'au~78')
261 # encode third letter ('aux' -> 'au~78')
262 ec = b"~%02x" % ord(n[2:3])
262 ec = b"~%02x" % ord(n[2:3])
263 n = n[0:2] + ec + n[3:]
263 n = n[0:2] + ec + n[3:]
264 path[i] = n
264 path[i] = n
265 if n[-1] in b'. ':
265 if n[-1] in b'. ':
266 # encode last period or space ('foo...' -> 'foo..~2e')
266 # encode last period or space ('foo...' -> 'foo..~2e')
267 path[i] = n[:-1] + b"~%02x" % ord(n[-1:])
267 path[i] = n[:-1] + b"~%02x" % ord(n[-1:])
268 return path
268 return path
269
269
270
270
# longest store path before the hashed fallback encoding kicks in
_maxstorepathlen = 120
# characters kept from each directory level by the hashed encoding
_dirprefixlen = 8
# total budget for the shortened directory levels
_maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
274
274
275
275
def _hashencode(path, dotencode):
    """Return the non-reversible hashed store path for ``path``.

    The result is 'dh/', followed by up to ``_dirprefixlen`` characters of
    each directory level (capped at ``_maxshortdirslen`` in total), as much
    of the basename as fits within ``_maxstorepathlen``, the sha1 hex digest
    of the full path, and finally the original extension.
    """
    digest = hex(hashutil.sha1(path).digest())
    le = lowerencode(path[5:]).split(b'/')  # skips prefix 'data/' or 'meta/'
    parts = _auxencode(le, dotencode)
    basename = parts[-1]
    _root, ext = os.path.splitext(basename)
    sdirs = []
    sdirslen = 0
    for p in parts[:-1]:
        d = p[:_dirprefixlen]
        if d[-1] in b'. ':
            # Windows can't access dirs ending in period or space
            d = d[:-1] + b'_'
        if sdirslen == 0:
            t = len(d)
        else:
            t = sdirslen + 1 + len(d)
            if t > _maxshortdirslen:
                # directory budget exhausted: drop the remaining levels
                break
        sdirs.append(d)
        sdirslen = t
    dirs = b'/'.join(sdirs)
    if len(dirs) > 0:
        dirs += b'/'
    res = b'dh/' + dirs + digest + ext
    # use any space left under _maxstorepathlen for a readable filler taken
    # from the beginning of the basename
    spaceleft = _maxstorepathlen - len(res)
    if spaceleft > 0:
        filler = basename[:spaceleft]
        res = b'dh/' + dirs + filler + digest + ext
    return res
306
306
307
307
def _hybridencode(path, dotencode):
    """encodes path with a length limit

    Encodes all paths that begin with 'data/', according to the following.

    Default encoding (reversible):

    Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
    characters are encoded as '~xx', where xx is the two digit hex code
    of the character (see encodefilename).
    Relevant path components consisting of Windows reserved filenames are
    masked by encoding the third character ('aux' -> 'au~78', see _auxencode).

    Hashed encoding (not reversible):

    If the default-encoded path is longer than _maxstorepathlen, a
    non-reversible hybrid hashing of the path is done instead.
    This encoding uses up to _dirprefixlen characters of all directory
    levels of the lowerencoded path, but not more levels than can fit into
    _maxshortdirslen.
    Then follows the filler followed by the sha digest of the full path.
    The filler is the beginning of the basename of the lowerencoded path
    (the basename is everything after the last path separator). The filler
    is as long as possible, filling in characters from the basename until
    the encoded path has _maxstorepathlen characters (or all chars of the
    basename have been taken).
    The extension (e.g. '.i' or '.d') is preserved.

    The string 'data/' at the beginning is replaced with 'dh/', if the hashed
    encoding was used.
    """
    direncoded = encodedir(path)
    segments = _encodefname(direncoded).split(b'/')
    encoded = b'/'.join(_auxencode(segments, dotencode))
    if len(encoded) <= _maxstorepathlen:
        return encoded
    # too long: fall back to the non-reversible hashed form
    return _hashencode(direncoded, dotencode)
345
345
346
346
def _pathencode(path):
    """Encode ``path`` with dotencode rules, hashing over-long results."""
    direncoded = encodedir(path)
    # an already over-long input can only grow once encoded: hash right away
    if len(path) > _maxstorepathlen:
        return _hashencode(direncoded, True)
    segments = _encodefname(direncoded).split(b'/')
    encoded = b'/'.join(_auxencode(segments, True))
    if len(encoded) > _maxstorepathlen:
        return _hashencode(direncoded, True)
    return encoded
356
356
357
357
# prefer the C implementation when available
_pathencode = getattr(parsers, 'pathencode', _pathencode)
359
359
360
360
def _plainhybridencode(f):
    """Hybrid-encode ``f`` without the leading-dot/space escaping."""
    return _hybridencode(f, dotencode=False)
363
363
364
364
def _calcmode(vfs):
    """Return the permission mode new files in ``vfs`` should be created with.

    ``None`` means "let the umask decide" — either because the directory
    could not be stat'ed or because its mode already matches what the umask
    would produce anyway.
    """
    try:
        # files in .hg/ will be created using this mode
        st_mode = vfs.stat().st_mode
    except OSError:
        return None
    # avoid some useless chmods: the umask already yields this mode
    if (0o777 & st_mode) == (0o777 & ~util.umask):
        return None
    return st_mode
375
375
376
376
# top-level store entries copied by 'hg clone' (see copylist below)
_data = [
    b'bookmarks',
    b'narrowspec',
    b'data',
    b'meta',
    b'00manifest.d',
    b'00manifest.i',
    b'00changelog.d',
    b'00changelog.i',
    b'phaseroots',
    b'obsstore',
    b'requires',
]
390
390
# extensions of the main ("index") file of a revlog
REVLOG_FILES_MAIN_EXT = (b'.i', b'i.tmpcensored')
# extensions of the auxiliary revlog files
REVLOG_FILES_OTHER_EXT = (
    b'.idx',
    b'.d',
    b'.dat',
    b'.n',
    b'.nd',
    b'.sda',
    b'd.tmpcensored',
)
# files that are "volatile" and might change between listing and streaming
#
# note: the ".nd" file are nodemap data and won't "change" but they might be
# deleted.
REVLOG_FILES_VOLATILE_EXT = (b'.n', b'.nd')

# some exception to the above matching (undo files are not proper revlogs)
#
# Use a raw bytes literal: '\.' is an invalid escape sequence in a non-raw
# literal (DeprecationWarning since Python 3.6, a SyntaxError in the future).
# The compiled pattern is byte-for-byte identical.
EXCLUDED = re.compile(br'.*undo\.[^/]+\.(nd?|i)$')
409
409
410
410
def is_revlog(f, kind, st):
    """Return the revlog type of directory entry ``f``, or ``None``.

    Non-regular files (directories, symlinks, ...) are never revlogs.
    """
    return revlog_type(f) if kind == stat.S_IFREG else None
415
415
416
416
def revlog_type(f):
    """Classify filename ``f`` as a revlog component.

    Returns ``FILEFLAGS_REVLOG_MAIN`` for a main (index) file,
    ``FILETYPE_FILELOG_OTHER`` (possibly combined with
    ``FILEFLAGS_VOLATILE``) for an auxiliary file, and ``None`` for
    anything that is not a revlog file.
    """
    # undo files look like revlogs but are explicitly excluded
    if EXCLUDED.match(f) is not None:
        return None
    if f.endswith(REVLOG_FILES_MAIN_EXT):
        return FILEFLAGS_REVLOG_MAIN
    if f.endswith(REVLOG_FILES_OTHER_EXT):
        t = FILETYPE_FILELOG_OTHER
        if f.endswith(REVLOG_FILES_VOLATILE_EXT):
            t |= FILEFLAGS_VOLATILE
        return t
    return None
426
426
427
427
# A file type is a combination of a "category" flag (which revlog the data
# belongs to) and a "role" flag (main index file vs auxiliary file).

# the file is part of changelog data
FILEFLAGS_CHANGELOG = 1 << 13
# the file is part of manifest data
FILEFLAGS_MANIFESTLOG = 1 << 12
# the file is part of filelog data
FILEFLAGS_FILELOG = 1 << 11
# file that are not directly part of a revlog
FILEFLAGS_OTHER = 1 << 10

# the main entry point for a revlog
FILEFLAGS_REVLOG_MAIN = 1 << 1
# a secondary file for a revlog
FILEFLAGS_REVLOG_OTHER = 1 << 0

# files that are "volatile" and might change between listing and streaming
FILEFLAGS_VOLATILE = 1 << 20

# ready-made category|role combinations used by the store walkers
FILETYPE_CHANGELOG_MAIN = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_MAIN
FILETYPE_CHANGELOG_OTHER = FILEFLAGS_CHANGELOG | FILEFLAGS_REVLOG_OTHER
FILETYPE_MANIFESTLOG_MAIN = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_MAIN
FILETYPE_MANIFESTLOG_OTHER = FILEFLAGS_MANIFESTLOG | FILEFLAGS_REVLOG_OTHER
FILETYPE_FILELOG_MAIN = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_MAIN
FILETYPE_FILELOG_OTHER = FILEFLAGS_FILELOG | FILEFLAGS_REVLOG_OTHER
FILETYPE_OTHER = FILEFLAGS_OTHER
452
452
453
453
class basicstore(object):
    '''base class for local repository stores'''

    def __init__(self, path, vfstype):
        vfs = vfstype(path)
        self.path = vfs.base
        # mode new store files are created with (None: leave it to the umask)
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        # raw access, without any filename encoding
        self.rawvfs = vfs
        # access with directory-collision encoding applied
        self.vfs = vfsmod.filtervfs(vfs, encodedir)
        self.opener = self.vfs

    def join(self, f):
        """Return the filesystem path of store file ``f``."""
        return self.path + b'/' + encodedir(f)

    def _walk(self, relpath, recurse):
        '''yields (revlog_type, unencoded, encoded, size)'''
        path = self.path
        if relpath:
            path += b'/' + relpath
        striplen = len(self.path) + 1
        l = []
        if self.rawvfs.isdir(path):
            # iterative depth-first traversal of the store directory
            visit = [path]
            readdir = self.rawvfs.readdir
            while visit:
                p = visit.pop()
                for f, kind, st in readdir(p, stat=True):
                    fp = p + b'/' + f
                    rl_type = is_revlog(f, kind, st)
                    if rl_type is not None:
                        n = util.pconvert(fp[striplen:])
                        l.append((rl_type, decodedir(n), n, st.st_size))
                    elif kind == stat.S_IFDIR and recurse:
                        visit.append(fp)
        l.sort()
        return l

    def changelog(self, trypending, concurrencychecker=None):
        """Instantiate the changelog for this store."""
        return changelog.changelog(
            self.vfs,
            trypending=trypending,
            concurrencychecker=concurrencychecker,
        )

    def manifestlog(self, repo, storenarrowmatch):
        """Instantiate the manifest log for this store."""
        rootstore = manifest.manifestrevlog(repo.nodeconstants, self.vfs)
        return manifest.manifestlog(self.vfs, repo, rootstore, storenarrowmatch)

    def datafiles(self, matcher=None):
        """Yield (file_type, unencoded, encoded, size) for filelog files.

        note: ``matcher`` is accepted for interface parity with subclasses
        but is unused in this base implementation.
        """
        files = self._walk(b'data', True) + self._walk(b'meta', True)
        for (t, u, e, s) in files:
            yield (FILEFLAGS_FILELOG | t, u, e, s)

    def topfiles(self):
        """Yield the store's top-level revlog files, typed by name."""
        # yield manifest before changelog
        files = reversed(self._walk(b'', False))
        for (t, u, e, s) in files:
            if u.startswith(b'00changelog'):
                yield (FILEFLAGS_CHANGELOG | t, u, e, s)
            elif u.startswith(b'00manifest'):
                yield (FILEFLAGS_MANIFESTLOG | t, u, e, s)
            else:
                yield (FILETYPE_OTHER | t, u, e, s)

    def walk(self, matcher=None):
        """return file related to data storage (ie: revlogs)

        yields (file_type, unencoded, encoded, size)

        if a matcher is passed, only storage files of paths tracked by that
        matcher are yielded
        """
        # yield data files first
        for x in self.datafiles(matcher):
            yield x
        for x in self.topfiles():
            yield x

    def copylist(self):
        """Return the list of store entries 'hg clone' should copy."""
        return _data

    def write(self, tr):
        # nothing to persist in the base store
        pass

    def invalidatecaches(self):
        # the base store keeps no caches
        pass

    def markremoved(self, fn):
        # the base store keeps no file list to update
        pass

    def __contains__(self, path):
        '''Checks if the store contains path'''
        path = b"/".join((b"data", path))
        # file?
        if self.vfs.exists(path + b".i"):
            return True
        # dir?
        if not path.endswith(b"/"):
            path = path + b"/"
        return self.vfs.exists(path)
555
555
556
556
class encodedstore(basicstore):
    """Store using the reversible filename encoding ('store' requirement)."""

    def __init__(self, path, vfstype):
        vfs = vfstype(path + b'/store')
        self.path = vfs.base
        self.createmode = _calcmode(vfs)
        vfs.createmode = self.createmode
        self.rawvfs = vfs
        # unlike basicstore, full filename encoding is applied here
        self.vfs = vfsmod.filtervfs(vfs, encodefilename)
        self.opener = self.vfs

    def datafiles(self, matcher=None):
        """Yield (file_type, unencoded, encoded, size), honoring ``matcher``.

        Entries whose name cannot be decoded yield ``None`` as the
        unencoded name instead of being dropped.
        """
        for t, a, b, size in super(encodedstore, self).datafiles():
            try:
                a = decodefilename(a)
            except KeyError:
                # undecodable name: keep the entry but signal it with None
                a = None
            if a is not None and not _matchtrackedpath(a, matcher):
                continue
            yield t, a, b, size

    def join(self, f):
        """Return the filesystem path of store file ``f``."""
        return self.path + b'/' + encodefilename(f)

    def copylist(self):
        """Return the list of store entries 'hg clone' should copy."""
        return [b'requires', b'00changelog.i'] + [b'store/' + f for f in _data]
582
582
583
583
class fncache(object):
    """In-memory view of the store's ``fncache`` file.

    ``entries`` holds the set of tracked paths as loaded from disk
    (``None`` until first load); ``addls`` holds paths added since, which
    can be flushed by appending instead of rewriting the whole file.
    """

    # the filename used to be partially encoded
    # hence the encodedir/decodedir dance
    def __init__(self, vfs):
        self.vfs = vfs
        # None means "not loaded yet"; loaded lazily by _load()
        self.entries = None
        # True when `entries` diverged from disk and a full rewrite is needed
        self._dirty = False
        # set of new additions to fncache
        self.addls = set()

    def ensureloaded(self, warn=None):
        """read the fncache file if not already read.

        If the file on disk is corrupted, raise. If warn is provided,
        warn and keep going instead."""
        if self.entries is None:
            self._load(warn)

    def _load(self, warn=None):
        '''fill the entries from the fncache file'''
        self._dirty = False
        try:
            fp = self.vfs(b'fncache', mode=b'rb')
        except IOError:
            # skip nonexistent file
            self.entries = set()
            return

        self.entries = set()
        chunk = b''
        # Read in bounded chunks so a huge fncache is never held in memory
        # twice; only complete (newline-terminated) lines are consumed from
        # the rolling `chunk` buffer on each iteration.
        for c in iter(functools.partial(fp.read, fncache_chunksize), b''):
            chunk += c
            try:
                p = chunk.rindex(b'\n')
                self.entries.update(decodedir(chunk[: p + 1]).splitlines())
                chunk = chunk[p + 1 :]
            except ValueError:
                # substring '\n' not found, maybe the entry is bigger than the
                # chunksize, so let's keep iterating
                pass

        # Leftover bytes mean the file lacks a final newline: corruption.
        if chunk:
            msg = _(b"fncache does not ends with a newline")
            if warn:
                warn(msg + b'\n')
            else:
                raise error.Abort(
                    msg,
                    hint=_(
                        b"use 'hg debugrebuildfncache' to "
                        b"rebuild the fncache"
                    ),
                )
        self._checkentries(fp, warn)
        fp.close()

    def _checkentries(self, fp, warn):
        """make sure there is no empty string in entries"""
        if b'' in self.entries:
            # re-scan the file line by line only in the (rare) corrupt case,
            # so the error message can report the offending line number
            fp.seek(0)
            for n, line in enumerate(util.iterfile(fp)):
                if not line.rstrip(b'\n'):
                    t = _(b'invalid entry in fncache, line %d') % (n + 1)
                    if warn:
                        warn(t + b'\n')
                    else:
                        raise error.Abort(t)

    def write(self, tr):
        """Persist pending changes within transaction ``tr``.

        A dirty entry set forces a full rewrite; otherwise new entries in
        ``addls`` are simply appended to the existing file.
        """
        if self._dirty:
            assert self.entries is not None
            self.entries = self.entries | self.addls
            self.addls = set()
            tr.addbackup(b'fncache')
            fp = self.vfs(b'fncache', mode=b'wb', atomictemp=True)
            if self.entries:
                fp.write(encodedir(b'\n'.join(self.entries) + b'\n'))
            fp.close()
            self._dirty = False
        if self.addls:
            # if we have just new entries, let's append them to the fncache
            tr.addbackup(b'fncache')
            fp = self.vfs(b'fncache', mode=b'ab', atomictemp=True)
            if self.addls:
                fp.write(encodedir(b'\n'.join(self.addls) + b'\n'))
            fp.close()
            # force a reload on next access so entries reflect the file
            self.entries = None
            self.addls = set()

    def add(self, fn):
        """Register path ``fn``; queued in ``addls`` (append-only path),
        so this does not mark the cache dirty."""
        if self.entries is None:
            self._load()
        if fn not in self.entries:
            self.addls.add(fn)

    def remove(self, fn):
        """Forget path ``fn``; a removal from the on-disk set marks the
        cache dirty (full rewrite needed)."""
        if self.entries is None:
            self._load()
        if fn in self.addls:
            self.addls.remove(fn)
            return
        try:
            self.entries.remove(fn)
            self._dirty = True
        except KeyError:
            pass

    def __contains__(self, fn):
        # check pending additions first to avoid a load when possible
        if fn in self.addls:
            return True
        if self.entries is None:
            self._load()
        return fn in self.entries

    def __iter__(self):
        # iterate over both persisted and pending entries
        if self.entries is None:
            self._load()
        return iter(self.entries | self.addls)
702
702
703
703
class _fncachevfs(vfsmod.proxyvfs):
    """vfs proxy that records revlog paths into the fncache on write."""

    def __init__(self, vfs, fnc, encode):
        vfsmod.proxyvfs.__init__(self, vfs)
        self.fncache = fnc
        self.encode = encode

    def __call__(self, path, mode=b'r', *args, **kw):
        enc_path = self.encode(path)
        is_revlog = path.startswith(b'data/') or path.startswith(b'meta/')
        if is_revlog and mode not in (b'r', b'rb'):
            # Opening for write: the path may be new and must then be
            # registered. Avoid loading the fncache merely to confirm a
            # file that is already visible on disk.
            skip_add = (
                self.fncache.entries is None and self.vfs.exists(enc_path)
            )
            if (
                skip_add
                and b'r+' in mode
                and not self.vfs.stat(enc_path).st_size
            ):
                # A zero-size file opened for append counts as missing:
                # aborted transactions truncate files to zero length.
                skip_add = False
            if not skip_add:
                self.fncache.add(path)
        return self.vfs(enc_path, mode, *args, **kw)

    def join(self, path):
        """Return the absolute path for ``path``, encoding it if non-empty."""
        if not path:
            return self.vfs.join(path)
        return self.vfs.join(self.encode(path))

    def register_file(self, path):
        """Record ``path`` in the fncache when it is a tracked revlog path.

        Overrides the no-op hook on the base vfs class.
        """
        if path.startswith(b'data/') or path.startswith(b'meta/'):
            self.fncache.add(path)
737
733
738
class fncachestore(basicstore):
    """Store variant that lists its revlog paths in an fncache file."""

    def __init__(self, path, vfstype, dotencode):
        # dotencode repositories additionally encode problematic leading
        # characters; older ones use the plain hybrid encoding.
        encode = _pathencode if dotencode else _plainhybridencode
        self.encode = encode
        storevfs = vfstype(path + b'/store')
        self.path = storevfs.base
        self.pathsep = self.path + b'/'
        self.createmode = _calcmode(storevfs)
        storevfs.createmode = self.createmode
        self.rawvfs = storevfs
        fnc = fncache(storevfs)
        self.fncache = fnc
        self.vfs = _fncachevfs(storevfs, fnc, encode)
        self.opener = self.vfs

    def join(self, f):
        """Return the absolute, encoded path for store file ``f``."""
        return self.pathsep + self.encode(f)

    def getsize(self, path):
        """Return the on-disk size of (already encoded) ``path``."""
        return self.rawvfs.stat(path).st_size

    def datafiles(self, matcher=None):
        """Yield (type, unencoded, encoded, size) for tracked revlogs."""
        for unencoded in sorted(self.fncache):
            if not _matchtrackedpath(unencoded, matcher):
                continue
            encoded = self.encode(unencoded)
            try:
                rl_type = revlog_type(unencoded)
                assert rl_type is not None, unencoded
                rl_type |= FILEFLAGS_FILELOG
                yield rl_type, unencoded, encoded, self.getsize(encoded)
            except OSError as err:
                # entries may vanish from disk; only "no such file" is
                # tolerated here
                if err.errno != errno.ENOENT:
                    raise

    def copylist(self):
        """Return the list of store paths to copy for a hardlink clone."""
        store_files = [
            b'bookmarks',
            b'narrowspec',
            b'data',
            b'meta',
            b'dh',
            b'fncache',
            b'phaseroots',
            b'obsstore',
            b'00manifest.d',
            b'00manifest.i',
            b'00changelog.d',
            b'00changelog.i',
            b'requires',
        ]
        result = [b'requires', b'00changelog.i']
        result.extend(b'store/' + f for f in store_files)
        return result

    def write(self, tr):
        """Persist pending fncache changes within transaction ``tr``."""
        self.fncache.write(tr)

    def invalidatecaches(self):
        """Drop the in-memory fncache state, forcing a reload."""
        self.fncache.entries = None
        self.fncache.addls = set()

    def markremoved(self, fn):
        """Forget ``fn`` in the fncache."""
        self.fncache.remove(fn)

    def _exists(self, f):
        """Return whether unencoded path ``f`` exists on disk."""
        encoded = self.encode(f)
        try:
            self.getsize(encoded)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # nonexistent entry
            return False
        return True

    def __contains__(self, path):
        """Return True if the store tracks ``path`` as a file or directory."""
        path = b"data/" + path
        # check for files (exact match)
        index = path + b'.i'
        if index in self.fncache and self._exists(index):
            return True
        # now check for directories (prefix match)
        if not path.endswith(b'/'):
            path += b'/'
        return any(
            entry.startswith(path) and self._exists(entry)
            for entry in self.fncache
        )
@@ -1,751 +1,754 b''
1 # vfs.py - Mercurial 'vfs' classes
1 # vfs.py - Mercurial 'vfs' classes
2 #
2 #
3 # Copyright Olivia Mackall <olivia@selenic.com>
3 # Copyright Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import contextlib
9 import contextlib
10 import errno
10 import errno
11 import os
11 import os
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import threading
14 import threading
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import (
17 from .pycompat import (
18 delattr,
18 delattr,
19 getattr,
19 getattr,
20 setattr,
20 setattr,
21 )
21 )
22 from . import (
22 from . import (
23 encoding,
23 encoding,
24 error,
24 error,
25 pathutil,
25 pathutil,
26 pycompat,
26 pycompat,
27 util,
27 util,
28 )
28 )
29
29
30
30
def _avoidambig(path, oldstat):
    """Avoid file stat ambiguity forcibly

    This function causes copying ``path`` file, if it is owned by
    another (see issue5418 and issue5584 for detail).
    """

    def _resolved():
        # True once the current stat of ``path`` can no longer be confused
        # with ``oldstat`` — either it already differs, or we managed to
        # advance it in place.
        current = util.filestat.frompath(path)
        return not current.isambig(oldstat) or current.avoidambig(
            path, oldstat
        )

    if not _resolved():
        # Take ownership by replacing the file with a fresh copy, which
        # grants the privilege to advance its mtime (see issue5418).
        util.rename(util.mktempcopy(path), path)
        _resolved()
48
48
49
49
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def __call__(self, path, mode=b'rb', **kwargs):
        # open ``path`` (relative to the vfs root); concrete subclasses
        # must implement this
        raise NotImplementedError

    def _auditpath(self, path, mode):
        # check that ``path`` is legitimate to access; subclass
        # responsibility
        raise NotImplementedError

    def join(self, path, *insidef):
        # map a vfs-relative ``path`` to a full filesystem path; subclass
        # responsibility
        raise NotImplementedError

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b""

    def tryreadlines(self, path, mode=b'rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return []

    @util.propertycache
    def open(self):
        """Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        """
        return self.__call__

    def read(self, path):
        """Return the full binary content of ``path``."""
        with self(path, b'rb') as fp:
            return fp.read()

    def readlines(self, path, mode=b'rb'):
        """Return the content of ``path`` as a list of lines."""
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False, **kwargs):
        """Write ``data`` to ``path``, replacing any previous content."""
        with self(path, b'wb', backgroundclose=backgroundclose, **kwargs) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode=b'wb', notindexed=False):
        """Write the iterable of lines ``data`` to ``path``."""
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        """Append ``data`` at the end of ``path``."""
        with self(path, b'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        """return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks."""
        try:
            st = self.lstat(path)
        except OSError:
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix=b'', prefix=b'tmp', dir=None):
        """Create a temporary file and return ``(fd, name)``.

        ``name`` stays relative: it is joined back onto ``dir`` when one
        was given, otherwise it is the bare filename.
        """
        fd, name = pycompat.mkstemp(
            suffix=suffix, prefix=prefix, dir=self.join(dir)
        )
        # keep only the filename component of the generated path
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return util.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if destination file is guarded by any lock
        (e.g. repo.lock or repo.wlock).

        To avoid file stat ambiguity forcibly, checkambig=True involves
        copying ``src`` file, if it is owned by another. Therefore, use
        checkambig=True only in limited cases (see also issue5418 and
        issue5584 for detail).
        """
        self._auditpath(dst, b'w')
        srcpath = self.join(src)
        dstpath = self.join(dst)
        # only pay the stat cost when ambiguity checking was requested
        oldstat = checkambig and util.filestat.frompath(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(srcpath, dstpath)
            _avoidambig(dstpath, oldstat)
            return ret
        return util.rename(srcpath, dstpath)

    def readlink(self, path):
        return util.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones"""
        return util.removedirs(self.join(path))

    def rmdir(self, path=None):
        """Remove an empty directory."""
        return os.rmdir(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:

            def onerror(function, path, excinfo):
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)

        else:
            onerror = None
        return shutil.rmtree(
            self.join(path), ignore_errors=ignore_errors, onerror=onerror
        )

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def tryunlink(self, path=None):
        """Attempt to remove a file, ignoring missing file errors."""
        util.tryunlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False, rmdir=True):
        return util.unlinkpath(
            self.join(path), ignoremissing=ignoremissing, rmdir=rmdir
        )

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # Sharing backgroundfilecloser between threads is complex and using
        # multiple instances puts us at risk of running out of file descriptors
        # only allow to use backgroundfilecloser when in main thread.
        if not isinstance(
            threading.current_thread(),
            threading._MainThread,  # pytype: disable=module-attr
        ):
            yield
            return
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort(
                _(b'can only have 1 active background file closer')
            )

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = (
                    bfc  # pytype: disable=attribute-error
                )
                yield bfc
            finally:
                vfs._backgroundfilecloser = (
                    None  # pytype: disable=attribute-error
                )

    def register_file(self, path):
        """Generic hook point to notify the vfs that ``path`` was created.

        The base implementation does nothing; fncache-aware proxy vfs
        classes presumably override this to record the new file — TODO
        confirm against the store implementation.
        """
332
335
class vfs(abstractvfs):
    """Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.

    'cacheaudited' should be enabled only if (a) vfs object is short-lived, or
    (b) the base directory is managed by hg and considered sort-of append-only.
    See pathutil.pathauditor() for details.
    """

    def __init__(
        self,
        base,
        audit=True,
        cacheaudited=False,
        expandpath=False,
        realpath=False,
    ):
        # normalize the base directory as requested before storing it
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._audit = audit
        if audit:
            self.audit = pathutil.pathauditor(self.base, cached=cacheaudited)
        else:
            # auditing disabled: accept every path unconditionally
            self.audit = lambda path, mode=None: True
        self.createmode = None
        self._trustnlink = None
        self.options = {}

    @util.propertycache
    def _cansymlink(self):
        # whether the filesystem under ``base`` supports symbolic links
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # whether the filesystem under ``base`` honors the executable bit
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        """Apply ``createmode`` (minus exec bits) to a newly created file."""
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0o666)

    def _auditpath(self, path, mode):
        """Validate ``path`` (OS-legal name + path auditor) when auditing is on."""
        if self._audit:
            if os.path.isabs(path) and path.startswith(self.base):
                # audit expects a path relative to the vfs root
                path = os.path.relpath(path, self.base)
            r = util.checkosfilename(path)
            if r:
                raise error.Abort(b"%s: %r" % (r, path))
            self.audit(path, mode=mode)

    def __call__(
        self,
        path,
        mode=b"r",
        atomictemp=False,
        notindexed=False,
        backgroundclose=False,
        checkambig=False,
        auditpath=True,
        makeparentdirs=True,
    ):
        """Open ``path`` file, which is relative to vfs root.

        By default, parent directories are created as needed. Newly created
        directories are marked as "not to be indexed by the content indexing
        service", if ``notindexed`` is specified for "write" mode access.
        Set ``makeparentdirs=False`` to not create directories implicitly.

        If ``backgroundclose`` is passed, the file may be closed asynchronously.
        It can only be used if the ``self.backgroundclosing()`` context manager
        is active. This should only be specified if the following criteria hold:

        1. There is a potential for writing thousands of files. Unless you
           are writing thousands of files, the performance benefits of
           asynchronously closing files is not realized.
        2. Files are opened exactly once for the ``backgroundclosing``
           active duration and are therefore free of race conditions between
           closing a file on a background thread and reopening it. (If the
           file were opened multiple times, there could be unflushed data
           because the original file handle hasn't been flushed/closed yet.)

        ``checkambig`` argument is passed to atomictempfile (valid
        only for writing), and is useful only if target file is
        guarded by any lock (e.g. repo.lock or repo.wlock).

        To avoid file stat ambiguity forcibly, checkambig=True involves
        copying ``path`` file opened in "append" mode (e.g. for
        truncation), if it is owned by another. Therefore, use
        combination of append mode and checkambig=True only in limited
        cases (see also issue5418 and issue5584 for detail).
        """
        if auditpath:
            self._auditpath(path, mode)
        f = self.join(path)

        if b"b" not in mode:
            mode += b"b"  # for that other OS

        nlink = -1
        if mode not in (b'r', b'rb'):
            # write access: deal with COW/hardlink breaking before opening
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    if makeparentdirs:
                        util.makedirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(
                        f, mode, self.createmode, checkambig=checkambig
                    )
                try:
                    if b'w' in mode:
                        # truncating write: old content (and links) go away
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        with util.posixfile(f):
                            nlink = util.nlinks(f)
                            if nlink < 1:
                                nlink = 2  # force mktempcopy (issue1922)
                except (OSError, IOError) as e:
                    if e.errno != errno.ENOENT:
                        raise
                    # file does not exist yet; it will be newly created
                    nlink = 0
                if makeparentdirs:
                    util.makedirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break up a hardlinked file by replacing it with a copy
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            # newly created file: apply the configured creation mode
            self._fixfilemode(f)

        if checkambig:
            if mode in (b'r', b'rb'):
                raise error.Abort(
                    _(
                        b'implementation error: mode %s is not'
                        b' valid for checkambig=True'
                    )
                    % mode
                )
            fp = checkambigatclosing(fp)

        if backgroundclose and isinstance(
            threading.current_thread(),
            threading._MainThread,  # pytype: disable=module-attr
        ):
            if (
                not self._backgroundfilecloser  # pytype: disable=attribute-error
            ):
                raise error.Abort(
                    _(
                        b'backgroundclose can only be used when a '
                        b'backgroundclosing context manager is active'
                    )
                )

            fp = delayclosedfile(
                fp,
                self._backgroundfilecloser,  # pytype: disable=attribute-error
            )

        return fp

    def symlink(self, src, dst):
        """Create a symlink at ``dst`` (vfs-relative) pointing to ``src``.

        Falls back to writing ``src`` as the content of a regular file when
        the underlying filesystem does not support symlinks.
        """
        self.audit(dst)
        linkname = self.join(dst)
        util.tryunlink(linkname)

        util.makedirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError as err:
                raise OSError(
                    err.errno,
                    _(b'could not symlink to %r: %s')
                    % (src, encoding.strtolocal(err.strerror)),
                    linkname,
                )
        else:
            # no symlink support: store the link target as file content
            self.write(dst, src)

    def join(self, path, *insidef):
        """Return the absolute filesystem path for vfs-relative ``path``.

        A false-y ``path`` (None or empty) designates the vfs root itself.
        """
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base
531
534
532
535
# alias of ``vfs`` kept for callers that use the older "opener" name
opener = vfs
534
537
535
538
class proxyvfs(abstractvfs):
    """Base class for vfs wrappers that delegate to an underlying vfs."""

    def __init__(self, vfs):
        # the wrapped vfs that operations are forwarded to
        self.vfs = vfs

    def _auditpath(self, path, mode):
        # path auditing is performed by the underlying vfs
        return self.vfs._auditpath(path, mode)

    @property
    def options(self):
        # options live on the underlying vfs so the proxy and the wrapped
        # object always observe the same dict
        return self.vfs.options

    @options.setter
    def options(self, value):
        self.vfs.options = value
550
553
551
554
class filtervfs(proxyvfs, abstractvfs):
    """Wrapper vfs that rewrites every file name through a filter function."""

    def __init__(self, vfs, filter):
        super(filtervfs, self).__init__(vfs)
        # callable mapping a vfs-relative path to the path actually used
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        filtered = self._filter(path)
        return self.vfs(filtered, *args, **kwargs)

    def join(self, path, *insidef):
        if not path:
            return self.vfs.join(path)
        relative = self.vfs.reljoin(path, *insidef)
        return self.vfs.join(self._filter(relative))
567
570
568
571
# alias of ``filtervfs`` kept for callers that use the older "opener" naming
filteropener = filtervfs
570
573
571
574
class readonlyvfs(proxyvfs):
    """Wrapper vfs that rejects every write access."""

    def __init__(self, vfs):
        proxyvfs.__init__(self, vfs)

    def __call__(self, path, mode=b'r', *args, **kw):
        # only plain read modes are allowed through to the underlying vfs
        if mode in (b'r', b'rb'):
            return self.vfs(path, mode, *args, **kw)
        raise error.Abort(_(b'this vfs is read only'))

    def join(self, path, *insidef):
        return self.vfs.join(path, *insidef)
585
588
586
589
class closewrapbase(object):
    """Base class of wrapper, which hooks closing

    Attribute access is transparently forwarded to the wrapped file
    handle; subclasses override ``close``/``__exit__`` to intercept
    the close. Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh):
        # write straight into the instance dict to bypass our own
        # __setattr__, which forwards everything to the wrapped handle
        self.__dict__['_origfh'] = fh

    def __getattr__(self, attr):
        # only reached for names not found on the proxy itself
        return getattr(self._origfh, attr)

    def __setattr__(self, attr, value):
        return setattr(self._origfh, attr, value)

    def __delattr__(self, attr):
        return delattr(self._origfh, attr)

    def __enter__(self):
        self._origfh.__enter__()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def close(self):
        raise NotImplementedError('attempted instantiating ' + str(type(self)))
614
617
615
618
class delayclosedfile(closewrapbase):
    """Proxy for a file object whose close is delayed.

    Closing hands the real file handle to ``closer`` instead of
    closing it directly. Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh, closer):
        super(delayclosedfile, self).__init__(fh)
        object.__setattr__(self, '_closer', closer)

    def close(self):
        # delegate the actual close to the (possibly background) closer
        self._closer.close(self._origfh)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
631
634
632
635
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""

    def __init__(self, ui, expectedcount=-1):
        # ``_running`` stays False when background closing is disabled or
        # not worthwhile; ``close()`` then falls back to synchronous closes.
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = pycompat.iswindows
        enabled = ui.configbool(b'worker', b'backgroundclose', defaultenabled)

        if not enabled:
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint(b'worker', b'backgroundcloseminfilecount')
        # FUTURE dynamically start background threads after minfilecount closes.
        # (We don't currently have any callers that don't know their file count)
        if expectedcount > 0 and expectedcount < minfilecount:
            return

        maxqueue = ui.configint(b'worker', b'backgroundclosemaxqueue')
        threadcount = ui.configint(b'worker', b'backgroundclosethreadcount')

        ui.debug(
            b'starting %d threads for background file closing\n' % threadcount
        )

        # bounded queue: producers block in close() once maxqueue handles
        # are waiting to be closed
        self._queue = pycompat.queue.Queue(maxsize=maxqueue)
        self._running = True

        for i in range(threadcount):
            t = threading.Thread(target=self._worker, name='backgroundcloser')
            self._threads.append(t)
            t.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Wait for threads to finish closing so open files don't linger for
        # longer than lifetime of context manager.
        for t in self._threads:
            t.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                # poll with a timeout so the thread can notice shutdown
                fh = self._queue.get(block=True, timeout=0.100)
                # Need to catch or the thread will terminate and
                # we could orphan file descriptors.
                try:
                    fh.close()
                except Exception as e:
                    # Stash so can re-raise from main thread later.
                    self._threadexception = e
            except pycompat.queue.Empty:
                if not self._running:
                    break

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(
                _(b'can only call close() when context manager active')
            )

        # If a background thread encountered an exception, raise now so we fail
        # fast. Otherwise we may potentially go on for minutes until the error
        # is acted on.
        if self._threadexception:
            e = self._threadexception
            self._threadexception = None
            raise e

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
723
726
724
727
class checkambigatclosing(closewrapbase):
    """Proxy for a file object, to avoid ambiguity of file stat

    The stat of the target file is captured at construction time and,
    on close/exit, compared against the file on disk via _avoidambig().
    See also util.filestat for detail about "ambiguity of file stat".

    This proxy is useful only if the target file is guarded by any
    lock (e.g. repo.lock or repo.wlock)

    Do not instantiate outside of the vfs layer.
    """

    def __init__(self, fh):
        super(checkambigatclosing, self).__init__(fh)
        # snapshot the stat before any further writes happen
        object.__setattr__(self, '_oldstat', util.filestat.frompath(fh.name))

    def _checkambig(self):
        previous = self._oldstat
        # nothing to compare against if the file did not exist before
        if previous.stat:
            _avoidambig(self._origfh.name, previous)

    def close(self):
        self._origfh.close()
        self._checkambig()

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._origfh.__exit__(exc_type, exc_value, exc_tb)
        self._checkambig()
General Comments 0
You need to be logged in to leave comments. Login now