vfs: fix proxyvfs inheritance...
Boris Feld
r41125:6498f0e0 default
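This changeset straightens out the vfs proxy class hierarchy: proxyvfs now derives from abstractvfs, filtervfs lists proxyvfs first, and _fncachevfs/readonlyvfs subclass proxyvfs alone instead of naming abstractvfs as well. The snippet below is not part of the patch; it is a minimal, illustrative sketch (with stand-in class bodies) of the resulting method resolution order.

# Illustrative stand-ins only -- not the real mercurial.vfs classes.
class abstractvfs(object):
    pass

# Before the patch proxyvfs derived from object, so every proxy class had to
# inherit abstractvfs explicitly (e.g. class filtervfs(abstractvfs, proxyvfs)).
# After the patch proxyvfs is itself an abstractvfs:
class proxyvfs(abstractvfs):
    def __init__(self, vfs):
        self.vfs = vfs

# ...and subclasses put proxyvfs first, as the new filtervfs does:
class filtervfs(proxyvfs, abstractvfs):
    pass

print([c.__name__ for c in filtervfs.__mro__])
# ['filtervfs', 'proxyvfs', 'abstractvfs', 'object']
print(isinstance(filtervfs(vfs=None), abstractvfs))   # True for every proxy class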
@@ -1,629 +1,629 b''
1 1 # store.py - repository store handling for Mercurial
2 2 #
3 3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from . import (
17 17 error,
18 18 node,
19 19 policy,
20 20 pycompat,
21 21 util,
22 22 vfs as vfsmod,
23 23 )
24 24
25 25 parsers = policy.importmod(r'parsers')
26 26
27 27 def _matchtrackedpath(path, matcher):
28 28 """parses a fncache entry and returns whether the entry is tracking a path
29 29 matched by matcher or not.
30 30
31 31 If matcher is None, returns True"""
32 32
33 33 if matcher is None:
34 34 return True
35 35 path = decodedir(path)
36 36 if path.startswith('data/'):
37 37 return matcher(path[len('data/'):-len('.i')])
38 38 elif path.startswith('meta/'):
39 39 return matcher.visitdir(path[len('meta/'):-len('/00manifest.i')] or '.')
40 40
41 41 raise error.ProgrammingError("cannot decode path %s" % path)
42 42
43 43 # This avoids a collision between a file named foo and a dir named
44 44 # foo.i or foo.d
45 45 def _encodedir(path):
46 46 '''
47 47 >>> _encodedir(b'data/foo.i')
48 48 'data/foo.i'
49 49 >>> _encodedir(b'data/foo.i/bla.i')
50 50 'data/foo.i.hg/bla.i'
51 51 >>> _encodedir(b'data/foo.i.hg/bla.i')
52 52 'data/foo.i.hg.hg/bla.i'
53 53 >>> _encodedir(b'data/foo.i\\ndata/foo.i/bla.i\\ndata/foo.i.hg/bla.i\\n')
54 54 'data/foo.i\\ndata/foo.i.hg/bla.i\\ndata/foo.i.hg.hg/bla.i\\n'
55 55 '''
56 56 return (path
57 57 .replace(".hg/", ".hg.hg/")
58 58 .replace(".i/", ".i.hg/")
59 59 .replace(".d/", ".d.hg/"))
60 60
61 61 encodedir = getattr(parsers, 'encodedir', _encodedir)
62 62
63 63 def decodedir(path):
64 64 '''
65 65 >>> decodedir(b'data/foo.i')
66 66 'data/foo.i'
67 67 >>> decodedir(b'data/foo.i.hg/bla.i')
68 68 'data/foo.i/bla.i'
69 69 >>> decodedir(b'data/foo.i.hg.hg/bla.i')
70 70 'data/foo.i.hg/bla.i'
71 71 '''
72 72 if ".hg/" not in path:
73 73 return path
74 74 return (path
75 75 .replace(".d.hg/", ".d/")
76 76 .replace(".i.hg/", ".i/")
77 77 .replace(".hg.hg/", ".hg/"))
78 78
79 79 def _reserved():
80 80 ''' characters that are problematic for filesystems
81 81
82 82 * ascii escapes (0..31)
83 83 * ascii hi (126..255)
84 84 * windows specials
85 85
86 86 these characters will be escaped by encodefunctions
87 87 '''
88 88 winreserved = [ord(x) for x in u'\\:*?"<>|']
89 89 for x in range(32):
90 90 yield x
91 91 for x in range(126, 256):
92 92 yield x
93 93 for x in winreserved:
94 94 yield x
95 95
96 96 def _buildencodefun():
97 97 '''
98 98 >>> enc, dec = _buildencodefun()
99 99
100 100 >>> enc(b'nothing/special.txt')
101 101 'nothing/special.txt'
102 102 >>> dec(b'nothing/special.txt')
103 103 'nothing/special.txt'
104 104
105 105 >>> enc(b'HELLO')
106 106 '_h_e_l_l_o'
107 107 >>> dec(b'_h_e_l_l_o')
108 108 'HELLO'
109 109
110 110 >>> enc(b'hello:world?')
111 111 'hello~3aworld~3f'
112 112 >>> dec(b'hello~3aworld~3f')
113 113 'hello:world?'
114 114
115 115 >>> enc(b'the\\x07quick\\xADshot')
116 116 'the~07quick~adshot'
117 117 >>> dec(b'the~07quick~adshot')
118 118 'the\\x07quick\\xadshot'
119 119 '''
120 120 e = '_'
121 121 xchr = pycompat.bytechr
122 122 asciistr = list(map(xchr, range(127)))
123 123 capitals = list(range(ord("A"), ord("Z") + 1))
124 124
125 125 cmap = dict((x, x) for x in asciistr)
126 126 for x in _reserved():
127 127 cmap[xchr(x)] = "~%02x" % x
128 128 for x in capitals + [ord(e)]:
129 129 cmap[xchr(x)] = e + xchr(x).lower()
130 130
131 131 dmap = {}
132 132 for k, v in cmap.iteritems():
133 133 dmap[v] = k
134 134 def decode(s):
135 135 i = 0
136 136 while i < len(s):
137 137 for l in pycompat.xrange(1, 4):
138 138 try:
139 139 yield dmap[s[i:i + l]]
140 140 i += l
141 141 break
142 142 except KeyError:
143 143 pass
144 144 else:
145 145 raise KeyError
146 146 return (lambda s: ''.join([cmap[s[c:c + 1]]
147 147 for c in pycompat.xrange(len(s))]),
148 148 lambda s: ''.join(list(decode(s))))
149 149
150 150 _encodefname, _decodefname = _buildencodefun()
151 151
152 152 def encodefilename(s):
153 153 '''
154 154 >>> encodefilename(b'foo.i/bar.d/bla.hg/hi:world?/HELLO')
155 155 'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o'
156 156 '''
157 157 return _encodefname(encodedir(s))
158 158
159 159 def decodefilename(s):
160 160 '''
161 161 >>> decodefilename(b'foo.i.hg/bar.d.hg/bla.hg.hg/hi~3aworld~3f/_h_e_l_l_o')
162 162 'foo.i/bar.d/bla.hg/hi:world?/HELLO'
163 163 '''
164 164 return decodedir(_decodefname(s))
165 165
166 166 def _buildlowerencodefun():
167 167 '''
168 168 >>> f = _buildlowerencodefun()
169 169 >>> f(b'nothing/special.txt')
170 170 'nothing/special.txt'
171 171 >>> f(b'HELLO')
172 172 'hello'
173 173 >>> f(b'hello:world?')
174 174 'hello~3aworld~3f'
175 175 >>> f(b'the\\x07quick\\xADshot')
176 176 'the~07quick~adshot'
177 177 '''
178 178 xchr = pycompat.bytechr
179 179 cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)])
180 180 for x in _reserved():
181 181 cmap[xchr(x)] = "~%02x" % x
182 182 for x in range(ord("A"), ord("Z") + 1):
183 183 cmap[xchr(x)] = xchr(x).lower()
184 184 def lowerencode(s):
185 185 return "".join([cmap[c] for c in pycompat.iterbytestr(s)])
186 186 return lowerencode
187 187
188 188 lowerencode = getattr(parsers, 'lowerencode', None) or _buildlowerencodefun()
189 189
190 190 # Windows reserved names: con, prn, aux, nul, com1..com9, lpt1..lpt9
191 191 _winres3 = ('aux', 'con', 'prn', 'nul') # length 3
192 192 _winres4 = ('com', 'lpt') # length 4 (with trailing 1..9)
193 193 def _auxencode(path, dotencode):
194 194 '''
195 195 Encodes filenames containing names reserved by Windows or which end in
196 196 period or space. Does not touch other single reserved characters c.
197 197 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
198 198 Additionally encodes space or period at the beginning, if dotencode is
199 199 True. Parameter path is assumed to be all lowercase.
200 200 A segment only needs encoding if a reserved name appears as a
201 201 basename (e.g. "aux", "aux.foo"). A directory or file named "foo.aux"
202 202 doesn't need encoding.
203 203
204 204 >>> s = b'.foo/aux.txt/txt.aux/con/prn/nul/foo.'
205 205 >>> _auxencode(s.split(b'/'), True)
206 206 ['~2efoo', 'au~78.txt', 'txt.aux', 'co~6e', 'pr~6e', 'nu~6c', 'foo~2e']
207 207 >>> s = b'.com1com2/lpt9.lpt4.lpt1/conprn/com0/lpt0/foo.'
208 208 >>> _auxencode(s.split(b'/'), False)
209 209 ['.com1com2', 'lp~749.lpt4.lpt1', 'conprn', 'com0', 'lpt0', 'foo~2e']
210 210 >>> _auxencode([b'foo. '], True)
211 211 ['foo.~20']
212 212 >>> _auxencode([b' .foo'], True)
213 213 ['~20.foo']
214 214 '''
215 215 for i, n in enumerate(path):
216 216 if not n:
217 217 continue
218 218 if dotencode and n[0] in '. ':
219 219 n = "~%02x" % ord(n[0:1]) + n[1:]
220 220 path[i] = n
221 221 else:
222 222 l = n.find('.')
223 223 if l == -1:
224 224 l = len(n)
225 225 if ((l == 3 and n[:3] in _winres3) or
226 226 (l == 4 and n[3:4] <= '9' and n[3:4] >= '1'
227 227 and n[:3] in _winres4)):
228 228 # encode third letter ('aux' -> 'au~78')
229 229 ec = "~%02x" % ord(n[2:3])
230 230 n = n[0:2] + ec + n[3:]
231 231 path[i] = n
232 232 if n[-1] in '. ':
233 233 # encode last period or space ('foo...' -> 'foo..~2e')
234 234 path[i] = n[:-1] + "~%02x" % ord(n[-1:])
235 235 return path
236 236
237 237 _maxstorepathlen = 120
238 238 _dirprefixlen = 8
239 239 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
240 240
241 241 def _hashencode(path, dotencode):
242 242 digest = node.hex(hashlib.sha1(path).digest())
243 243 le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/'
244 244 parts = _auxencode(le, dotencode)
245 245 basename = parts[-1]
246 246 _root, ext = os.path.splitext(basename)
247 247 sdirs = []
248 248 sdirslen = 0
249 249 for p in parts[:-1]:
250 250 d = p[:_dirprefixlen]
251 251 if d[-1] in '. ':
252 252 # Windows can't access dirs ending in period or space
253 253 d = d[:-1] + '_'
254 254 if sdirslen == 0:
255 255 t = len(d)
256 256 else:
257 257 t = sdirslen + 1 + len(d)
258 258 if t > _maxshortdirslen:
259 259 break
260 260 sdirs.append(d)
261 261 sdirslen = t
262 262 dirs = '/'.join(sdirs)
263 263 if len(dirs) > 0:
264 264 dirs += '/'
265 265 res = 'dh/' + dirs + digest + ext
266 266 spaceleft = _maxstorepathlen - len(res)
267 267 if spaceleft > 0:
268 268 filler = basename[:spaceleft]
269 269 res = 'dh/' + dirs + filler + digest + ext
270 270 return res
271 271
272 272 def _hybridencode(path, dotencode):
273 273 '''encodes path with a length limit
274 274
275 275 Encodes all paths that begin with 'data/', according to the following.
276 276
277 277 Default encoding (reversible):
278 278
279 279 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
280 280 characters are encoded as '~xx', where xx is the two digit hex code
281 281 of the character (see encodefilename).
282 282 Relevant path components consisting of Windows reserved filenames are
283 283 masked by encoding the third character ('aux' -> 'au~78', see _auxencode).
284 284
285 285 Hashed encoding (not reversible):
286 286
287 287 If the default-encoded path is longer than _maxstorepathlen, a
288 288 non-reversible hybrid hashing of the path is done instead.
289 289 This encoding uses up to _dirprefixlen characters of all directory
290 290 levels of the lowerencoded path, but not more levels than can fit into
291 291 _maxshortdirslen.
292 292 Then follows the filler followed by the sha digest of the full path.
293 293 The filler is the beginning of the basename of the lowerencoded path
294 294 (the basename is everything after the last path separator). The filler
295 295 is as long as possible, filling in characters from the basename until
296 296 the encoded path has _maxstorepathlen characters (or all chars of the
297 297 basename have been taken).
298 298 The extension (e.g. '.i' or '.d') is preserved.
299 299
300 300 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
301 301 encoding was used.
302 302 '''
303 303 path = encodedir(path)
304 304 ef = _encodefname(path).split('/')
305 305 res = '/'.join(_auxencode(ef, dotencode))
306 306 if len(res) > _maxstorepathlen:
307 307 res = _hashencode(path, dotencode)
308 308 return res
309 309
310 310 def _pathencode(path):
311 311 de = encodedir(path)
312 312 if len(path) > _maxstorepathlen:
313 313 return _hashencode(de, True)
314 314 ef = _encodefname(de).split('/')
315 315 res = '/'.join(_auxencode(ef, True))
316 316 if len(res) > _maxstorepathlen:
317 317 return _hashencode(de, True)
318 318 return res
319 319
320 320 _pathencode = getattr(parsers, 'pathencode', _pathencode)
321 321
322 322 def _plainhybridencode(f):
323 323 return _hybridencode(f, False)
324 324
325 325 def _calcmode(vfs):
326 326 try:
327 327 # files in .hg/ will be created using this mode
328 328 mode = vfs.stat().st_mode
329 329 # avoid some useless chmods
330 330 if (0o777 & ~util.umask) == (0o777 & mode):
331 331 mode = None
332 332 except OSError:
333 333 mode = None
334 334 return mode
335 335
336 336 _data = ('narrowspec data meta 00manifest.d 00manifest.i'
337 337 ' 00changelog.d 00changelog.i phaseroots obsstore')
338 338
339 339 def isrevlog(f, kind, st):
340 340 return kind == stat.S_IFREG and f[-2:] in ('.i', '.d')
341 341
342 342 class basicstore(object):
343 343 '''base class for local repository stores'''
344 344 def __init__(self, path, vfstype):
345 345 vfs = vfstype(path)
346 346 self.path = vfs.base
347 347 self.createmode = _calcmode(vfs)
348 348 vfs.createmode = self.createmode
349 349 self.rawvfs = vfs
350 350 self.vfs = vfsmod.filtervfs(vfs, encodedir)
351 351 self.opener = self.vfs
352 352
353 353 def join(self, f):
354 354 return self.path + '/' + encodedir(f)
355 355
356 356 def _walk(self, relpath, recurse, filefilter=isrevlog):
357 357 '''yields (unencoded, encoded, size)'''
358 358 path = self.path
359 359 if relpath:
360 360 path += '/' + relpath
361 361 striplen = len(self.path) + 1
362 362 l = []
363 363 if self.rawvfs.isdir(path):
364 364 visit = [path]
365 365 readdir = self.rawvfs.readdir
366 366 while visit:
367 367 p = visit.pop()
368 368 for f, kind, st in readdir(p, stat=True):
369 369 fp = p + '/' + f
370 370 if filefilter(f, kind, st):
371 371 n = util.pconvert(fp[striplen:])
372 372 l.append((decodedir(n), n, st.st_size))
373 373 elif kind == stat.S_IFDIR and recurse:
374 374 visit.append(fp)
375 375 l.sort()
376 376 return l
377 377
378 378 def datafiles(self, matcher=None):
379 379 return self._walk('data', True) + self._walk('meta', True)
380 380
381 381 def topfiles(self):
382 382 # yield manifest before changelog
383 383 return reversed(self._walk('', False))
384 384
385 385 def walk(self, matcher=None):
386 386 '''yields (unencoded, encoded, size)
387 387
388 388 if a matcher is passed, only storage files of those tracked paths
389 389 that match the matcher are yielded
390 390 '''
391 391 # yield data files first
392 392 for x in self.datafiles(matcher):
393 393 yield x
394 394 for x in self.topfiles():
395 395 yield x
396 396
397 397 def copylist(self):
398 398 return ['requires'] + _data.split()
399 399
400 400 def write(self, tr):
401 401 pass
402 402
403 403 def invalidatecaches(self):
404 404 pass
405 405
406 406 def markremoved(self, fn):
407 407 pass
408 408
409 409 def __contains__(self, path):
410 410 '''Checks if the store contains path'''
411 411 path = "/".join(("data", path))
412 412 # file?
413 413 if self.vfs.exists(path + ".i"):
414 414 return True
415 415 # dir?
416 416 if not path.endswith("/"):
417 417 path = path + "/"
418 418 return self.vfs.exists(path)
419 419
420 420 class encodedstore(basicstore):
421 421 def __init__(self, path, vfstype):
422 422 vfs = vfstype(path + '/store')
423 423 self.path = vfs.base
424 424 self.createmode = _calcmode(vfs)
425 425 vfs.createmode = self.createmode
426 426 self.rawvfs = vfs
427 427 self.vfs = vfsmod.filtervfs(vfs, encodefilename)
428 428 self.opener = self.vfs
429 429
430 430 def datafiles(self, matcher=None):
431 431 for a, b, size in super(encodedstore, self).datafiles():
432 432 try:
433 433 a = decodefilename(a)
434 434 except KeyError:
435 435 a = None
436 436 if a is not None and not _matchtrackedpath(a, matcher):
437 437 continue
438 438 yield a, b, size
439 439
440 440 def join(self, f):
441 441 return self.path + '/' + encodefilename(f)
442 442
443 443 def copylist(self):
444 444 return (['requires', '00changelog.i'] +
445 445 ['store/' + f for f in _data.split()])
446 446
447 447 class fncache(object):
448 448 # the filename used to be partially encoded
449 449 # hence the encodedir/decodedir dance
450 450 def __init__(self, vfs):
451 451 self.vfs = vfs
452 452 self.entries = None
453 453 self._dirty = False
454 454 # set of new additions to fncache
455 455 self.addls = set()
456 456
457 457 def _load(self):
458 458 '''fill the entries from the fncache file'''
459 459 self._dirty = False
460 460 try:
461 461 fp = self.vfs('fncache', mode='rb')
462 462 except IOError:
463 463 # skip nonexistent file
464 464 self.entries = set()
465 465 return
466 466 self.entries = set(decodedir(fp.read()).splitlines())
467 467 if '' in self.entries:
468 468 fp.seek(0)
469 469 for n, line in enumerate(util.iterfile(fp)):
470 470 if not line.rstrip('\n'):
471 471 t = _('invalid entry in fncache, line %d') % (n + 1)
472 472 raise error.Abort(t)
473 473 fp.close()
474 474
475 475 def write(self, tr):
476 476 if self._dirty:
477 477 assert self.entries is not None
478 478 self.entries = self.entries | self.addls
479 479 self.addls = set()
480 480 tr.addbackup('fncache')
481 481 fp = self.vfs('fncache', mode='wb', atomictemp=True)
482 482 if self.entries:
483 483 fp.write(encodedir('\n'.join(self.entries) + '\n'))
484 484 fp.close()
485 485 self._dirty = False
486 486 if self.addls:
487 487 # if we have just new entries, let's append them to the fncache
488 488 tr.addbackup('fncache')
489 489 fp = self.vfs('fncache', mode='ab', atomictemp=True)
490 490 if self.addls:
491 491 fp.write(encodedir('\n'.join(self.addls) + '\n'))
492 492 fp.close()
493 493 self.entries = None
494 494 self.addls = set()
495 495
496 496 def add(self, fn):
497 497 if self.entries is None:
498 498 self._load()
499 499 if fn not in self.entries:
500 500 self.addls.add(fn)
501 501
502 502 def remove(self, fn):
503 503 if self.entries is None:
504 504 self._load()
505 505 if fn in self.addls:
506 506 self.addls.remove(fn)
507 507 return
508 508 try:
509 509 self.entries.remove(fn)
510 510 self._dirty = True
511 511 except KeyError:
512 512 pass
513 513
514 514 def __contains__(self, fn):
515 515 if fn in self.addls:
516 516 return True
517 517 if self.entries is None:
518 518 self._load()
519 519 return fn in self.entries
520 520
521 521 def __iter__(self):
522 522 if self.entries is None:
523 523 self._load()
524 524 return iter(self.entries | self.addls)
525 525
526 class _fncachevfs(vfsmod.abstractvfs, vfsmod.proxyvfs):
526 class _fncachevfs(vfsmod.proxyvfs):
527 527 def __init__(self, vfs, fnc, encode):
528 528 vfsmod.proxyvfs.__init__(self, vfs)
529 529 self.fncache = fnc
530 530 self.encode = encode
531 531
532 532 def __call__(self, path, mode='r', *args, **kw):
533 533 encoded = self.encode(path)
534 534 if mode not in ('r', 'rb') and (path.startswith('data/') or
535 535 path.startswith('meta/')):
536 536 # do not trigger a fncache load when adding a file that already is
537 537 # known to exist.
538 538 notload = self.fncache.entries is None and self.vfs.exists(encoded)
539 539 if notload and 'a' in mode and not self.vfs.stat(encoded).st_size:
540 540 # when appending to an existing file, if the file has size zero,
541 541 # it should be considered as missing. Such zero-size files are
542 542 # the result of truncation when a transaction is aborted.
543 543 notload = False
544 544 if not notload:
545 545 self.fncache.add(path)
546 546 return self.vfs(encoded, mode, *args, **kw)
547 547
548 548 def join(self, path):
549 549 if path:
550 550 return self.vfs.join(self.encode(path))
551 551 else:
552 552 return self.vfs.join(path)
553 553
554 554 class fncachestore(basicstore):
555 555 def __init__(self, path, vfstype, dotencode):
556 556 if dotencode:
557 557 encode = _pathencode
558 558 else:
559 559 encode = _plainhybridencode
560 560 self.encode = encode
561 561 vfs = vfstype(path + '/store')
562 562 self.path = vfs.base
563 563 self.pathsep = self.path + '/'
564 564 self.createmode = _calcmode(vfs)
565 565 vfs.createmode = self.createmode
566 566 self.rawvfs = vfs
567 567 fnc = fncache(vfs)
568 568 self.fncache = fnc
569 569 self.vfs = _fncachevfs(vfs, fnc, encode)
570 570 self.opener = self.vfs
571 571
572 572 def join(self, f):
573 573 return self.pathsep + self.encode(f)
574 574
575 575 def getsize(self, path):
576 576 return self.rawvfs.stat(path).st_size
577 577
578 578 def datafiles(self, matcher=None):
579 579 for f in sorted(self.fncache):
580 580 if not _matchtrackedpath(f, matcher):
581 581 continue
582 582 ef = self.encode(f)
583 583 try:
584 584 yield f, ef, self.getsize(ef)
585 585 except OSError as err:
586 586 if err.errno != errno.ENOENT:
587 587 raise
588 588
589 589 def copylist(self):
590 590 d = ('narrowspec data meta dh fncache phaseroots obsstore'
591 591 ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
592 592 return (['requires', '00changelog.i'] +
593 593 ['store/' + f for f in d.split()])
594 594
595 595 def write(self, tr):
596 596 self.fncache.write(tr)
597 597
598 598 def invalidatecaches(self):
599 599 self.fncache.entries = None
600 600 self.fncache.addls = set()
601 601
602 602 def markremoved(self, fn):
603 603 self.fncache.remove(fn)
604 604
605 605 def _exists(self, f):
606 606 ef = self.encode(f)
607 607 try:
608 608 self.getsize(ef)
609 609 return True
610 610 except OSError as err:
611 611 if err.errno != errno.ENOENT:
612 612 raise
613 613 # nonexistent entry
614 614 return False
615 615
616 616 def __contains__(self, path):
617 617 '''Checks if the store contains path'''
618 618 path = "/".join(("data", path))
619 619 # check for files (exact match)
620 620 e = path + '.i'
621 621 if e in self.fncache and self._exists(e):
622 622 return True
623 623 # now check for directories (prefix match)
624 624 if not path.endswith('/'):
625 625 path += '/'
626 626 for e in self.fncache:
627 627 if e.startswith(path) and self._exists(e):
628 628 return True
629 629 return False
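The encoding helpers at the top of this file (encodedir, encodefilename, _hybridencode) already carry doctests; the following standalone sketch simply exercises them, assuming a Mercurial installation where the mercurial.store module is importable. Expected values are taken from the docstrings above, not re-derived.

from mercurial import store

# Reversible "default" encoding: uppercase letters become _x, reserved
# characters become ~xx (see _buildencodefun/encodefilename above).
enc = store.encodefilename(b'data/HELLO:world?.i')
# per the doctests this is b'data/_h_e_l_l_o~3aworld~3f.i'
assert store.decodefilename(enc) == b'data/HELLO:world?.i'

# Directory/file collision avoidance: a directory named foo.i is stored as foo.i.hg.
assert store.encodedir(b'data/foo.i/bla.i') == b'data/foo.i.hg/bla.i'

# Non-reversible hashed encoding kicks in once the encoded path exceeds
# _maxstorepathlen (120): the result is rewritten under 'dh/' and ends with
# the sha1 digest of the full path plus the original extension.
longpath = b'data/' + b'x' * 200 + b'.i'
assert store._hybridencode(longpath, True).startswith(b'dh/')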
@@ -1,671 +1,671 b''
1 1 # vfs.py - Mercurial 'vfs' classes
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from __future__ import absolute_import
8 8
9 9 import contextlib
10 10 import errno
11 11 import os
12 12 import shutil
13 13 import stat
14 14 import threading
15 15
16 16 from .i18n import _
17 17 from . import (
18 18 encoding,
19 19 error,
20 20 pathutil,
21 21 pycompat,
22 22 util,
23 23 )
24 24
25 25 def _avoidambig(path, oldstat):
26 26 """Avoid file stat ambiguity forcibly
27 27
28 28 This function causes copying ``path`` file, if it is owned by
29 29 another (see issue5418 and issue5584 for detail).
30 30 """
31 31 def checkandavoid():
32 32 newstat = util.filestat.frompath(path)
33 33 # return whether file stat ambiguity is (already) avoided
34 34 return (not newstat.isambig(oldstat) or
35 35 newstat.avoidambig(path, oldstat))
36 36 if not checkandavoid():
37 37 # simply copy to change owner of path to get privilege to
38 38 # advance mtime (see issue5418)
39 39 util.rename(util.mktempcopy(path), path)
40 40 checkandavoid()
41 41
42 42 class abstractvfs(object):
43 43 """Abstract base class; cannot be instantiated"""
44 44
45 45 def __init__(self, *args, **kwargs):
46 46 '''Prevent instantiation; don't call this from subclasses.'''
47 47 raise NotImplementedError('attempted instantiating ' + str(type(self)))
48 48
49 49 def _auditpath(self, path, mode):
50 50 pass
51 51
52 52 def tryread(self, path):
53 53 '''gracefully return an empty string for missing files'''
54 54 try:
55 55 return self.read(path)
56 56 except IOError as inst:
57 57 if inst.errno != errno.ENOENT:
58 58 raise
59 59 return ""
60 60
61 61 def tryreadlines(self, path, mode='rb'):
62 62 '''gracefully return an empty array for missing files'''
63 63 try:
64 64 return self.readlines(path, mode=mode)
65 65 except IOError as inst:
66 66 if inst.errno != errno.ENOENT:
67 67 raise
68 68 return []
69 69
70 70 @util.propertycache
71 71 def open(self):
72 72 '''Open ``path`` file, which is relative to vfs root.
73 73
74 74 Newly created directories are marked as "not to be indexed by
75 75 the content indexing service", if ``notindexed`` is specified
76 76 for "write" mode access.
77 77 '''
78 78 return self.__call__
79 79
80 80 def read(self, path):
81 81 with self(path, 'rb') as fp:
82 82 return fp.read()
83 83
84 84 def readlines(self, path, mode='rb'):
85 85 with self(path, mode=mode) as fp:
86 86 return fp.readlines()
87 87
88 88 def write(self, path, data, backgroundclose=False, **kwargs):
89 89 with self(path, 'wb', backgroundclose=backgroundclose, **kwargs) as fp:
90 90 return fp.write(data)
91 91
92 92 def writelines(self, path, data, mode='wb', notindexed=False):
93 93 with self(path, mode=mode, notindexed=notindexed) as fp:
94 94 return fp.writelines(data)
95 95
96 96 def append(self, path, data):
97 97 with self(path, 'ab') as fp:
98 98 return fp.write(data)
99 99
100 100 def basename(self, path):
101 101 """return base element of a path (as os.path.basename would do)
102 102
103 103 This exists to allow handling of strange encoding if needed."""
104 104 return os.path.basename(path)
105 105
106 106 def chmod(self, path, mode):
107 107 return os.chmod(self.join(path), mode)
108 108
109 109 def dirname(self, path):
110 110 """return dirname element of a path (as os.path.dirname would do)
111 111
112 112 This exists to allow handling of strange encoding if needed."""
113 113 return os.path.dirname(path)
114 114
115 115 def exists(self, path=None):
116 116 return os.path.exists(self.join(path))
117 117
118 118 def fstat(self, fp):
119 119 return util.fstat(fp)
120 120
121 121 def isdir(self, path=None):
122 122 return os.path.isdir(self.join(path))
123 123
124 124 def isfile(self, path=None):
125 125 return os.path.isfile(self.join(path))
126 126
127 127 def islink(self, path=None):
128 128 return os.path.islink(self.join(path))
129 129
130 130 def isfileorlink(self, path=None):
131 131 '''return whether path is a regular file or a symlink
132 132
133 133 Unlike isfile, this doesn't follow symlinks.'''
134 134 try:
135 135 st = self.lstat(path)
136 136 except OSError:
137 137 return False
138 138 mode = st.st_mode
139 139 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
140 140
141 141 def reljoin(self, *paths):
142 142 """join various elements of a path together (as os.path.join would do)
143 143
144 144 The vfs base is not injected so that path stay relative. This exists
145 145 to allow handling of strange encoding if needed."""
146 146 return os.path.join(*paths)
147 147
148 148 def split(self, path):
149 149 """split top-most element of a path (as os.path.split would do)
150 150
151 151 This exists to allow handling of strange encoding if needed."""
152 152 return os.path.split(path)
153 153
154 154 def lexists(self, path=None):
155 155 return os.path.lexists(self.join(path))
156 156
157 157 def lstat(self, path=None):
158 158 return os.lstat(self.join(path))
159 159
160 160 def listdir(self, path=None):
161 161 return os.listdir(self.join(path))
162 162
163 163 def makedir(self, path=None, notindexed=True):
164 164 return util.makedir(self.join(path), notindexed)
165 165
166 166 def makedirs(self, path=None, mode=None):
167 167 return util.makedirs(self.join(path), mode)
168 168
169 169 def makelock(self, info, path):
170 170 return util.makelock(info, self.join(path))
171 171
172 172 def mkdir(self, path=None):
173 173 return os.mkdir(self.join(path))
174 174
175 175 def mkstemp(self, suffix='', prefix='tmp', dir=None):
176 176 fd, name = pycompat.mkstemp(suffix=suffix, prefix=prefix,
177 177 dir=self.join(dir))
178 178 dname, fname = util.split(name)
179 179 if dir:
180 180 return fd, os.path.join(dir, fname)
181 181 else:
182 182 return fd, fname
183 183
184 184 def readdir(self, path=None, stat=None, skip=None):
185 185 return util.listdir(self.join(path), stat, skip)
186 186
187 187 def readlock(self, path):
188 188 return util.readlock(self.join(path))
189 189
190 190 def rename(self, src, dst, checkambig=False):
191 191 """Rename from src to dst
192 192
193 193 checkambig argument is used with util.filestat, and is useful
194 194 only if destination file is guarded by any lock
195 195 (e.g. repo.lock or repo.wlock).
196 196
197 197 To avoid file stat ambiguity forcibly, checkambig=True involves
198 198 copying ``src`` file, if it is owned by another. Therefore, use
199 199 checkambig=True only in limited cases (see also issue5418 and
200 200 issue5584 for detail).
201 201 """
202 202 self._auditpath(dst, 'w')
203 203 srcpath = self.join(src)
204 204 dstpath = self.join(dst)
205 205 oldstat = checkambig and util.filestat.frompath(dstpath)
206 206 if oldstat and oldstat.stat:
207 207 ret = util.rename(srcpath, dstpath)
208 208 _avoidambig(dstpath, oldstat)
209 209 return ret
210 210 return util.rename(srcpath, dstpath)
211 211
212 212 def readlink(self, path):
213 213 return util.readlink(self.join(path))
214 214
215 215 def removedirs(self, path=None):
216 216 """Remove a leaf directory and all empty intermediate ones
217 217 """
218 218 return util.removedirs(self.join(path))
219 219
220 220 def rmdir(self, path=None):
221 221 """Remove an empty directory."""
222 222 return os.rmdir(self.join(path))
223 223
224 224 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
225 225 """Remove a directory tree recursively
226 226
227 227 If ``forcibly``, this tries to remove READ-ONLY files, too.
228 228 """
229 229 if forcibly:
230 230 def onerror(function, path, excinfo):
231 231 if function is not os.remove:
232 232 raise
233 233 # read-only files cannot be unlinked under Windows
234 234 s = os.stat(path)
235 235 if (s.st_mode & stat.S_IWRITE) != 0:
236 236 raise
237 237 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
238 238 os.remove(path)
239 239 else:
240 240 onerror = None
241 241 return shutil.rmtree(self.join(path),
242 242 ignore_errors=ignore_errors, onerror=onerror)
243 243
244 244 def setflags(self, path, l, x):
245 245 return util.setflags(self.join(path), l, x)
246 246
247 247 def stat(self, path=None):
248 248 return os.stat(self.join(path))
249 249
250 250 def unlink(self, path=None):
251 251 return util.unlink(self.join(path))
252 252
253 253 def tryunlink(self, path=None):
254 254 """Attempt to remove a file, ignoring missing file errors."""
255 255 util.tryunlink(self.join(path))
256 256
257 257 def unlinkpath(self, path=None, ignoremissing=False, rmdir=True):
258 258 return util.unlinkpath(self.join(path), ignoremissing=ignoremissing,
259 259 rmdir=rmdir)
260 260
261 261 def utime(self, path=None, t=None):
262 262 return os.utime(self.join(path), t)
263 263
264 264 def walk(self, path=None, onerror=None):
265 265 """Yield (dirpath, dirs, files) tuple for each directories under path
266 266
267 267 ``dirpath`` is relative one from the root of this vfs. This
268 268 uses ``os.sep`` as path separator, even you specify POSIX
269 269 style ``path``.
270 270
271 271 "The root of this vfs" is represented as empty ``dirpath``.
272 272 """
273 273 root = os.path.normpath(self.join(None))
274 274 # when dirpath == root, dirpath[prefixlen:] becomes empty
275 275 # because len(dirpath) < prefixlen.
276 276 prefixlen = len(pathutil.normasprefix(root))
277 277 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
278 278 yield (dirpath[prefixlen:], dirs, files)
279 279
280 280 @contextlib.contextmanager
281 281 def backgroundclosing(self, ui, expectedcount=-1):
282 282 """Allow files to be closed asynchronously.
283 283
284 284 When this context manager is active, ``backgroundclose`` can be passed
285 285 to ``__call__``/``open`` to result in the file possibly being closed
286 286 asynchronously, on a background thread.
287 287 """
288 288 # Sharing backgroundfilecloser between threads is complex and using
289 289 # multiple instances puts us at risk of running out of file descriptors
290 290 # only allow to use backgroundfilecloser when in main thread.
291 291 if not isinstance(threading.currentThread(), threading._MainThread):
292 292 yield
293 293 return
294 294 vfs = getattr(self, 'vfs', self)
295 295 if getattr(vfs, '_backgroundfilecloser', None):
296 296 raise error.Abort(
297 297 _('can only have 1 active background file closer'))
298 298
299 299 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
300 300 try:
301 301 vfs._backgroundfilecloser = bfc
302 302 yield bfc
303 303 finally:
304 304 vfs._backgroundfilecloser = None
305 305
306 306 class vfs(abstractvfs):
307 307 '''Operate files relative to a base directory
308 308
309 309 This class is used to hide the details of COW semantics and
310 310 remote file access from higher level code.
311 311
312 312 'cacheaudited' should be enabled only if (a) vfs object is short-lived, or
313 313 (b) the base directory is managed by hg and considered sort-of append-only.
314 314 See pathutil.pathauditor() for details.
315 315 '''
316 316 def __init__(self, base, audit=True, cacheaudited=False, expandpath=False,
317 317 realpath=False):
318 318 if expandpath:
319 319 base = util.expandpath(base)
320 320 if realpath:
321 321 base = os.path.realpath(base)
322 322 self.base = base
323 323 self._audit = audit
324 324 if audit:
325 325 self.audit = pathutil.pathauditor(self.base, cached=cacheaudited)
326 326 else:
327 327 self.audit = (lambda path, mode=None: True)
328 328 self.createmode = None
329 329 self._trustnlink = None
330 330
331 331 @util.propertycache
332 332 def _cansymlink(self):
333 333 return util.checklink(self.base)
334 334
335 335 @util.propertycache
336 336 def _chmod(self):
337 337 return util.checkexec(self.base)
338 338
339 339 def _fixfilemode(self, name):
340 340 if self.createmode is None or not self._chmod:
341 341 return
342 342 os.chmod(name, self.createmode & 0o666)
343 343
344 344 def _auditpath(self, path, mode):
345 345 if self._audit:
346 346 if os.path.isabs(path) and path.startswith(self.base):
347 347 path = os.path.relpath(path, self.base)
348 348 r = util.checkosfilename(path)
349 349 if r:
350 350 raise error.Abort("%s: %r" % (r, path))
351 351 self.audit(path, mode=mode)
352 352
353 353 def __call__(self, path, mode="r", atomictemp=False, notindexed=False,
354 354 backgroundclose=False, checkambig=False, auditpath=True,
355 355 makeparentdirs=True):
356 356 '''Open ``path`` file, which is relative to vfs root.
357 357
358 358 By default, parent directories are created as needed. Newly created
359 359 directories are marked as "not to be indexed by the content indexing
360 360 service", if ``notindexed`` is specified for "write" mode access.
361 361 Set ``makeparentdirs=False`` to not create directories implicitly.
362 362
363 363 If ``backgroundclose`` is passed, the file may be closed asynchronously.
364 364 It can only be used if the ``self.backgroundclosing()`` context manager
365 365 is active. This should only be specified if the following criteria hold:
366 366
367 367 1. There is a potential for writing thousands of files. Unless you
368 368 are writing thousands of files, the performance benefits of
369 369 asynchronously closing files is not realized.
370 370 2. Files are opened exactly once for the ``backgroundclosing``
371 371 active duration and are therefore free of race conditions between
372 372 closing a file on a background thread and reopening it. (If the
373 373 file were opened multiple times, there could be unflushed data
374 374 because the original file handle hasn't been flushed/closed yet.)
375 375
376 376 ``checkambig`` argument is passed to atomictemplfile (valid
377 377 only for writing), and is useful only if target file is
378 378 guarded by any lock (e.g. repo.lock or repo.wlock).
379 379
380 380 To avoid file stat ambiguity forcibly, checkambig=True involves
381 381 copying ``path`` file opened in "append" mode (e.g. for
382 382 truncation), if it is owned by another. Therefore, use
383 383 combination of append mode and checkambig=True only in limited
384 384 cases (see also issue5418 and issue5584 for detail).
385 385 '''
386 386 if auditpath:
387 387 self._auditpath(path, mode)
388 388 f = self.join(path)
389 389
390 390 if "b" not in mode:
391 391 mode += "b" # for that other OS
392 392
393 393 nlink = -1
394 394 if mode not in ('r', 'rb'):
395 395 dirname, basename = util.split(f)
396 396 # If basename is empty, then the path is malformed because it points
397 397 # to a directory. Let the posixfile() call below raise IOError.
398 398 if basename:
399 399 if atomictemp:
400 400 if makeparentdirs:
401 401 util.makedirs(dirname, self.createmode, notindexed)
402 402 return util.atomictempfile(f, mode, self.createmode,
403 403 checkambig=checkambig)
404 404 try:
405 405 if 'w' in mode:
406 406 util.unlink(f)
407 407 nlink = 0
408 408 else:
409 409 # nlinks() may behave differently for files on Windows
410 410 # shares if the file is open.
411 411 with util.posixfile(f):
412 412 nlink = util.nlinks(f)
413 413 if nlink < 1:
414 414 nlink = 2 # force mktempcopy (issue1922)
415 415 except (OSError, IOError) as e:
416 416 if e.errno != errno.ENOENT:
417 417 raise
418 418 nlink = 0
419 419 if makeparentdirs:
420 420 util.makedirs(dirname, self.createmode, notindexed)
421 421 if nlink > 0:
422 422 if self._trustnlink is None:
423 423 self._trustnlink = nlink > 1 or util.checknlink(f)
424 424 if nlink > 1 or not self._trustnlink:
425 425 util.rename(util.mktempcopy(f), f)
426 426 fp = util.posixfile(f, mode)
427 427 if nlink == 0:
428 428 self._fixfilemode(f)
429 429
430 430 if checkambig:
431 431 if mode in ('r', 'rb'):
432 432 raise error.Abort(_('implementation error: mode %s is not'
433 433 ' valid for checkambig=True') % mode)
434 434 fp = checkambigatclosing(fp)
435 435
436 436 if (backgroundclose and
437 437 isinstance(threading.currentThread(), threading._MainThread)):
438 438 if not self._backgroundfilecloser:
439 439 raise error.Abort(_('backgroundclose can only be used when a '
440 440 'backgroundclosing context manager is active')
441 441 )
442 442
443 443 fp = delayclosedfile(fp, self._backgroundfilecloser)
444 444
445 445 return fp
446 446
447 447 def symlink(self, src, dst):
448 448 self.audit(dst)
449 449 linkname = self.join(dst)
450 450 util.tryunlink(linkname)
451 451
452 452 util.makedirs(os.path.dirname(linkname), self.createmode)
453 453
454 454 if self._cansymlink:
455 455 try:
456 456 os.symlink(src, linkname)
457 457 except OSError as err:
458 458 raise OSError(err.errno, _('could not symlink to %r: %s') %
459 459 (src, encoding.strtolocal(err.strerror)),
460 460 linkname)
461 461 else:
462 462 self.write(dst, src)
463 463
464 464 def join(self, path, *insidef):
465 465 if path:
466 466 return os.path.join(self.base, path, *insidef)
467 467 else:
468 468 return self.base
469 469
470 470 opener = vfs
471 471
472 class proxyvfs(object):
472 class proxyvfs(abstractvfs):
473 473 def __init__(self, vfs):
474 474 self.vfs = vfs
475 475
476 476 @property
477 477 def options(self):
478 478 return self.vfs.options
479 479
480 480 @options.setter
481 481 def options(self, value):
482 482 self.vfs.options = value
483 483
484 class filtervfs(abstractvfs, proxyvfs):
484 class filtervfs(proxyvfs, abstractvfs):
485 485 '''Wrapper vfs for filtering filenames with a function.'''
486 486
487 487 def __init__(self, vfs, filter):
488 488 proxyvfs.__init__(self, vfs)
489 489 self._filter = filter
490 490
491 491 def __call__(self, path, *args, **kwargs):
492 492 return self.vfs(self._filter(path), *args, **kwargs)
493 493
494 494 def join(self, path, *insidef):
495 495 if path:
496 496 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
497 497 else:
498 498 return self.vfs.join(path)
499 499
500 500 filteropener = filtervfs
501 501
502 class readonlyvfs(abstractvfs, proxyvfs):
502 class readonlyvfs(proxyvfs):
503 503 '''Wrapper vfs preventing any writing.'''
504 504
505 505 def __init__(self, vfs):
506 506 proxyvfs.__init__(self, vfs)
507 507
508 508 def __call__(self, path, mode='r', *args, **kw):
509 509 if mode not in ('r', 'rb'):
510 510 raise error.Abort(_('this vfs is read only'))
511 511 return self.vfs(path, mode, *args, **kw)
512 512
513 513 def join(self, path, *insidef):
514 514 return self.vfs.join(path, *insidef)
515 515
516 516 class closewrapbase(object):
517 517 """Base class of wrapper, which hooks closing
518 518
519 519 Do not instantiate outside of the vfs layer.
520 520 """
521 521 def __init__(self, fh):
522 522 object.__setattr__(self, r'_origfh', fh)
523 523
524 524 def __getattr__(self, attr):
525 525 return getattr(self._origfh, attr)
526 526
527 527 def __setattr__(self, attr, value):
528 528 return setattr(self._origfh, attr, value)
529 529
530 530 def __delattr__(self, attr):
531 531 return delattr(self._origfh, attr)
532 532
533 533 def __enter__(self):
534 534 self._origfh.__enter__()
535 535 return self
536 536
537 537 def __exit__(self, exc_type, exc_value, exc_tb):
538 538 raise NotImplementedError('attempted instantiating ' + str(type(self)))
539 539
540 540 def close(self):
541 541 raise NotImplementedError('attempted instantiating ' + str(type(self)))
542 542
543 543 class delayclosedfile(closewrapbase):
544 544 """Proxy for a file object whose close is delayed.
545 545
546 546 Do not instantiate outside of the vfs layer.
547 547 """
548 548 def __init__(self, fh, closer):
549 549 super(delayclosedfile, self).__init__(fh)
550 550 object.__setattr__(self, r'_closer', closer)
551 551
552 552 def __exit__(self, exc_type, exc_value, exc_tb):
553 553 self._closer.close(self._origfh)
554 554
555 555 def close(self):
556 556 self._closer.close(self._origfh)
557 557
558 558 class backgroundfilecloser(object):
559 559 """Coordinates background closing of file handles on multiple threads."""
560 560 def __init__(self, ui, expectedcount=-1):
561 561 self._running = False
562 562 self._entered = False
563 563 self._threads = []
564 564 self._threadexception = None
565 565
566 566 # Only Windows/NTFS has slow file closing. So only enable by default
567 567 # on that platform. But allow to be enabled elsewhere for testing.
568 568 defaultenabled = pycompat.iswindows
569 569 enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
570 570
571 571 if not enabled:
572 572 return
573 573
574 574 # There is overhead to starting and stopping the background threads.
575 575 # Don't do background processing unless the file count is large enough
576 576 # to justify it.
577 577 minfilecount = ui.configint('worker', 'backgroundcloseminfilecount')
578 578 # FUTURE dynamically start background threads after minfilecount closes.
579 579 # (We don't currently have any callers that don't know their file count)
580 580 if expectedcount > 0 and expectedcount < minfilecount:
581 581 return
582 582
583 583 maxqueue = ui.configint('worker', 'backgroundclosemaxqueue')
584 584 threadcount = ui.configint('worker', 'backgroundclosethreadcount')
585 585
586 586 ui.debug('starting %d threads for background file closing\n' %
587 587 threadcount)
588 588
589 589 self._queue = pycompat.queue.Queue(maxsize=maxqueue)
590 590 self._running = True
591 591
592 592 for i in range(threadcount):
593 593 t = threading.Thread(target=self._worker, name='backgroundcloser')
594 594 self._threads.append(t)
595 595 t.start()
596 596
597 597 def __enter__(self):
598 598 self._entered = True
599 599 return self
600 600
601 601 def __exit__(self, exc_type, exc_value, exc_tb):
602 602 self._running = False
603 603
604 604 # Wait for threads to finish closing so open files don't linger for
605 605 # longer than lifetime of context manager.
606 606 for t in self._threads:
607 607 t.join()
608 608
609 609 def _worker(self):
610 610 """Main routine for worker thread."""
611 611 while True:
612 612 try:
613 613 fh = self._queue.get(block=True, timeout=0.100)
614 614 # Need to catch or the thread will terminate and
615 615 # we could orphan file descriptors.
616 616 try:
617 617 fh.close()
618 618 except Exception as e:
619 619 # Stash so can re-raise from main thread later.
620 620 self._threadexception = e
621 621 except pycompat.queue.Empty:
622 622 if not self._running:
623 623 break
624 624
625 625 def close(self, fh):
626 626 """Schedule a file for closing."""
627 627 if not self._entered:
628 628 raise error.Abort(_('can only call close() when context manager '
629 629 'active'))
630 630
631 631 # If a background thread encountered an exception, raise now so we fail
632 632 # fast. Otherwise we may potentially go on for minutes until the error
633 633 # is acted on.
634 634 if self._threadexception:
635 635 e = self._threadexception
636 636 self._threadexception = None
637 637 raise e
638 638
639 639 # If we're not actively running, close synchronously.
640 640 if not self._running:
641 641 fh.close()
642 642 return
643 643
644 644 self._queue.put(fh, block=True, timeout=None)
645 645
646 646 class checkambigatclosing(closewrapbase):
647 647 """Proxy for a file object, to avoid ambiguity of file stat
648 648
649 649 See also util.filestat for detail about "ambiguity of file stat".
650 650
651 651 This proxy is useful only if the target file is guarded by any
652 652 lock (e.g. repo.lock or repo.wlock)
653 653
654 654 Do not instantiate outside of the vfs layer.
655 655 """
656 656 def __init__(self, fh):
657 657 super(checkambigatclosing, self).__init__(fh)
658 658 object.__setattr__(self, r'_oldstat', util.filestat.frompath(fh.name))
659 659
660 660 def _checkambig(self):
661 661 oldstat = self._oldstat
662 662 if oldstat.stat:
663 663 _avoidambig(self._origfh.name, oldstat)
664 664
665 665 def __exit__(self, exc_type, exc_value, exc_tb):
666 666 self._origfh.__exit__(exc_type, exc_value, exc_tb)
667 667 self._checkambig()
668 668
669 669 def close(self):
670 670 self._origfh.close()
671 671 self._checkambig()
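The __call__ and backgroundclosing docstrings above describe how callers opt into asynchronous file closing. Below is a hedged usage sketch; the ui construction, the base directory and the file count are assumptions for illustration and are not part of this changeset. Background closing is only actually enabled where the worker.backgroundclose configuration allows it (Windows by default); elsewhere the same code path simply closes files synchronously.

from mercurial import ui as uimod, vfs as vfsmod

myui = uimod.ui.load()
myvfs = vfsmod.vfs(b'/tmp/vfs-demo', audit=False)

# Inside the context manager, writes may hand their file handles to the
# background closer threads instead of closing them inline.
with myvfs.backgroundclosing(myui, expectedcount=5000):
    for i in range(5000):
        myvfs.write(b'data/file-%d.txt' % i, b'payload\n', backgroundclose=True)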