##// END OF EJS Templates
vfs: extract 'vfs' class and related code to a new 'vfs' module (API)...
Pierre-Yves David -
r31217:0f31830f default
parent child Browse files
Show More
This diff has been collapsed as it changes many lines, (627 lines changed) Show them Hide them
@@ -7,17 +7,12 b''
7 7
8 8 from __future__ import absolute_import
9 9
10 import contextlib
11 10 import errno
12 11 import glob
13 12 import hashlib
14 13 import os
15 14 import re
16 import shutil
17 15 import socket
18 import stat
19 import tempfile
20 import threading
21 16
22 17 from .i18n import _
23 18 from .node import wdirrev
@@ -32,6 +27,7 b' from . import ('
32 27 revsetlang,
33 28 similar,
34 29 util,
30 vfs as vfsmod,
35 31 )
36 32
37 33 if pycompat.osname == 'nt':
@@ -336,455 +332,16 b' def filteredhash(repo, maxrev):'
336 332 key = s.digest()
337 333 return key
338 334
339 class abstractvfs(object):
340 """Abstract base class; cannot be instantiated"""
341
342 def __init__(self, *args, **kwargs):
343 '''Prevent instantiation; don't call this from subclasses.'''
344 raise NotImplementedError('attempted instantiating ' + str(type(self)))
345
346 def tryread(self, path):
347 '''gracefully return an empty string for missing files'''
348 try:
349 return self.read(path)
350 except IOError as inst:
351 if inst.errno != errno.ENOENT:
352 raise
353 return ""
354
355 def tryreadlines(self, path, mode='rb'):
356 '''gracefully return an empty array for missing files'''
357 try:
358 return self.readlines(path, mode=mode)
359 except IOError as inst:
360 if inst.errno != errno.ENOENT:
361 raise
362 return []
363
364 @util.propertycache
365 def open(self):
366 '''Open ``path`` file, which is relative to vfs root.
367
368 Newly created directories are marked as "not to be indexed by
369 the content indexing service", if ``notindexed`` is specified
370 for "write" mode access.
371 '''
372 return self.__call__
373
374 def read(self, path):
375 with self(path, 'rb') as fp:
376 return fp.read()
377
378 def readlines(self, path, mode='rb'):
379 with self(path, mode=mode) as fp:
380 return fp.readlines()
381
382 def write(self, path, data, backgroundclose=False):
383 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
384 return fp.write(data)
385
386 def writelines(self, path, data, mode='wb', notindexed=False):
387 with self(path, mode=mode, notindexed=notindexed) as fp:
388 return fp.writelines(data)
389
390 def append(self, path, data):
391 with self(path, 'ab') as fp:
392 return fp.write(data)
393
394 def basename(self, path):
395 """return base element of a path (as os.path.basename would do)
396
397 This exists to allow handling of strange encoding if needed."""
398 return os.path.basename(path)
399
400 def chmod(self, path, mode):
401 return os.chmod(self.join(path), mode)
402
403 def dirname(self, path):
404 """return dirname element of a path (as os.path.dirname would do)
405
406 This exists to allow handling of strange encoding if needed."""
407 return os.path.dirname(path)
408
409 def exists(self, path=None):
410 return os.path.exists(self.join(path))
411
412 def fstat(self, fp):
413 return util.fstat(fp)
414
415 def isdir(self, path=None):
416 return os.path.isdir(self.join(path))
417
418 def isfile(self, path=None):
419 return os.path.isfile(self.join(path))
420
421 def islink(self, path=None):
422 return os.path.islink(self.join(path))
423
424 def isfileorlink(self, path=None):
425 '''return whether path is a regular file or a symlink
426
427 Unlike isfile, this doesn't follow symlinks.'''
428 try:
429 st = self.lstat(path)
430 except OSError:
431 return False
432 mode = st.st_mode
433 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
434
435 def reljoin(self, *paths):
436 """join various elements of a path together (as os.path.join would do)
437
438 The vfs base is not injected so that path stay relative. This exists
439 to allow handling of strange encoding if needed."""
440 return os.path.join(*paths)
441
442 def split(self, path):
443 """split top-most element of a path (as os.path.split would do)
444
445 This exists to allow handling of strange encoding if needed."""
446 return os.path.split(path)
447
448 def lexists(self, path=None):
449 return os.path.lexists(self.join(path))
450
451 def lstat(self, path=None):
452 return os.lstat(self.join(path))
453
454 def listdir(self, path=None):
455 return os.listdir(self.join(path))
456
457 def makedir(self, path=None, notindexed=True):
458 return util.makedir(self.join(path), notindexed)
459
460 def makedirs(self, path=None, mode=None):
461 return util.makedirs(self.join(path), mode)
462
463 def makelock(self, info, path):
464 return util.makelock(info, self.join(path))
465
466 def mkdir(self, path=None):
467 return os.mkdir(self.join(path))
468
469 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
470 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
471 dir=self.join(dir), text=text)
472 dname, fname = util.split(name)
473 if dir:
474 return fd, os.path.join(dir, fname)
475 else:
476 return fd, fname
477
478 def readdir(self, path=None, stat=None, skip=None):
479 return osutil.listdir(self.join(path), stat, skip)
480
481 def readlock(self, path):
482 return util.readlock(self.join(path))
483
484 def rename(self, src, dst, checkambig=False):
485 """Rename from src to dst
486
487 checkambig argument is used with util.filestat, and is useful
488 only if destination file is guarded by any lock
489 (e.g. repo.lock or repo.wlock).
490 """
491 dstpath = self.join(dst)
492 oldstat = checkambig and util.filestat(dstpath)
493 if oldstat and oldstat.stat:
494 ret = util.rename(self.join(src), dstpath)
495 newstat = util.filestat(dstpath)
496 if newstat.isambig(oldstat):
497 # stat of renamed file is ambiguous to original one
498 newstat.avoidambig(dstpath, oldstat)
499 return ret
500 return util.rename(self.join(src), dstpath)
501
502 def readlink(self, path):
503 return os.readlink(self.join(path))
504
505 def removedirs(self, path=None):
506 """Remove a leaf directory and all empty intermediate ones
507 """
508 return util.removedirs(self.join(path))
509
510 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
511 """Remove a directory tree recursively
512
513 If ``forcibly``, this tries to remove READ-ONLY files, too.
514 """
515 if forcibly:
516 def onerror(function, path, excinfo):
517 if function is not os.remove:
518 raise
519 # read-only files cannot be unlinked under Windows
520 s = os.stat(path)
521 if (s.st_mode & stat.S_IWRITE) != 0:
522 raise
523 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
524 os.remove(path)
525 else:
526 onerror = None
527 return shutil.rmtree(self.join(path),
528 ignore_errors=ignore_errors, onerror=onerror)
529
530 def setflags(self, path, l, x):
531 return util.setflags(self.join(path), l, x)
532
533 def stat(self, path=None):
534 return os.stat(self.join(path))
535
536 def unlink(self, path=None):
537 return util.unlink(self.join(path))
538
539 def unlinkpath(self, path=None, ignoremissing=False):
540 return util.unlinkpath(self.join(path), ignoremissing)
541
542 def utime(self, path=None, t=None):
543 return os.utime(self.join(path), t)
544
545 def walk(self, path=None, onerror=None):
546 """Yield (dirpath, dirs, files) tuple for each directories under path
547
548 ``dirpath`` is relative one from the root of this vfs. This
549 uses ``os.sep`` as path separator, even you specify POSIX
550 style ``path``.
551
552 "The root of this vfs" is represented as empty ``dirpath``.
553 """
554 root = os.path.normpath(self.join(None))
555 # when dirpath == root, dirpath[prefixlen:] becomes empty
556 # because len(dirpath) < prefixlen.
557 prefixlen = len(pathutil.normasprefix(root))
558 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
559 yield (dirpath[prefixlen:], dirs, files)
560
561 @contextlib.contextmanager
562 def backgroundclosing(self, ui, expectedcount=-1):
563 """Allow files to be closed asynchronously.
564
565 When this context manager is active, ``backgroundclose`` can be passed
566 to ``__call__``/``open`` to result in the file possibly being closed
567 asynchronously, on a background thread.
568 """
569 # This is an arbitrary restriction and could be changed if we ever
570 # have a use case.
571 vfs = getattr(self, 'vfs', self)
572 if getattr(vfs, '_backgroundfilecloser', None):
573 raise error.Abort(
574 _('can only have 1 active background file closer'))
575
576 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
577 try:
578 vfs._backgroundfilecloser = bfc
579 yield bfc
580 finally:
581 vfs._backgroundfilecloser = None
582
583 class vfs(abstractvfs):
584 '''Operate files relative to a base directory
585
586 This class is used to hide the details of COW semantics and
587 remote file access from higher level code.
588 '''
589 def __init__(self, base, audit=True, expandpath=False, realpath=False):
590 if expandpath:
591 base = util.expandpath(base)
592 if realpath:
593 base = os.path.realpath(base)
594 self.base = base
595 self.mustaudit = audit
596 self.createmode = None
597 self._trustnlink = None
598
599 @property
600 def mustaudit(self):
601 return self._audit
602
603 @mustaudit.setter
604 def mustaudit(self, onoff):
605 self._audit = onoff
606 if onoff:
607 self.audit = pathutil.pathauditor(self.base)
608 else:
609 self.audit = util.always
610
611 @util.propertycache
612 def _cansymlink(self):
613 return util.checklink(self.base)
614
615 @util.propertycache
616 def _chmod(self):
617 return util.checkexec(self.base)
618
619 def _fixfilemode(self, name):
620 if self.createmode is None or not self._chmod:
621 return
622 os.chmod(name, self.createmode & 0o666)
623
624 def __call__(self, path, mode="r", text=False, atomictemp=False,
625 notindexed=False, backgroundclose=False, checkambig=False):
626 '''Open ``path`` file, which is relative to vfs root.
627
628 Newly created directories are marked as "not to be indexed by
629 the content indexing service", if ``notindexed`` is specified
630 for "write" mode access.
631
632 If ``backgroundclose`` is passed, the file may be closed asynchronously.
633 It can only be used if the ``self.backgroundclosing()`` context manager
634 is active. This should only be specified if the following criteria hold:
635
636 1. There is a potential for writing thousands of files. Unless you
637 are writing thousands of files, the performance benefits of
638 asynchronously closing files is not realized.
639 2. Files are opened exactly once for the ``backgroundclosing``
640 active duration and are therefore free of race conditions between
641 closing a file on a background thread and reopening it. (If the
642 file were opened multiple times, there could be unflushed data
643 because the original file handle hasn't been flushed/closed yet.)
644
645 ``checkambig`` argument is passed to atomictemplfile (valid
646 only for writing), and is useful only if target file is
647 guarded by any lock (e.g. repo.lock or repo.wlock).
648 '''
649 if self._audit:
650 r = util.checkosfilename(path)
651 if r:
652 raise error.Abort("%s: %r" % (r, path))
653 self.audit(path)
654 f = self.join(path)
655
656 if not text and "b" not in mode:
657 mode += "b" # for that other OS
658
659 nlink = -1
660 if mode not in ('r', 'rb'):
661 dirname, basename = util.split(f)
662 # If basename is empty, then the path is malformed because it points
663 # to a directory. Let the posixfile() call below raise IOError.
664 if basename:
665 if atomictemp:
666 util.makedirs(dirname, self.createmode, notindexed)
667 return util.atomictempfile(f, mode, self.createmode,
668 checkambig=checkambig)
669 try:
670 if 'w' in mode:
671 util.unlink(f)
672 nlink = 0
673 else:
674 # nlinks() may behave differently for files on Windows
675 # shares if the file is open.
676 with util.posixfile(f):
677 nlink = util.nlinks(f)
678 if nlink < 1:
679 nlink = 2 # force mktempcopy (issue1922)
680 except (OSError, IOError) as e:
681 if e.errno != errno.ENOENT:
682 raise
683 nlink = 0
684 util.makedirs(dirname, self.createmode, notindexed)
685 if nlink > 0:
686 if self._trustnlink is None:
687 self._trustnlink = nlink > 1 or util.checknlink(f)
688 if nlink > 1 or not self._trustnlink:
689 util.rename(util.mktempcopy(f), f)
690 fp = util.posixfile(f, mode)
691 if nlink == 0:
692 self._fixfilemode(f)
693
694 if checkambig:
695 if mode in ('r', 'rb'):
696 raise error.Abort(_('implementation error: mode %s is not'
697 ' valid for checkambig=True') % mode)
698 fp = checkambigatclosing(fp)
699
700 if backgroundclose:
701 if not self._backgroundfilecloser:
702 raise error.Abort(_('backgroundclose can only be used when a '
703 'backgroundclosing context manager is active')
704 )
705
706 fp = delayclosedfile(fp, self._backgroundfilecloser)
707
708 return fp
709
710 def symlink(self, src, dst):
711 self.audit(dst)
712 linkname = self.join(dst)
713 try:
714 os.unlink(linkname)
715 except OSError:
716 pass
717
718 util.makedirs(os.path.dirname(linkname), self.createmode)
719
720 if self._cansymlink:
721 try:
722 os.symlink(src, linkname)
723 except OSError as err:
724 raise OSError(err.errno, _('could not symlink to %r: %s') %
725 (src, err.strerror), linkname)
726 else:
727 self.write(dst, src)
728
729 def join(self, path, *insidef):
730 if path:
731 return os.path.join(self.base, path, *insidef)
732 else:
733 return self.base
734
735 opener = vfs
736
737 class auditvfs(object):
738 def __init__(self, vfs):
739 self.vfs = vfs
740
741 @property
742 def mustaudit(self):
743 return self.vfs.mustaudit
744
745 @mustaudit.setter
746 def mustaudit(self, onoff):
747 self.vfs.mustaudit = onoff
748
749 @property
750 def options(self):
751 return self.vfs.options
752
753 @options.setter
754 def options(self, value):
755 self.vfs.options = value
756
757 class filtervfs(abstractvfs, auditvfs):
758 '''Wrapper vfs for filtering filenames with a function.'''
759
760 def __init__(self, vfs, filter):
761 auditvfs.__init__(self, vfs)
762 self._filter = filter
763
764 def __call__(self, path, *args, **kwargs):
765 return self.vfs(self._filter(path), *args, **kwargs)
766
767 def join(self, path, *insidef):
768 if path:
769 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
770 else:
771 return self.vfs.join(path)
772
773 filteropener = filtervfs
774
775 class readonlyvfs(abstractvfs, auditvfs):
776 '''Wrapper vfs preventing any writing.'''
777
778 def __init__(self, vfs):
779 auditvfs.__init__(self, vfs)
780
781 def __call__(self, path, mode='r', *args, **kw):
782 if mode not in ('r', 'rb'):
783 raise error.Abort(_('this vfs is read only'))
784 return self.vfs(path, mode, *args, **kw)
785
786 def join(self, path, *insidef):
787 return self.vfs.join(path, *insidef)
335 # compatibility layer since all 'vfs' code moved to 'mercurial.vfs'
336 #
337 # This is hard to instal deprecation warning to this since we do not have
338 # access to a 'ui' object.
339 opener = vfs = vfsmod.vfs
340 filteropener = filtervfs = vfsmod.filtervfs
341 abstractvfs = vfsmod.abstractvfs
342 readonlyvfs = vfsmod.readonlyvfs
343 auditvfs = vfsmod.auditvfs
344 checkambigatclosing = vfsmod.checkambigatclosing
788 345
789 346 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
790 347 '''yield every hg repository under path, always recursively.
@@ -1408,165 +965,3 b' def gddeltaconfig(ui):'
1408 965 """
1409 966 # experimental config: format.generaldelta
1410 967 return ui.configbool('format', 'generaldelta', False)
1411
1412 class closewrapbase(object):
1413 """Base class of wrapper, which hooks closing
1414
1415 Do not instantiate outside of the vfs layer.
1416 """
1417 def __init__(self, fh):
1418 object.__setattr__(self, '_origfh', fh)
1419
1420 def __getattr__(self, attr):
1421 return getattr(self._origfh, attr)
1422
1423 def __setattr__(self, attr, value):
1424 return setattr(self._origfh, attr, value)
1425
1426 def __delattr__(self, attr):
1427 return delattr(self._origfh, attr)
1428
1429 def __enter__(self):
1430 return self._origfh.__enter__()
1431
1432 def __exit__(self, exc_type, exc_value, exc_tb):
1433 raise NotImplementedError('attempted instantiating ' + str(type(self)))
1434
1435 def close(self):
1436 raise NotImplementedError('attempted instantiating ' + str(type(self)))
1437
1438 class delayclosedfile(closewrapbase):
1439 """Proxy for a file object whose close is delayed.
1440
1441 Do not instantiate outside of the vfs layer.
1442 """
1443 def __init__(self, fh, closer):
1444 super(delayclosedfile, self).__init__(fh)
1445 object.__setattr__(self, '_closer', closer)
1446
1447 def __exit__(self, exc_type, exc_value, exc_tb):
1448 self._closer.close(self._origfh)
1449
1450 def close(self):
1451 self._closer.close(self._origfh)
1452
1453 class backgroundfilecloser(object):
1454 """Coordinates background closing of file handles on multiple threads."""
1455 def __init__(self, ui, expectedcount=-1):
1456 self._running = False
1457 self._entered = False
1458 self._threads = []
1459 self._threadexception = None
1460
1461 # Only Windows/NTFS has slow file closing. So only enable by default
1462 # on that platform. But allow to be enabled elsewhere for testing.
1463 defaultenabled = pycompat.osname == 'nt'
1464 enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
1465
1466 if not enabled:
1467 return
1468
1469 # There is overhead to starting and stopping the background threads.
1470 # Don't do background processing unless the file count is large enough
1471 # to justify it.
1472 minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
1473 2048)
1474 # FUTURE dynamically start background threads after minfilecount closes.
1475 # (We don't currently have any callers that don't know their file count)
1476 if expectedcount > 0 and expectedcount < minfilecount:
1477 return
1478
1479 # Windows defaults to a limit of 512 open files. A buffer of 128
1480 # should give us enough headway.
1481 maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
1482 threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)
1483
1484 ui.debug('starting %d threads for background file closing\n' %
1485 threadcount)
1486
1487 self._queue = util.queue(maxsize=maxqueue)
1488 self._running = True
1489
1490 for i in range(threadcount):
1491 t = threading.Thread(target=self._worker, name='backgroundcloser')
1492 self._threads.append(t)
1493 t.start()
1494
1495 def __enter__(self):
1496 self._entered = True
1497 return self
1498
1499 def __exit__(self, exc_type, exc_value, exc_tb):
1500 self._running = False
1501
1502 # Wait for threads to finish closing so open files don't linger for
1503 # longer than lifetime of context manager.
1504 for t in self._threads:
1505 t.join()
1506
1507 def _worker(self):
1508 """Main routine for worker thread."""
1509 while True:
1510 try:
1511 fh = self._queue.get(block=True, timeout=0.100)
1512 # Need to catch or the thread will terminate and
1513 # we could orphan file descriptors.
1514 try:
1515 fh.close()
1516 except Exception as e:
1517 # Stash so can re-raise from main thread later.
1518 self._threadexception = e
1519 except util.empty:
1520 if not self._running:
1521 break
1522
1523 def close(self, fh):
1524 """Schedule a file for closing."""
1525 if not self._entered:
1526 raise error.Abort(_('can only call close() when context manager '
1527 'active'))
1528
1529 # If a background thread encountered an exception, raise now so we fail
1530 # fast. Otherwise we may potentially go on for minutes until the error
1531 # is acted on.
1532 if self._threadexception:
1533 e = self._threadexception
1534 self._threadexception = None
1535 raise e
1536
1537 # If we're not actively running, close synchronously.
1538 if not self._running:
1539 fh.close()
1540 return
1541
1542 self._queue.put(fh, block=True, timeout=None)
1543
1544 class checkambigatclosing(closewrapbase):
1545 """Proxy for a file object, to avoid ambiguity of file stat
1546
1547 See also util.filestat for detail about "ambiguity of file stat".
1548
1549 This proxy is useful only if the target file is guarded by any
1550 lock (e.g. repo.lock or repo.wlock)
1551
1552 Do not instantiate outside of the vfs layer.
1553 """
1554 def __init__(self, fh):
1555 super(checkambigatclosing, self).__init__(fh)
1556 object.__setattr__(self, '_oldstat', util.filestat(fh.name))
1557
1558 def _checkambig(self):
1559 oldstat = self._oldstat
1560 if oldstat.stat:
1561 newstat = util.filestat(self._origfh.name)
1562 if newstat.isambig(oldstat):
1563 # stat of changed file is ambiguous to original one
1564 newstat.avoidambig(self._origfh.name, oldstat)
1565
1566 def __exit__(self, exc_type, exc_value, exc_tb):
1567 self._origfh.__exit__(exc_type, exc_value, exc_tb)
1568 self._checkambig()
1569
1570 def close(self):
1571 self._origfh.close()
1572 self._checkambig()
This diff has been collapsed as it changes many lines, (938 lines changed) Show them Hide them
@@ -1,341 +1,28 b''
1 # scmutil.py - Mercurial core utility functions
1 # vfs.py - Mercurial 'vfs' classes
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7
8 7 from __future__ import absolute_import
9 8
10 9 import contextlib
11 10 import errno
12 import glob
13 import hashlib
14 11 import os
15 import re
16 12 import shutil
17 import socket
18 13 import stat
19 14 import tempfile
20 15 import threading
21 16
22 17 from .i18n import _
23 from .node import wdirrev
24 18 from . import (
25 encoding,
26 19 error,
27 match as matchmod,
28 20 osutil,
29 21 pathutil,
30 phases,
31 22 pycompat,
32 revsetlang,
33 similar,
34 23 util,
35 24 )
36 25
37 if pycompat.osname == 'nt':
38 from . import scmwindows as scmplatform
39 else:
40 from . import scmposix as scmplatform
41
42 systemrcpath = scmplatform.systemrcpath
43 userrcpath = scmplatform.userrcpath
44 termsize = scmplatform.termsize
45
46 class status(tuple):
47 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
48 and 'ignored' properties are only relevant to the working copy.
49 '''
50
51 __slots__ = ()
52
53 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
54 clean):
55 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
56 ignored, clean))
57
58 @property
59 def modified(self):
60 '''files that have been modified'''
61 return self[0]
62
63 @property
64 def added(self):
65 '''files that have been added'''
66 return self[1]
67
68 @property
69 def removed(self):
70 '''files that have been removed'''
71 return self[2]
72
73 @property
74 def deleted(self):
75 '''files that are in the dirstate, but have been deleted from the
76 working copy (aka "missing")
77 '''
78 return self[3]
79
80 @property
81 def unknown(self):
82 '''files not in the dirstate that are not ignored'''
83 return self[4]
84
85 @property
86 def ignored(self):
87 '''files not in the dirstate that are ignored (by _dirignore())'''
88 return self[5]
89
90 @property
91 def clean(self):
92 '''files that have not been modified'''
93 return self[6]
94
95 def __repr__(self, *args, **kwargs):
96 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
97 'unknown=%r, ignored=%r, clean=%r>') % self)
98
99 def itersubrepos(ctx1, ctx2):
100 """find subrepos in ctx1 or ctx2"""
101 # Create a (subpath, ctx) mapping where we prefer subpaths from
102 # ctx1. The subpaths from ctx2 are important when the .hgsub file
103 # has been modified (in ctx2) but not yet committed (in ctx1).
104 subpaths = dict.fromkeys(ctx2.substate, ctx2)
105 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
106
107 missing = set()
108
109 for subpath in ctx2.substate:
110 if subpath not in ctx1.substate:
111 del subpaths[subpath]
112 missing.add(subpath)
113
114 for subpath, ctx in sorted(subpaths.iteritems()):
115 yield subpath, ctx.sub(subpath)
116
117 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
118 # status and diff will have an accurate result when it does
119 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
120 # against itself.
121 for subpath in missing:
122 yield subpath, ctx2.nullsub(subpath, ctx1)
123
124 def nochangesfound(ui, repo, excluded=None):
125 '''Report no changes for push/pull, excluded is None or a list of
126 nodes excluded from the push/pull.
127 '''
128 secretlist = []
129 if excluded:
130 for n in excluded:
131 if n not in repo:
132 # discovery should not have included the filtered revision,
133 # we have to explicitly exclude it until discovery is cleanup.
134 continue
135 ctx = repo[n]
136 if ctx.phase() >= phases.secret and not ctx.extinct():
137 secretlist.append(n)
138
139 if secretlist:
140 ui.status(_("no changes found (ignored %d secret changesets)\n")
141 % len(secretlist))
142 else:
143 ui.status(_("no changes found\n"))
144
145 def callcatch(ui, func):
146 """call func() with global exception handling
147
148 return func() if no exception happens. otherwise do some error handling
149 and return an exit code accordingly. does not handle all exceptions.
150 """
151 try:
152 return func()
153 # Global exception handling, alphabetically
154 # Mercurial-specific first, followed by built-in and library exceptions
155 except error.LockHeld as inst:
156 if inst.errno == errno.ETIMEDOUT:
157 reason = _('timed out waiting for lock held by %s') % inst.locker
158 else:
159 reason = _('lock held by %s') % inst.locker
160 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
161 except error.LockUnavailable as inst:
162 ui.warn(_("abort: could not lock %s: %s\n") %
163 (inst.desc or inst.filename, inst.strerror))
164 except error.OutOfBandError as inst:
165 if inst.args:
166 msg = _("abort: remote error:\n")
167 else:
168 msg = _("abort: remote error\n")
169 ui.warn(msg)
170 if inst.args:
171 ui.warn(''.join(inst.args))
172 if inst.hint:
173 ui.warn('(%s)\n' % inst.hint)
174 except error.RepoError as inst:
175 ui.warn(_("abort: %s!\n") % inst)
176 if inst.hint:
177 ui.warn(_("(%s)\n") % inst.hint)
178 except error.ResponseError as inst:
179 ui.warn(_("abort: %s") % inst.args[0])
180 if not isinstance(inst.args[1], basestring):
181 ui.warn(" %r\n" % (inst.args[1],))
182 elif not inst.args[1]:
183 ui.warn(_(" empty string\n"))
184 else:
185 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
186 except error.CensoredNodeError as inst:
187 ui.warn(_("abort: file censored %s!\n") % inst)
188 except error.RevlogError as inst:
189 ui.warn(_("abort: %s!\n") % inst)
190 except error.SignalInterrupt:
191 ui.warn(_("killed!\n"))
192 except error.InterventionRequired as inst:
193 ui.warn("%s\n" % inst)
194 if inst.hint:
195 ui.warn(_("(%s)\n") % inst.hint)
196 return 1
197 except error.Abort as inst:
198 ui.warn(_("abort: %s\n") % inst)
199 if inst.hint:
200 ui.warn(_("(%s)\n") % inst.hint)
201 except ImportError as inst:
202 ui.warn(_("abort: %s!\n") % inst)
203 m = str(inst).split()[-1]
204 if m in "mpatch bdiff".split():
205 ui.warn(_("(did you forget to compile extensions?)\n"))
206 elif m in "zlib".split():
207 ui.warn(_("(is your Python install correct?)\n"))
208 except IOError as inst:
209 if util.safehasattr(inst, "code"):
210 ui.warn(_("abort: %s\n") % inst)
211 elif util.safehasattr(inst, "reason"):
212 try: # usually it is in the form (errno, strerror)
213 reason = inst.reason.args[1]
214 except (AttributeError, IndexError):
215 # it might be anything, for example a string
216 reason = inst.reason
217 if isinstance(reason, unicode):
218 # SSLError of Python 2.7.9 contains a unicode
219 reason = reason.encode(encoding.encoding, 'replace')
220 ui.warn(_("abort: error: %s\n") % reason)
221 elif (util.safehasattr(inst, "args")
222 and inst.args and inst.args[0] == errno.EPIPE):
223 pass
224 elif getattr(inst, "strerror", None):
225 if getattr(inst, "filename", None):
226 ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
227 else:
228 ui.warn(_("abort: %s\n") % inst.strerror)
229 else:
230 raise
231 except OSError as inst:
232 if getattr(inst, "filename", None) is not None:
233 ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
234 else:
235 ui.warn(_("abort: %s\n") % inst.strerror)
236 except MemoryError:
237 ui.warn(_("abort: out of memory\n"))
238 except SystemExit as inst:
239 # Commands shouldn't sys.exit directly, but give a return code.
240 # Just in case catch this and and pass exit code to caller.
241 return inst.code
242 except socket.error as inst:
243 ui.warn(_("abort: %s\n") % inst.args[-1])
244
245 return -1
246
247 def checknewlabel(repo, lbl, kind):
248 # Do not use the "kind" parameter in ui output.
249 # It makes strings difficult to translate.
250 if lbl in ['tip', '.', 'null']:
251 raise error.Abort(_("the name '%s' is reserved") % lbl)
252 for c in (':', '\0', '\n', '\r'):
253 if c in lbl:
254 raise error.Abort(_("%r cannot be used in a name") % c)
255 try:
256 int(lbl)
257 raise error.Abort(_("cannot use an integer as a name"))
258 except ValueError:
259 pass
260
261 def checkfilename(f):
262 '''Check that the filename f is an acceptable filename for a tracked file'''
263 if '\r' in f or '\n' in f:
264 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
265
266 def checkportable(ui, f):
267 '''Check if filename f is portable and warn or abort depending on config'''
268 checkfilename(f)
269 abort, warn = checkportabilityalert(ui)
270 if abort or warn:
271 msg = util.checkwinfilename(f)
272 if msg:
273 msg = "%s: %r" % (msg, f)
274 if abort:
275 raise error.Abort(msg)
276 ui.warn(_("warning: %s\n") % msg)
277
278 def checkportabilityalert(ui):
279 '''check if the user's config requests nothing, a warning, or abort for
280 non-portable filenames'''
281 val = ui.config('ui', 'portablefilenames', 'warn')
282 lval = val.lower()
283 bval = util.parsebool(val)
284 abort = pycompat.osname == 'nt' or lval == 'abort'
285 warn = bval or lval == 'warn'
286 if bval is None and not (warn or abort or lval == 'ignore'):
287 raise error.ConfigError(
288 _("ui.portablefilenames value is invalid ('%s')") % val)
289 return abort, warn
290
291 class casecollisionauditor(object):
292 def __init__(self, ui, abort, dirstate):
293 self._ui = ui
294 self._abort = abort
295 allfiles = '\0'.join(dirstate._map)
296 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
297 self._dirstate = dirstate
298 # The purpose of _newfiles is so that we don't complain about
299 # case collisions if someone were to call this object with the
300 # same filename twice.
301 self._newfiles = set()
302
303 def __call__(self, f):
304 if f in self._newfiles:
305 return
306 fl = encoding.lower(f)
307 if fl in self._loweredfiles and f not in self._dirstate:
308 msg = _('possible case-folding collision for %s') % f
309 if self._abort:
310 raise error.Abort(msg)
311 self._ui.warn(_("warning: %s\n") % msg)
312 self._loweredfiles.add(fl)
313 self._newfiles.add(f)
314
315 def filteredhash(repo, maxrev):
316 """build hash of filtered revisions in the current repoview.
317
318 Multiple caches perform up-to-date validation by checking that the
319 tiprev and tipnode stored in the cache file match the current repository.
320 However, this is not sufficient for validating repoviews because the set
321 of revisions in the view may change without the repository tiprev and
322 tipnode changing.
323
324 This function hashes all the revs filtered from the view and returns
325 that SHA-1 digest.
326 """
327 cl = repo.changelog
328 if not cl.filteredrevs:
329 return None
330 key = None
331 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
332 if revs:
333 s = hashlib.sha1()
334 for rev in revs:
335 s.update('%s;' % rev)
336 key = s.digest()
337 return key
338
339 26 class abstractvfs(object):
340 27 """Abstract base class; cannot be instantiated"""
341 28
@@ -786,629 +473,6 b' class readonlyvfs(abstractvfs, auditvfs)'
786 473 def join(self, path, *insidef):
787 474 return self.vfs.join(path, *insidef)
788 475
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only errors on the root path itself are fatal
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat; return False if it was already seen
            dirstat = os.stat(dirname)
            for seenstat in dirlst:
                if samestat(dirstat, seenstat):
                    return False
            dirlst.append(dirstat)
            return True
    else:
        # without samestat we cannot detect symlink cycles safely
        followsym = False

    if followsym and seen_dirs is None:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = []  # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
836
def osrcpath():
    '''return default os-specific hgrc search path'''
    # bundled default.d/*.rc files come first, then system, then user rc
    defaultpath = os.path.join(util.datapath, 'default.d')
    path = []
    if os.path.isdir(defaultpath):
        path = [os.path.join(defaultpath, f)
                for f, kind in osutil.listdir(defaultpath)
                if f.endswith('.rc')]
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
849
# memoized result of rcpath(); None until the first call
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in encoding.environ:
            _rcpath = []
            for p in encoding.environ['HGRCPATH'].split(pycompat.ospathsep):
                # empty entries are skipped, so an empty HGRCPATH yields
                # an empty search path
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes all its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
875
def intrev(rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    # the working directory is represented by None in most APIs; map it to
    # the wdirrev sentinel so it can participate in integer comparisons
    return wdirrev if rev is None else rev
882
def revsingle(repo, revspec, default='.'):
    """Resolve ``revspec`` to a single changectx, or ``repo[default]``.

    An explicit revision 0 is honored even though it is falsy. Raises
    Abort when the spec resolves to an empty set.
    """
    if not revspec and revspec != 0:
        return repo[default]
    matched = revrange(repo, [revspec])
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
891
def _pairspec(revspec):
    # truthy when the top-level operator of the parsed revspec is a range
    # expression, i.e. it always names a pair of revisions; note this
    # deliberately returns the (falsy) parse tree itself when empty
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
895
def revpair(repo, revs):
    """Resolve a list of revision specs to a pair of nodes.

    Returns ``(first, second)`` where ``second`` is None when the input
    names a single revision that is not an explicit range expression.
    """
    if not revs:
        # default to the working directory's first parent
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    # pick the endpoints without forcing the smartset into a list
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    # a degenerate pair coming from several specs is only acceptable when
    # every individual spec resolved to something
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
925
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # bare integers are wrapped into a 'rev(N)' revset expression
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True)
953
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # a merge: both parents are meaningful
        return parents
    if repo.ui.debugflag:
        # debug mode always shows the (null) second parent too
        return [parents[0], repo['null']]
    first = parents[0].rev()
    if first >= intrev(ctx.rev()) - 1:
        # linear history: the parent is implied, show nothing
        return []
    return parents
969
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # an explicit pattern kind disables glob expansion
            expanded.append(kindpat)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            matches = [pat]
        if matches:
            expanded.extend(matches)
        else:
            # keep the original pattern when the glob matched nothing
            expanded.append(kindpat)
    return expanded
988
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}

    if badfn is None:
        # default: warn on the ui; 'm' is bound below before any use
        def badfn(f, msg):
            ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats
1013
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    # same as matchandpats() but discarding the expanded pattern list
    matcher, _pats = matchandpats(ctx, pats, opts, globbed, default,
                                  badfn=badfn)
    return matcher
1018
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # anchored at the repo root, with the current working dir for output
    return matchmod.always(repo.root, repo.getcwd())
1022
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.

    ``badfn`` is passed through to matchmod.exact unchanged.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
1026
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    backuppath = ui.config('ui', 'origbackuppath', None)
    if backuppath is None:
        return filepath + ".orig"

    # mirror the file's path relative to the repo root under backuppath
    relpath = os.path.relpath(filepath, start=repo.root)
    fullpath = repo.wjoin(backuppath, relpath)

    backupdir = repo.vfs.dirname(fullpath)
    if not repo.vfs.exists(backupdir):
        ui.note(_('creating directory: %s\n') % backupdir)
        util.makedirs(backupdir)

    return fullpath + ".orig"
1046
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Add new files, forget missing ones and record renames, as
    'hg addremove' does.

    Returns 1 if any explicitly named file was rejected by the matcher or
    a subrepo's addremove failed, 0 otherwise.
    """
    if opts is None:
        opts = {}
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    # recurse into subrepositories that are enabled or explicitly matched
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    # files reported bad by the matcher; only explicitly named ones are
    # forwarded to m.bad(), but all are remembered for the return value
    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    # report additions/removals (always for explicit names, otherwise
    # only when verbose)
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
1102
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # 'rejected' is referenced by the badfn lambda via late binding; it is
    # assigned before the matcher can ever invoke the callback
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # return 1 if any explicitly named file was rejected by the matcher
    for f in rejected:
        if f in m.files():
            return 1
    return 0
1131
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    auditor = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for fname, st in walkresults.iteritems():
        state = dirstate[fname]
        if state == '?' and auditor.check(fname):
            unknown.append(fname)
        elif state == 'r':
            # for finding renames
            if st:
                forgotten.append(fname)
            else:
                removed.append(fname)
        elif not st:
            # tracked (or audit-rejected unknown) but gone from disk
            deleted.append(fname)
        elif state == 'a':
            added.append(fname)

    return added, unknown, deleted, removed, forgotten
1160
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        # rename detection disabled
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        # report unless both ends were named explicitly and we are quiet
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
1175
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # all dirstate mutations happen under the working-copy lock
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
1185
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy record so chains point at the true origin
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # source was only added in the working copy: no copy data can
            # be recorded, so just add dst
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1204
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for entry in requirements:
        if entry in supported:
            continue
        # entries must start with an alphanumeric character; anything
        # else indicates a corrupt requires file rather than a merely
        # unknown feature
        if not entry or not entry[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(entry)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
1223
def writerequires(opener, requirements):
    """Write the requirements, sorted one per line, to '.hg/requires'."""
    with opener('requires', 'w') as fp:
        fp.write(''.join('%s\n' % r for r in sorted(requirements)))
1228
class filecachesubentry(object):
    """Stat data for a single cached file.

    Tracks whether the file's stat information can be used to detect
    changes ("cacheable") and whether the file changed since last seen.
    """
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # tri-state: True/False once known, None while undetermined
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat so later changed() calls compare against the current state
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """Return True when the file changed (or when we cannot tell)."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # util.cachestat for path, or None when the file is missing
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1283
class filecacheentry(object):
    """Aggregates filecachesubentry objects for a set of paths."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1300
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # used as a decorator: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # file changed on disk: recompute the cached object
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # drop the materialized value; the _filecache entry is kept so the
        # next __get__ re-validates against the recorded stat info
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1379
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run ``cmd`` as a subprocess that may inherit ``lock``.

    The lock's inheritance token is placed into ``environ[envvar]`` (the
    passed-in dict is mutated) so the child process can take over the
    lock. Returns the subprocess exit code from ui.system().
    """
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1389
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    # the inheritance token is exported as HG_WLOCK_LOCKER (see _locksub)
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)
1398
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    explicit = ui.configbool('format', 'generaldelta', False)
    return explicit or ui.configbool('format', 'usegeneraldelta', True)
1405
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised

    Returns the boolean value of the experimental 'format.generaldelta'
    config option (default False).
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta', False)
1411
1412 476 class closewrapbase(object):
1413 477 """Base class of wrapper, which hooks closing
1414 478
General Comments 0
You need to be logged in to leave comments. Login now