util.py
3425 lines
| 96.6 KiB
| text/x-python
|
PythonLexer
/ mercurial / util.py
timeless@mozdev.org
|
r17515 | # util.py - Mercurial utility functions and platform specific implementations | ||
Martin Geisler
|
r8226 | # | ||
# Copyright 2005 K. Thananchayan <thananck@yahoo.com> | ||||
Raphaël Gomès
|
r47575 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> | ||
Martin Geisler
|
r8226 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | ||
# | ||||
# This software may be used and distributed according to the terms of the | ||||
Matt Mackall
|
r10263 | # GNU General Public License version 2 or any later version. | ||
mpm@selenic.com
|
r1082 | |||
timeless@mozdev.org
|
r17515 | """Mercurial utility functions and platform specific implementations. | ||
mpm@selenic.com
|
r1082 | |||
Martin Geisler
|
r8227 | This contains helper routines that are independent of the SCM core and | ||
hide platform-specific details from the core. | ||||
mpm@selenic.com
|
r1082 | """ | ||
mpm@selenic.com
|
r419 | |||
Matt Harbison
|
r52756 | from __future__ import annotations | ||
Gregory Szorc
|
r27358 | |||
Martin von Zweigbergk
|
r33790 | import abc | ||
Gregory Szorc
|
r27358 | import collections | ||
Martin von Zweigbergk
|
r33446 | import contextlib | ||
Gregory Szorc
|
r27358 | import errno | ||
import gc | ||||
import hashlib | ||||
Gregory Szorc
|
r49728 | import io | ||
Mark Thomas
|
r34555 | import itertools | ||
Manuel Jacob
|
r45550 | import locale | ||
Mark Thomas
|
r34296 | import mmap | ||
Gregory Szorc
|
r27358 | import os | ||
Matt Harbison
|
r49850 | import pickle # provides util.pickle symbol | ||
Siddharth Agarwal
|
r21907 | import re as remod | ||
Gregory Szorc
|
r27358 | import shutil | ||
Jun Wu
|
r30418 | import stat | ||
Gregory Szorc
|
r27358 | import sys | ||
import time | ||||
import traceback | ||||
Matt Harbison
|
r52614 | import typing | ||
Pierre-Yves David
|
r31950 | import warnings | ||
Matt Mackall
|
r3769 | |||
r52178 | from typing import ( | |||
r52442 | Any, | |||
Matt Harbison
|
r52614 | BinaryIO, | ||
Callable, | ||||
r52178 | Iterable, | |||
Iterator, | ||||
List, | ||||
Optional, | ||||
Tuple, | ||||
Matt Harbison
|
r52780 | Type, | ||
TypeVar, | ||||
r52178 | ) | |||
Joerg Sonnenberger
|
r47771 | from .node import hex | ||
Augie Fackler
|
r43346 | from .thirdparty import attr | ||
Matt Harbison
|
r52622 | |||
# Force pytype to use the non-vendored package | ||||
if typing.TYPE_CHECKING: | ||||
# noinspection PyPackageRequirements | ||||
import attr | ||||
Augie Fackler
|
r39295 | from hgdemandimport import tracing | ||
Gregory Szorc
|
r27358 | from . import ( | ||
encoding, | ||||
error, | ||||
i18n, | ||||
Yuya Nishihara
|
r32367 | policy, | ||
timeless
|
r28818 | pycompat, | ||
Matt Harbison
|
r52614 | typelib, | ||
Augie Fackler
|
r34468 | urllibcompat, | ||
Gregory Szorc
|
r27358 | ) | ||
Matt Harbison
|
r52831 | from .interfaces import ( | ||
modules as intmod, | ||||
) | ||||
Yuya Nishihara
|
r37101 | from .utils import ( | ||
r42208 | compression, | |||
Augie Fackler
|
r44517 | hashutil, | ||
Yuya Nishihara
|
r37136 | procutil, | ||
Yuya Nishihara
|
r37101 | stringutil, | ||
) | ||||
Matt Mackall
|
r3769 | |||
# keeps pyflakes happy
assert [
    Iterable,
    Iterator,
    List,
    Optional,
    Tuple,
]


# C implementations loaded through the policy machinery (may fall back to
# pure-Python variants depending on the active policy).
base85: intmod.Base85 = policy.importmod('base85')
osutil = policy.importmod('osutil')


b85decode = base85.b85decode
b85encode = base85.b85encode

# Aliases re-exported from pycompat so callers can keep using util.*.
cookielib = pycompat.cookielib
httplib = pycompat.httplib
safehasattr = pycompat.safehasattr
socketserver = pycompat.socketserver
bytesio = io.BytesIO
# TODO deprecate stringio name, as it is a lie on Python 3.
stringio = bytesio
xmlrpclib = pycompat.xmlrpclib


httpserver = urllibcompat.httpserver
urlerr = urllibcompat.urlerr
urlreq = urllibcompat.urlreq

# workaround for win32mbcs
_filenamebytestr = pycompat.bytestr
Jun Wu
|
# Select the platform-specific implementation module once, then re-export
# its functions under stable util.* names so callers stay platform-agnostic.
if pycompat.iswindows:
    from . import windows as platform
else:
    from . import posix as platform

_ = i18n._


abspath = platform.abspath
bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
expandglobs = platform.expandglobs
getfsmountpoint = platform.getfsmountpoint
getfstype = platform.getfstype
get_password = platform.get_password
groupmembers = platform.groupmembers
groupname = platform.groupname
isexec = platform.isexec
isowner = platform.isowner
listdir = osutil.listdir
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
posixfile = platform.posixfile
readlink = platform.readlink
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setflags = platform.setflags
split = platform.split
# prefer the C implementation from osutil when it provides statfiles
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
umask = platform.umask
unlink = platform.unlink
username = platform.username
Pulkit Goyal
|
r45119 | |||
Matt Harbison
|
if typing.TYPE_CHECKING:
    _Tfilestat = TypeVar('_Tfilestat', bound='filestat')


def setumask(val: int) -> None:
    """Update the process umask (used by the chg server).

    No-op on Windows; elsewhere the new mask is applied and mirrored in
    both this module's and the platform module's ``umask`` attribute.
    """
    if pycompat.iswindows:
        return
    os.umask(val)
    global umask
    umask = val & 0o777
    platform.umask = umask
# small compat layer
compengines = compression.compengines
SERVERROLE = compression.SERVERROLE
CLIENTROLE = compression.CLIENTROLE

# Python compatibility


# unique sentinel object, used to detect "no value supplied" in places
# where None is itself a legitimate value
_notset = object()
Augie Fackler
|
r43346 | |||
Remi Chaintron
|
def bitsfrom(container):
    """Return the bitwise OR of every value in *container*.

    An empty container yields 0.
    """
    combined = 0
    for flag in container:
        combined |= flag
    return combined
Augie Fackler
|
r43346 | |||
Pierre-Yves David
|
# python 2.6 still have deprecation warning enabled by default. We do not want
# to display anything to standard user so detect if we are running test and
# only use python deprecation warning in this case.
_dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
    warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
    warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')

if _dowarn:
    # silence warning emitted by passing user string to re.sub()
    warnings.filterwarnings(
        'ignore', 'bad escape', DeprecationWarning, 'mercurial'
    )
    warnings.filterwarnings(
        'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
    )
    # TODO: reinvent imp.is_frozen()
    warnings.filterwarnings(
        'ignore',
        'the imp module is deprecated',
        DeprecationWarning,
        'mercurial',
    )
Pierre-Yves David
|
r31950 | |||
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue an python native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    if not _dowarn:
        return
    suffix = (
        b"\n(compatibility will be dropped after Mercurial-%s,"
        b" update your code.)"
    ) % version
    warnings.warn(
        pycompat.sysstr(msg + suffix), DeprecationWarning, stacklevel + 1
    )
    # on python 3 with chg, we will need to explicitly flush the output
    sys.stderr.flush()
Pierre-Yves David
|
r31950 | |||
Augie Fackler
|
r43346 | |||
Mike Hommey
|
# Supported digest algorithms, keyed by their (bytes) name; values are
# hash-object constructors.
DIGESTS = {
    b'md5': hashlib.md5,
    b'sha1': hashutil.sha1,
    b'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']

# sanity check: every preferred digest must have a registered constructor
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class digester:
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=b''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_(b'unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """feed ``data`` into every tracked hash object"""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """return the hex digest for ``key``

        Raises error.Abort for an unsupported digest name.
        """
        if key not in DIGESTS:
            # BUG FIX: previously interpolated the module-level loop
            # variable 'k' (leaked by the DIGESTS_BY_STRENGTH sanity
            # loop), so the message never named the requested digest.
            raise error.Abort(_(b'unknown digest type: %s') % key)
        return hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""
        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class digestchecker:
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """read from the wrapped handle, feeding the digester as we go"""
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """abort unless the bytes read match the expected size and digests"""
        if self._got != self._size:
            msg = _(b'size mismatch: expected %d, got %d')
            raise error.Abort(msg % (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                msg = _(b'%s mismatch: expected %s, got %s')
                raise error.Abort(msg % (name, expected, actual))
Mike Hommey
|
r22963 | |||
Renato Cunha
|
try:
    buffer = buffer  # pytype: disable=name-error
except NameError:
    # Python 3 has no builtin 'buffer'; emulate it with a read-only
    # memoryview slice.

    def buffer(sliceable, offset=0, length=None):
        """Return a read-only view over ``sliceable`` starting at
        ``offset``, spanning ``length`` items (or to the end)."""
        end = None if length is None else offset + length
        return memoryview(sliceable)[offset:end].toreadonly()
Ronny Pfannschmidt
|
r10756 | |||
Augie Fackler
|
r43346 | |||
Pierre-Yves David
|
# default number of bytes pulled from the underlying pipe per os.read()
_chunksize = 4096


class bufferedinputpipe:
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __new__(cls, fh):
        # If we receive a fileobjectproxy, we need to use a variation of this
        # class that notifies observers about activity.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        # _buffer holds raw chunks; _lenbuf is the total byte count across
        # all of them, maintained incrementally.
        self._input = input
        self._buffer = []
        self._eof = False
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep filling until we have enough buffered bytes or hit EOF
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def unbufferedread(self, size):
        # perform at most one os.read(), then return whatever is buffered
        if not self._eof and self._lenbuf == 0:
            self._fillbuffer(max(size, _chunksize))
        return self._frombuffer(min(self._lenbuf, size))

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [b''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find(b'\n')
        while (not self._eof) and lfi < 0:
            # no newline buffered yet; read more and re-scan only the
            # newly appended chunk
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find(b'\n')
        size = lfi + 1
        if lfi < 0:  # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return b''
        buf = self._buffer[0]
        if len(self._buffer) > 1:
            # collapse all chunks so slicing below is straightforward
            buf = b''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data) :]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self, size=_chunksize):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), size)
        if not data:
            # os.read() returning b'' means the other end closed the pipe
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data
Augie Fackler
|
r43346 | |||
def has_mmap_populate():
    """Report whether some form of mmap pre-population is available,
    either osutil's background populate helper or mmap.MAP_POPULATE."""
    if hasattr(osutil, "background_mmap_populate"):
        return True
    return hasattr(mmap, 'MAP_POPULATE')
r52576 | ||||
def mmapread(fp, size=None, pre_populate=True):
    """Read a file content using mmap

    The responsibility of checking the file system is mmap safe is the
    responsibility of the caller (see `vfs.is_mmap_safe`).

    In some case, a normal string might be returned.

    If `pre_populate` is True (the default), the mmapped data will be
    pre-populated in memory if the system support this option, this slow down
    the initial mmapping but avoid potentially crippling page fault on later
    access. If this is not the desired behavior, set `pre_populate` to False.
    """
    if size == 0:
        # size of 0 to mmap.mmap() means "all data"
        # rather than "zero bytes", so special case that.
        return b''
    elif size is None:
        size = 0
    # accept either a file object (use its fileno) or a raw descriptor
    fd = getattr(fp, 'fileno', lambda: fp)()

    if pycompat.iswindows:
        # Windows mmap takes no flags/prot arguments; ACCESS_READ gives
        # the equivalent read-only mapping.
        _mmap = lambda fd, size: mmap.mmap(fd, size, access=mmap.ACCESS_READ)
    else:
        flags = mmap.MAP_PRIVATE
        bg_populate = hasattr(osutil, "background_mmap_populate")
        if pre_populate and not bg_populate:
            # fall back to the kernel's eager populate when osutil cannot
            # populate in the background (flag may not exist everywhere)
            flags |= getattr(mmap, 'MAP_POPULATE', 0)

        def _mmap(fd, size) -> mmap.mmap:
            m = mmap.mmap(fd, size, flags=flags, prot=mmap.PROT_READ)
            if pre_populate and bg_populate:
                osutil.background_mmap_populate(m)
            return m

    try:
        return _mmap(fd, size)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work. Check
        # if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return b''
        raise
Augie Fackler
|
r43346 | |||
class uncacheable_cachestat:
    """Stand-in cachestat carrying no stat data and never cacheable."""

    # never populated; present so the attribute exists like real cachestats
    stat: Optional[os.stat_result]

    def __init__(self) -> None:
        self.stat = None

    def cacheable(self) -> bool:
        """Always report that this entry cannot be cached."""
        return False
Gregory Szorc
|
class fileobjectproxy:
    """A proxy around file objects that tells a watcher when events occur.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """

    # __slots__ avoids an instance __dict__, which would interfere with the
    # __getattribute__/__setattr__ forwarding below.
    __slots__ = (
        '_orig',
        '_observer',
    )

    def __init__(self, fh, observer):
        # use object.__setattr__ because our own __setattr__ forwards all
        # writes to the wrapped file object
        object.__setattr__(self, '_orig', fh)
        object.__setattr__(self, '_observer', observer)

    def __getattribute__(self, name):
        # names handled by the proxy itself; everything else is looked up
        # on the wrapped file object
        ours = {
            '_observer',
            # IOBase
            'close',
            # closed if a property
            'fileno',
            'flush',
            'isatty',
            'readable',
            'readline',
            'readlines',
            'seek',
            'seekable',
            'tell',
            'truncate',
            'writable',
            'writelines',
            # RawIOBase
            'read',
            'readall',
            'readinto',
            'write',
            # BufferedIOBase
            # raw is a property
            'detach',
            # read defined above
            'read1',
            # readinto defined above
            # write defined above
        }

        # We only observe some methods.
        if name in ours:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, '_orig'), name)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, '_orig'))

    __bool__ = __nonzero__

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, '_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, '_orig'), name, value)

    def __iter__(self):
        return object.__getattribute__(self, '_orig').__iter__()

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, '_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, '_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    # Every method below delegates through _observedcall so the observer is
    # notified of each operation performed on the proxied file object.

    def close(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'close', *args, **kwargs
        )

    def fileno(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'fileno', *args, **kwargs
        )

    def flush(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'flush', *args, **kwargs
        )

    def isatty(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'isatty', *args, **kwargs
        )

    def readable(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readable', *args, **kwargs
        )

    def readline(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readline', *args, **kwargs
        )

    def readlines(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readlines', *args, **kwargs
        )

    def seek(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'seek', *args, **kwargs
        )

    def seekable(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'seekable', *args, **kwargs
        )

    def tell(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'tell', *args, **kwargs
        )

    def truncate(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'truncate', *args, **kwargs
        )

    def writable(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'writable', *args, **kwargs
        )

    def writelines(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'writelines', *args, **kwargs
        )

    def read(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'read', *args, **kwargs
        )

    def readall(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readall', *args, **kwargs
        )

    def readinto(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readinto', *args, **kwargs
        )

    def write(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'write', *args, **kwargs
        )

    def detach(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'detach', *args, **kwargs
        )

    def read1(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'read1', *args, **kwargs
        )
Gregory Szorc
|
class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """

    def _notify(self, event, *values):
        # forward the event to the observer, if it defines a handler
        handler = getattr(self._input._observer, event, None)
        if handler:
            handler(*values)

    def _fillbuffer(self, size=_chunksize):
        data = super(observedbufferedinputpipe, self)._fillbuffer(size=size)
        self._notify('osread', data, size)
        return data

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        data = super(observedbufferedinputpipe, self).read(size)
        self._notify('bufferedread', data, size)
        return data

    def readline(self, *args, **kwargs):
        data = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
        self._notify('bufferedreadline', data)
        return data
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
# socket methods intercepted by socketproxy; all other attribute access is
# forwarded untouched to the wrapped socket
PROXIED_SOCKET_METHODS = {
    'makefile',
    'recv',
    'recvfrom',
    'recvfrom_into',
    'recv_into',
    'send',
    'sendall',
    'sendto',
    'setblocking',
    'settimeout',
    'gettimeout',
    'setsockopt',
}
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class socketproxy:
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """

    __slots__ = (
        '_orig',
        '_observer',
    )

    def __init__(self, sock, observer):
        # object.__setattr__ is used because our own __setattr__ forwards
        # to the wrapped socket.
        object.__setattr__(self, '_orig', sock)
        object.__setattr__(self, '_observer', observer)

    def __getattribute__(self, name):
        if name in PROXIED_SOCKET_METHODS:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, '_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, '_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, '_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, '_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, '_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, '_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, '_observedcall')(
            'makefile', *args, **kwargs
        )

        # The file object may be used for I/O. So we turn it into a
        # proxy using our observer.
        observer = object.__getattribute__(self, '_observer')
        return makeloggingfileobject(
            observer.fh,
            res,
            observer.name,
            reads=observer.reads,
            writes=observer.writes,
            logdata=observer.logdata,
            logdataapis=observer.logdataapis,
        )

    def recv(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recv', *args, **kwargs
        )

    def recvfrom(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recvfrom', *args, **kwargs
        )

    def recvfrom_into(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recvfrom_into', *args, **kwargs
        )

    def recv_into(self, *args, **kwargs):
        # Fixed: this previously proxied 'recv_info', which is not a socket
        # method, so every recv_into() call died with AttributeError inside
        # _observedcall's getattr().
        return object.__getattribute__(self, '_observedcall')(
            'recv_into', *args, **kwargs
        )

    def send(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'send', *args, **kwargs
        )

    def sendall(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'sendall', *args, **kwargs
        )

    def sendto(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'sendto', *args, **kwargs
        )

    def setblocking(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'setblocking', *args, **kwargs
        )

    def settimeout(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'settimeout', *args, **kwargs
        )

    def gettimeout(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'gettimeout', *args, **kwargs
        )

    def setsockopt(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'setsockopt', *args, **kwargs
        )
Gregory Szorc
|
r37028 | |||
Gregory Szorc
|
class baseproxyobserver:
    """Shared state and payload formatting for the I/O observers.

    ``logdata`` controls whether raw payload bytes are written to ``fh``;
    ``logdataapis`` controls whether the "api(args) -> result" lines are
    completed/emitted.
    """

    def __init__(self, fh, name, logdata, logdataapis):
        self.fh = fh
        self.name = name
        self.logdata = logdata
        self.logdataapis = logdataapis

    def _writedata(self, data):
        if not self.logdata:
            # Payload logging is disabled; just terminate the pending API
            # line (the callers write those lines without a newline).
            if self.logdataapis:
                self.fh.write(b'\n')
                self.fh.flush()
            return

        if b'\n' not in data:
            # Simple case writes all data on a single line.
            if self.logdataapis:
                self.fh.write(b': %s\n' % stringutil.escapestr(data))
            else:
                self.fh.write(
                    b'%s> %s\n' % (self.name, stringutil.escapestr(data))
                )
            self.fh.flush()
            return

        # Data with newlines is written to multiple lines.
        if self.logdataapis:
            self.fh.write(b':\n')

        for chunk in data.splitlines(True):
            self.fh.write(
                b'%s> %s\n' % (self.name, stringutil.escapestr(chunk))
            )
        self.fh.flush()
Gregory Szorc
|
r36541 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""

    def __init__(
        self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
    ):
        super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
        self.reads = reads
        self.writes = writes

    def read(self, res, size=-1):
        if not self.reads:
            return

        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = b''

        if size == -1 and res == b'':
            # Suppress pointless read(-1) calls that return
            # nothing. These happen _a lot_ on Python 3, and there
            # doesn't seem to be a better workaround to have matching
            # Python 2 and 3 behavior. :(
            return

        if self.logdataapis:
            self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))

        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))

        self._writedata(res)

    def readinto(self, res, dest):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
            )

        if res is None:
            payload = b''
        else:
            payload = dest[0:res]

        # _writedata() uses "in" operator and is confused by memoryview because
        # characters are ints on Python 3.
        if isinstance(payload, memoryview):
            payload = payload.tobytes()

        self._writedata(payload)

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))

        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
            )

        self._writedata(res)

    def bufferedreadline(self, res):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> bufferedreadline() -> %d' % (self.name, len(res))
            )

        self._writedata(res)
Augie Fackler
|
r43346 | |||
def makeloggingfileobject(
    logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
):
    """Turn a file object into a logging file object."""
    return fileobjectproxy(
        fh,
        fileobjectobserver(
            logh,
            name,
            reads=reads,
            writes=writes,
            logdata=logdata,
            logdataapis=logdataapis,
        ),
    )
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class socketobserver(baseproxyobserver):
    """Logs socket activity."""

    def __init__(
        self,
        fh,
        name,
        reads=True,
        writes=True,
        states=True,
        logdata=False,
        logdataapis=True,
    ):
        super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
        self.reads = reads
        self.writes = writes
        self.states = states

    def makefile(self, res, mode=None, bufsize=None):
        if not self.states:
            return

        self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
            )
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom(%d, %d) -> %d'
                % (self.name, size, flags, len(res[0]))
            )

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom_into(%d, %d) -> %d'
                % (self.name, size, flags, res[0])
            )

        self._writedata(buf[0 : res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
            )

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Fixed: socket.send() returns the number of bytes written (an
            # int); the old code did len(res) on it, raising TypeError.
            # Also now honors logdataapis like every other data method here.
            self.fh.write(
                b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, res)
            )
        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write(
                b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
            )

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write(
                b'%s> sendto(%d, %d, %r) -> %d'
                % (self.name, len(data), flags, address, res)
            )

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        if not self.states:
            return

        self.fh.write(
            b'%s> setsockopt(%r, %r, %r) -> %r\n'
            % (self.name, level, optname, value, res)
        )
def makeloggingsocket(
    logh,
    fh,
    name,
    reads=True,
    writes=True,
    states=True,
    logdata=False,
    logdataapis=True,
):
    """Turn a socket into a logging socket."""
    return socketproxy(
        fh,
        socketobserver(
            logh,
            name,
            reads=reads,
            writes=writes,
            states=states,
            logdata=logdata,
            logdataapis=logdataapis,
        ),
    )
Augie Fackler
|
r43346 | |||
Matt Mackall
|
def version():
    """Return version information if available."""
    try:
        from . import __version__  # pytype: disable=import-error
    except ImportError:
        # Not running from an installed/built package; version is unknown.
        return b'unknown'
    return __version__.version
Matt Mackall
|
r7632 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')

    >>> versiontuple(b'4.6rc0')
    (4, 6, None, 'rc0')
    >>> versiontuple(b'4.6rc0+12-425d55e54f98')
    (4, 6, None, 'rc0+12-425d55e54f98')
    >>> versiontuple(b'.1.2.3')
    (None, None, None, '.1.2.3')
    >>> versiontuple(b'12.34..5')
    (12, 34, None, '..5')
    >>> versiontuple(b'1.2.3.4.5.6')
    (1, 2, 3, '.4.5.6')
    """
    if not v:
        v = version()

    # Up to three dotted numeric components, then everything after an
    # optional '+' or '-' separator is the "extra" suffix.
    m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
    if not m:
        vparts, extra = b'', v
    elif m.group(2):
        vparts, extra = m.groups()
    else:
        vparts, extra = m.group(1), None

    assert vparts is not None  # help pytype

    vints = []
    for part in vparts.split(b'.'):
        try:
            vints.append(int(part))
        except ValueError:
            break
    # Pad to three components, e.g. (3, 6) -> (3, 6, None).
    vints.extend([None] * (3 - len(vints)))

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)

    raise error.ProgrammingError(b"invalid version part request: %d" % n)
Augie Fackler
|
r43346 | |||
Brendan Cully
|
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    if func.__code__.co_argcount == 0:
        # Zero-argument functions: compute once, remember the single result.
        holder = []

        def f():
            if not holder:
                holder.append(func())
            return holder[0]

        return f

    memo = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]

    else:

        def f(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return f
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class cow:
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        pending = getattr(self, '_copied', 0)
        if pending:
            self._copied = pending - 1
            # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
            return self.__class__(self)  # pytype: disable=wrong-arg-count
        return self

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
class sortdict(collections.OrderedDict):
    """a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> list(d2.items())
    [('a', 0), ('b', 1)]
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    >>> d1.insert(1, b'a.5', 0.5)
    >>> list(d1.items())
    [('a', 0), ('a.5', 0.5), ('b', 1)]
    """

    def __setitem__(self, key, value):
        # Re-setting an existing key must move it to the end (last-set
        # order), so drop the old entry before inserting.
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src, **f):
            if isinstance(src, dict):
                src = src.items()
            for k, v in src:
                self[k] = v
            for k in f:
                self[k] = f[k]

    def insert(self, position, key, value):
        snapshot = list(self.items())
        for idx, (oldkey, oldvalue) in enumerate(snapshot):
            if idx == position:
                self[key] = value
            if idx >= position:
                # Re-append every later entry so it lands behind the new one.
                del self[oldkey]
                self[oldkey] = oldvalue
Augie Fackler
|
r43346 | |||
Jun Wu
|
class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """

    # All behavior is inherited: dict provides storage, cow provides the
    # preparewrite()/copy() copy-on-write protocol.
Augie Fackler
|
r43346 | |||
Jun Wu
|
class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """

    # All behavior is inherited: sortdict provides last-set ordering, cow
    # provides the preparewrite()/copy() copy-on-write protocol.
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class transactional:  # pytype: disable=ignored-metaclass
    """Base class for making a transactional type into a context manager."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        succeeded = exc_type is None
        try:
            if succeeded:
                self.close()
        finally:
            # release() always runs, aborting the transaction if close()
            # was skipped (an exception occurred) or itself failed.
            self.release()
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        # No transaction: nothing to close or release.
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        # InterventionRequired is an expected stop, not a failure: close
        # the transaction anyway, then let the exception propagate.
        tr.close()
        raise
    finally:
        tr.release()
Augie Fackler
|
r43346 | |||
Durham Goode
|
@contextlib.contextmanager
def nullcontextmanager(enter_result=None):
    """A no-op context manager that simply yields ``enter_result``."""
    yield enter_result
Durham Goode
|
r33621 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class _lrucachenode:
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """

    __slots__ = ('next', 'prev', 'key', 'value', 'cost')

    def __init__(self):
        # A fresh node is a circular list of one: both links point at self.
        self.next = self
        self.prev = self

        self.key = _notset
        self.value = None
        self.cost = 0

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
        self.value = None
        self.cost = 0
Gregory Szorc
|
r27371 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
r49801 | class lrucachedict: | ||
Gregory Szorc
|
r27371 | """Dict that caches most recent accesses and sets. | ||
The dict consists of an actual backing dict - indexed by original | ||||
key - and a doubly linked circular list defining the order of entries in | ||||
the cache. | ||||
The head node is the newest entry in the cache. If the cache is full, | ||||
we recycle head.prev and make it the new head. Cache accesses result in | ||||
the node being moved to before the existing head and being marked as the | ||||
new head node. | ||||
Gregory Szorc
|
r39603 | |||
Items in the cache can be inserted with an optional "cost" value. This is | ||||
simply an integer that is specified by the caller. The cache can be queried | ||||
for the total cost of all items presently in the cache. | ||||
Gregory Szorc
|
r39604 | |||
The cache can also define a maximum cost. If a cache insertion would | ||||
cause the total cost of the cache to go beyond the maximum cost limit, | ||||
nodes will be evicted to make room for the new code. This can be used | ||||
to e.g. set a max memory limit and associate an estimated bytes size | ||||
cost to each item in the cache. By default, no maximum cost is enforced. | ||||
Gregory Szorc
|
r27371 | """ | ||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
r39604 | def __init__(self, max, maxcost=0): | ||
Siddharth Agarwal
|
r18603 | self._cache = {} | ||
Gregory Szorc
|
r27371 | |||
Matt Harbison
|
r47663 | self._head = _lrucachenode() | ||
Gregory Szorc
|
r27371 | self._size = 1 | ||
Gregory Szorc
|
r39600 | self.capacity = max | ||
Gregory Szorc
|
r39603 | self.totalcost = 0 | ||
Gregory Szorc
|
r39604 | self.maxcost = maxcost | ||
Gregory Szorc
|
r27371 | |||
def __len__(self): | ||||
return len(self._cache) | ||||
def __contains__(self, k): | ||||
return k in self._cache | ||||
Siddharth Agarwal
|
r18603 | |||
Gregory Szorc
|
r27371 | def __iter__(self): | ||
# We don't have to iterate in cache order, but why not. | ||||
n = self._head | ||||
for i in range(len(self._cache)): | ||||
yield n.key | ||||
n = n.next | ||||
def __getitem__(self, k): | ||||
node = self._cache[k] | ||||
self._movetohead(node) | ||||
return node.value | ||||
Gregory Szorc
|
r39603 | def insert(self, k, v, cost=0): | ||
"""Insert a new item in the cache with optional cost value.""" | ||||
Gregory Szorc
|
r27371 | node = self._cache.get(k) | ||
# Replace existing value and mark as newest. | ||||
if node is not None: | ||||
Gregory Szorc
|
r39603 | self.totalcost -= node.cost | ||
Gregory Szorc
|
r27371 | node.value = v | ||
Gregory Szorc
|
r39603 | node.cost = cost | ||
self.totalcost += cost | ||||
Gregory Szorc
|
r27371 | self._movetohead(node) | ||
Gregory Szorc
|
r39604 | |||
if self.maxcost: | ||||
self._enforcecostlimit() | ||||
Gregory Szorc
|
r27371 | return | ||
Gregory Szorc
|
r39600 | if self._size < self.capacity: | ||
Gregory Szorc
|
r27371 | node = self._addcapacity() | ||
else: | ||||
# Grab the last/oldest item. | ||||
node = self._head.prev | ||||
Siddharth Agarwal
|
r18603 | |||
Gregory Szorc
|
r27371 | # At capacity. Kill the old entry. | ||
if node.key is not _notset: | ||||
Gregory Szorc
|
r39603 | self.totalcost -= node.cost | ||
Gregory Szorc
|
r27371 | del self._cache[node.key] | ||
node.key = k | ||||
node.value = v | ||||
Gregory Szorc
|
r39603 | node.cost = cost | ||
self.totalcost += cost | ||||
Gregory Szorc
|
r27371 | self._cache[k] = node | ||
# And mark it as newest entry. No need to adjust order since it | ||||
# is already self._head.prev. | ||||
self._head = node | ||||
Siddharth Agarwal
|
r18603 | |||
Gregory Szorc
|
r39604 | if self.maxcost: | ||
self._enforcecostlimit() | ||||
Gregory Szorc
|
r39603 | def __setitem__(self, k, v): | ||
self.insert(k, v) | ||||
Gregory Szorc
|
r27371 | def __delitem__(self, k): | ||
Yuya Nishihara
|
r40916 | self.pop(k) | ||
def pop(self, k, default=_notset): | ||||
try: | ||||
node = self._cache.pop(k) | ||||
except KeyError: | ||||
if default is _notset: | ||||
raise | ||||
return default | ||||
Matt Harbison
|
r44333 | |||
Yuya Nishihara
|
r40916 | value = node.value | ||
Gregory Szorc
|
r39603 | self.totalcost -= node.cost | ||
Gregory Szorc
|
r27371 | node.markempty() | ||
# Temporarily mark as newest item before re-adjusting head to make | ||||
# this node the oldest item. | ||||
self._movetohead(node) | ||||
self._head = node.next | ||||
Yuya Nishihara
|
r40916 | return value | ||
Gregory Szorc
|
r27371 | # Additional dict methods. | ||
def get(self, k, default=None): | ||||
try: | ||||
Gregory Szorc
|
r39607 | return self.__getitem__(k) | ||
Gregory Szorc
|
r27371 | except KeyError: | ||
return default | ||||
Siddharth Agarwal
|
r18603 | |||
Yuya Nishihara
|
r40915 | def peek(self, k, default=_notset): | ||
"""Get the specified item without moving it to the head | ||||
Unlike get(), this doesn't mutate the internal state. But be aware | ||||
that it doesn't mean peek() is thread safe. | ||||
""" | ||||
try: | ||||
node = self._cache[k] | ||||
return node.value | ||||
except KeyError: | ||||
if default is _notset: | ||||
raise | ||||
return default | ||||
Siddharth Agarwal
|
    def clear(self):
        """Drop every entry and reset the total cost accounting.

        The linked-list nodes are kept (marked empty) so capacity is
        preserved; only the key mapping is emptied.
        """
        n = self._head
        while n.key is not _notset:
            self.totalcost -= n.cost
            n.markempty()
            n = n.next

        self._cache.clear()
Gregory Szorc
|
r27371 | |||
Gregory Szorc
|
    def copy(self, capacity=None, maxcost=0):
        """Create a new cache as a copy of the current one.

        By default, the new cache has the same capacity as the existing one.
        But, the cache capacity can be changed as part of performing the
        copy.

        Items in the copy have an insertion/access order matching this
        instance.
        """
        capacity = capacity or self.capacity
        maxcost = maxcost or self.maxcost
        result = lrucachedict(capacity, maxcost=maxcost)

        # We copy entries by iterating in oldest-to-newest order so the copy
        # has the correct ordering.

        # Find the first non-empty entry.
        n = self._head.prev
        while n.key is _notset and n is not self._head:
            n = n.prev

        # We could potentially skip the first N items when decreasing capacity.
        # But let's keep it simple unless it is a performance problem.
        for i in range(len(self._cache)):
            result.insert(n.key, n.value, cost=n.cost)
            n = n.prev

        return result
Gregory Szorc
|
    def popoldest(self):
        """Remove the oldest item from the cache.

        Returns the (key, value) describing the removed cache entry.
        Returns None (implicitly) when the cache is empty.
        """
        if not self._cache:
            return

        # Walk the linked list backwards starting at tail node until we hit
        # a non-empty node.
        n = self._head.prev

        while n.key is _notset:
            n = n.prev

        key, value = n.key, n.value

        # And remove it from the cache and mark it as empty.
        del self._cache[n.key]
        self.totalcost -= n.cost
        n.markempty()

        return key, value
    def _movetohead(self, node: _lrucachenode):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)

        NOTE: the statement order below is load-bearing; each pointer write
        depends on the ones before it.
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node
        self._head = node
    def _addcapacity(self) -> _lrucachenode:
        """Add a node to the circular linked list.

        The new node is inserted before the head node, i.e. at the "oldest"
        position, and the tracked size is bumped.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
Siddharth Agarwal
|
r19710 | |||
Gregory Szorc
|
    def _enforcecostlimit(self):
        # This should run after an insertion. It should only be called if total
        # cost limits are being enforced.
        # The most recently inserted node is never evicted.
        if len(self) <= 1 or self.totalcost <= self.maxcost:
            return

        # This is logically equivalent to calling popoldest() until we
        # free up enough cost. We don't do that since popoldest() needs
        # to walk the linked list and doing this in a loop would be
        # quadratic. So we find the first non-empty node and then
        # walk nodes until we free up enough capacity.
        #
        # If we only removed the minimum number of nodes to free enough
        # cost at insert time, chances are high that the next insert would
        # also require pruning. This would effectively constitute quadratic
        # behavior for insert-heavy workloads. To mitigate this, we set a
        # target cost that is a percentage of the max cost. This will tend
        # to free more nodes when the high water mark is reached, which
        # lowers the chances of needing to prune on the subsequent insert.
        targetcost = int(self.maxcost * 0.75)

        # Start from the oldest live node (skipping trailing empty nodes).
        n = self._head.prev
        while n.key is _notset:
            n = n.prev

        # Evict oldest-first until we are at or under the target cost,
        # always keeping at least one (the newest) entry.
        while len(self) > 1 and self.totalcost > targetcost:
            del self._cache[n.key]
            self.totalcost -= n.cost
            n.markempty()
            n = n.prev
Gregory Szorc
|
r39604 | |||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    maxsize = 20

    # Single-argument callables get a cheaper wrapper that keys the cache
    # on the bare argument instead of an args tuple.
    if func.__code__.co_argcount == 1:

        def f(arg):
            if arg in cache:
                # Refresh recency.
                order.remove(arg)
            else:
                if len(cache) > maxsize:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]

    else:

        def f(*args):
            if args in cache:
                # Refresh recency.
                order.remove(args)
            else:
                if len(cache) > maxsize:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class propertycache:
    """Non-data descriptor caching the wrapped function's result.

    The first attribute access computes the value and stores it in the
    instance ``__dict__`` under the same name; because this descriptor
    defines no ``__set__``, the instance attribute then shadows it and
    later accesses are plain lookups.
    """

    def __init__(self, func):
        self.name = func.__name__
        self.func = func

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
Augie Fackler
|
r43346 | |||
Mark Thomas
|
def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    prop = pycompat.sysstr(prop)
    # Removing the instance attribute re-exposes the descriptor, so the
    # next access recomputes the value.  Missing keys are a no-op.
    obj.__dict__.pop(prop, None)
Augie Fackler
|
r43346 | |||
Brendan Cully
|
def increasingchunks(source, min=1024, max=65536):
    """return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max"""

    def log2(x):
        # floor(log2(x)); defined as 0 for x == 0
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    parts = []
    size = 0
    for chunk in source:
        parts.append(chunk)
        size += len(chunk)
        if size < min:
            continue
        if min < max:
            # Grow the threshold: at least double it, or jump to the
            # largest power of two <= the emitted size, capped at max.
            min = min << 1
            nmin = 1 << log2(size)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield b''.join(parts)
        parts = []
        size = 0
    if parts:
        yield b''.join(parts)
|
r7396 | |||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
def always(fn):
    """Matcher predicate that accepts every input."""
    return True
Augie Fackler
|
r43346 | |||
Matt Mackall
|
def never(fn):
    """Matcher predicate that rejects every input."""
    return False
Bryan O'Sullivan
|
r724 | |||
Augie Fackler
|
r43346 | |||
def nogc(func=None) -> Any:
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7. But it still affects
    CPython's performance.

    Usable both as a decorator (``@nogc`` on a function) and as a context
    manager (``with nogc():``) -- the zero-argument call returns the
    context-manager form.
    """
    if func is None:
        return _nogc_context()
    else:
        return _nogc_decorator(func)
@contextlib.contextmanager | ||||
def _nogc_context(): | ||||
gcenabled = gc.isenabled() | ||||
gc.disable() | ||||
try: | ||||
yield | ||||
finally: | ||||
if gcenabled: | ||||
gc.enable() | ||||
def _nogc_decorator(func):
    """Wrap ``func`` so each call runs with the GC disabled."""

    def wrapper(*args, **kwargs):
        with _nogc_context():
            return func(*args, **kwargs)

    return wrapper
Augie Fackler
|
r43346 | |||
Jun Wu
|
if pycompat.ispypy:
    # PyPy runs slower with gc disabled, so make nogc a no-op there.
    # NOTE: the previous ``nogc = lambda x: x`` only supported the decorator
    # form; calling ``nogc()`` with no argument (the context-manager form)
    # would raise TypeError on PyPy.  Preserve both call styles.
    def nogc(func=None) -> Any:
        if func is None:
            return contextlib.nullcontext()
        return func
Augie Fackler
|
r43346 | |||
def pathto(root: bytes, n1: bytes, n2: bytes) -> bytes:
    """return the relative path from one place to another.

    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    """
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # On different drives there is no relative path; return n2 anchored
        # at root instead.
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        # Rebase n2 to be absolute-like so both sides share a common prefix.
        n2 = b'/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split(b'/')
    # Strip the common leading components by comparing from the front
    # (both lists are reversed so the front is at the tail).
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # One '..' per remaining component of n1, then descend into n2.
    return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
Bryan O'Sullivan
|
r884 | |||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
def checksignature(func, depth=1):
    '''wrap a function with code to check for calling errors'''

    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A traceback exactly ``depth`` frames deep means the TypeError
            # came from the call itself (bad signature), not from inside
            # func's body.
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) == depth:
                raise error.SignatureError
            raise

    return check
Augie Fackler
|
r43346 | |||
Jun Wu
|
# a whitelist of known filesystems where hardlink works reliably
_hardlinkfswhitelist = {
    b'apfs',
    b'btrfs',
    b'ext2',
    b'ext3',
    b'ext4',
    b'hfs',
    b'jfs',
    b'NTFS',
    b'reiserfs',
    b'tmpfs',
    b'ufs',
    b'xfs',
    b'zfs',
}
Jun Wu
|
r31575 | |||
Augie Fackler
|
r43346 | |||
def copyfile(
    src,
    dest,
    hardlink=False,
    copystat=False,
    checkambig=False,
    nb_bytes=None,
    no_hardlink_cb=None,
    check_fs_hardlink=True,
):
    """copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.

    nb_bytes: if set only copy the first `nb_bytes` of the source file.
    """
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink and check_fs_hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            if no_hardlink_cb is not None:
                no_hardlink_cb()
            hardlink = False
    if hardlink:
        # Reject the invalid argument combination *before* creating the
        # link, so the ProgrammingError cannot leave a stray hardlink
        # behind (previously this check ran after oslink() succeeded).
        if nb_bytes is not None:
            m = "the `nb_bytes` argument is incompatible with `hardlink`"
            raise error.ProgrammingError(m)
        try:
            oslink(src, dest)
            return
        except (IOError, OSError) as exc:
            if exc.errno != errno.EEXIST and no_hardlink_cb is not None:
                no_hardlink_cb()
            # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
        if nb_bytes is not None:
            m = "cannot use `nb_bytes` on a symlink"
            raise error.ProgrammingError(m)
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                # dest was replaced in place; if the new stat is ambiguous
                # with the old one, nudge mtime forward one second so cache
                # validation can tell them apart.
                newstat = filestat.frompath(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
                    os.utime(dest, (advanced, advanced))
            # We could do something smarter using `copy_file_range` call or
            # similar
            if nb_bytes is not None:
                with open(dest, mode='r+') as f:
                    f.truncate(nb_bytes)
        except shutil.Error as inst:
            raise error.Abort(stringutil.forcebytestr(inst))
Matt Mackall
|
r3629 | |||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
def copyfiles(src, dst, hardlink=None, progress=None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) pair: whether hardlinking is still being
    attempted, and how many files were copied.
    """
    num = 0

    def settopic():
        # Label the progress bar according to the copy strategy in use.
        if progress:
            progress.topic = _(b'linking') if hardlink else _(b'copying')

    if os.path.isdir(src):
        if hardlink is None:
            # Hardlinks only work within a single device; auto-detect.
            hardlink = (
                os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # Recurse; a child may downgrade hardlink for the rest of the
            # tree if linking fails.
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
            num += n
    else:
        if hardlink is None:
            hardlink = (
                os.stat(os.path.dirname(src)).st_dev
                == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError) as exc:
                # On any failure fall back to a plain copy; only give up on
                # hardlinking for subsequent files if the error was not a
                # pre-existing destination.
                if exc.errno != errno.EEXIST:
                    hardlink = False
                # XXX maybe try to relink if the file exist ?
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        if progress:
            progress.increment()

    return hardlink, num
Adrian Buehlmann
|
r11254 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
# File basenames (before the first '.') that Windows refuses to create.
_winreservednames = {
    b'con',
    b'prn',
    b'aux',
    b'nul',
    b'com1',
    b'com2',
    b'com3',
    b'com4',
    b'com5',
    b'com6',
    b'com7',
    b'com8',
    b'com9',
    b'lpt1',
    b'lpt2',
    b'lpt3',
    b'lpt4',
    b'lpt5',
    b'lpt6',
    b'lpt7',
    b'lpt8',
    b'lpt9',
}
# Characters that may not appear anywhere in a Windows filename.
_winreservedchars = b':*?"<>|'
Augie Fackler
|
r43346 | |||
def checkwinfilename(path: bytes) -> Optional[bytes]:
    r"""Check that the base-relative path is a valid filename on Windows.

    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    """
    if path.endswith(b'\\'):
        return _(b"filename ends with '\\', which is invalid on Windows")
    if b'\\/' in path:
        return _(b"directory name ends with '\\', which is invalid on Windows")
    # Examine each path component, treating '\' and '/' as separators.
    for n in path.replace(b'\\', b'/').split(b'/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return (
                    _(
                        b"filename contains '%s', which is reserved "
                        b"on Windows"
                    )
                    % c
                )
            # Control characters (< 0x20) are invalid in Windows names.
            if ord(c) <= 31:
                return _(
                    b"filename contains '%s', which is invalid on Windows"
                ) % stringutil.escapestr(c)
        # Reserved device names apply to the part before the first dot
        # (e.g. 'con.xml' is still reserved).
        base = n.split(b'.')[0]
        if base and base.lower() in _winreservednames:
            return (
                _(b"filename contains '%s', which is reserved on Windows")
                % base
            )
        t = n[-1:]
        # ``n not in b'..'`` is a substring test: it deliberately lets the
        # special components '.' and '..' through.
        if t in b'. ' and n not in b'..':
            return (
                _(
                    b"filename ends with '%s', which is not allowed "
                    b"on Windows"
                )
                % t
            )
Matt Harbison
|
# Monotonic-ish timer for profiling; prefer time.perf_counter when present.
timer = getattr(time, "perf_counter", None)

if pycompat.iswindows:
    checkosfilename = checkwinfilename
    if not timer:
        timer = time.clock  # pytype: disable=module-attr
else:
    # mercurial.windows doesn't have platform.checkosfilename
    checkosfilename = platform.checkosfilename  # pytype: disable=module-attr
    if not timer:
        timer = time.time
Matt Mackall
|
r7890 | |||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
def makelock(info: bytes, pathname: bytes) -> None:
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # NOTE: any other OSError is deliberately swallowed so we fall
        # through to the O_EXCL plain-file fallback below.
    except AttributeError:  # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    ld = os.open(pathname, flags)
    try:
        os.write(ld, info)
    finally:
        os.close(ld)
Thomas Arendsen Hein
|
r704 | |||
Augie Fackler
|
r43346 | |||
def readlock(pathname: bytes) -> bytes:
    """Read a lock's content, whether symlink-based or plain-file-based."""
    try:
        return readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported.
        # Fall through to reading it as a regular file.
    except AttributeError:  # no symlink in os
        pass
    with posixfile(pathname, b'rb') as fp:
        return fp.read()
Thomas Arendsen Hein
|
r704 | |||
Augie Fackler
|
r43346 | |||
Vadim Gelfer
|
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fileno = fp.fileno
    except AttributeError:
        # No file descriptor available; fall back to stat-by-name.
        return os.stat(fp.name)
    return os.fstat(fileno())
Augie Fackler
|
r43346 | |||
Matt Mackall
|
# File system features


def fscasesensitive(path: bytes) -> bool:
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    folded = b.upper()
    if folded == b:
        folded = b.lower()
    if folded == b:
        return True  # no evidence against case sensitivity
    try:
        # If the case-folded sibling name stats to the same entry, the
        # filesystem folded the case for us -> insensitive.
        s2 = os.lstat(os.path.join(d, folded))
    except OSError:
        return True
    return s2 != s1
Augie Fackler
|
r43346 | |||
# Identity by default; swapped for pycompat.sysstr when the installed re2
# binding only accepts native strings (see _re._checkre2).
_re2_input = lambda x: x
# google-re2 must be told not to emit errors on its own
_re2_options = None
try:
    import re2  # pytype: disable=import-error

    # None means "availability not yet probed"; _re._checkre2 resolves it.
    _re2 = None
except ImportError:
    _re2 = False
Augie Fackler
|
r43346 | |||
def has_re2():
    """return True if re2 is available, False otherwise"""
    if _re2 is None:
        _re._checkre2()
    return _re2
Gregory Szorc
|
r49801 | class _re: | ||
r51581 | @staticmethod | |||
def _checkre2(): | ||||
Siddharth Agarwal
|
r21913 | global _re2 | ||
r47598 | global _re2_input | |||
r52503 | global _re2_options | |||
r51580 | if _re2 is not None: | |||
# we already have the answer | ||||
return | ||||
Matt Harbison
|
r47688 | |||
check_pattern = br'\[([^\[]+)\]' | ||||
check_input = b'[ui]' | ||||
Siddharth Agarwal
|
r21913 | try: | ||
# check if match works, see issue3964 | ||||
r47598 | _re2 = bool(re2.match(check_pattern, check_input)) | |||
Siddharth Agarwal
|
r21913 | except ImportError: | ||
_re2 = False | ||||
r47598 | except TypeError: | |||
# the `pyre-2` project provides a re2 module that accept bytes | ||||
# the `fb-re2` project provides a re2 module that acccept sysstr | ||||
check_pattern = pycompat.sysstr(check_pattern) | ||||
check_input = pycompat.sysstr(check_input) | ||||
_re2 = bool(re2.match(check_pattern, check_input)) | ||||
_re2_input = pycompat.sysstr | ||||
r52503 | try: | |||
quiet = re2.Options() | ||||
quiet.log_errors = False | ||||
_re2_options = quiet | ||||
except AttributeError: | ||||
pass | ||||
Siddharth Agarwal
|
r21913 | |||
Siddharth Agarwal
|
r21908 | def compile(self, pat, flags=0): | ||
Augie Fackler
|
r46554 | """Compile a regular expression, using re2 if possible | ||
Bryan O'Sullivan
|
r16943 | |||
Siddharth Agarwal
|
r21908 | For best performance, use only re2-compatible regexp features. The | ||
only flags from the re module that are re2-compatible are | ||||
Augie Fackler
|
r46554 | IGNORECASE and MULTILINE.""" | ||
Siddharth Agarwal
|
r21908 | if _re2 is None: | ||
Siddharth Agarwal
|
r21913 | self._checkre2() | ||
Siddharth Agarwal
|
r21908 | if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0: | ||
if flags & remod.IGNORECASE: | ||||
Augie Fackler
|
r43347 | pat = b'(?i)' + pat | ||
Siddharth Agarwal
|
r21908 | if flags & remod.MULTILINE: | ||
Augie Fackler
|
r43347 | pat = b'(?m)' + pat | ||
Siddharth Agarwal
|
r21908 | try: | ||
r52503 | input_regex = _re2_input(pat) | |||
if _re2_options is not None: | ||||
compiled = re2.compile(input_regex, options=_re2_options) | ||||
else: | ||||
compiled = re2.compile(input_regex) | ||||
return compiled | ||||
Siddharth Agarwal
|
r21908 | except re2.error: | ||
pass | ||||
return remod.compile(pat, flags) | ||||
Siddharth Agarwal
|
    @propertycache
    def escape(self):
        """Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        """
        global _re2
        if _re2 is None:
            # probe for re2 on first use, mirroring compile()
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
Augie Fackler
|
r43346 | |||
Siddharth Agarwal
|
# module-level regex facade: transparently prefers re2 when it is available
re = _re()


# cache used by fspath(): {normcased directory -> {normcased name -> on-disk name}}
_fspathcache = {}
Augie Fackler
|
r43346 | |||
def fspath(name: bytes, root: bytes) -> bytes:
    """Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    """

    def _makefspathcacheentry(dir):
        # map normcased entry name -> name as actually stored on disk
        return {normcase(n): n for n in os.listdir(dir)}

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # NB: bytes are immutable, so replace() returns a new object; the old
    # spelling ``seps.replace(...)`` discarded the result and was a no-op,
    # leaving an unescaped '\' inside the character classes below on
    # platforms where os.sep is a backslash.
    seps = seps.replace(b'\\', b'\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the caller's spelling for entries not (yet) on disk
        result.append(found or part)
        dir = os.path.join(dir, part)

    return b''.join(result)
Paul Moore
|
r6676 | |||
Augie Fackler
|
r43346 | |||
def checknlink(testfile: bytes) -> bool:
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        # create the first name next to testfile so both live on the same
        # filesystem/share
        fd, f1 = pycompat.mkstemp(
            prefix=b'.%s-' % os.path.basename(testfile),
            suffix=b'1~',
            dir=os.path.dirname(testfile),
        )
        os.close(fd)
        # sibling name: same as f1 but ending in '2~' instead of '1~'
        f2 = b'%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        # link creation or stat failed: hardlinks are unusable here
        return False
    finally:
        # best-effort cleanup of the probe files
        if fp is not None:
            fp.close()
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass
Adrian Buehlmann
|
r12938 | |||
Augie Fackler
|
r43346 | |||
def endswithsep(path: bytes) -> bool:
    """Tell whether ``path`` ends with os.sep or os.altsep."""
    if path.endswith(pycompat.ossep):
        return True
    alt = pycompat.osaltsep
    # wrap in bool() so pytype sees a real boolean, not b''/False
    return bool(alt and path.endswith(alt))
Shun-ichi GOTO
|
r5843 | |||
def splitpath(path: bytes) -> List[bytes]:
    """Split ``path`` on os.sep (os.altsep is deliberately ignored).

    This is just a readable spelling of ``path.split(os.sep)``; run
    os.path.normpath() on the argument first if normalization is needed.
    """
    separator = pycompat.ossep
    return path.split(separator)
Shun-ichi GOTO
|
r5844 | |||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
def mktempcopy(
    name: bytes,
    emptyok: bool = False,
    createmode: Optional[int] = None,
    enforcewritable: bool = False,
) -> bytes:
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # place the temp file in the same directory so a later rename stays on
    # the same filesystem
    fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode, enforcewritable)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, b"rb")
        except IOError as inst:
            # a missing source is fine: the (empty) temp file is returned
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, b"wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:  # re-raises
        # don't leave a half-written temp file behind on any failure
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
Vadim Gelfer
|
r2176 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class filestat:
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """

    def __init__(self, stat: Optional[os.stat_result]) -> None:
        self.stat = stat

    @classmethod
    def frompath(cls: Type[_Tfilestat], path: bytes) -> _Tfilestat:
        """Build a filestat for ``path``; stat is None if it does not exist."""
        try:
            stat = os.stat(path)
        except FileNotFoundError:
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls: Type[_Tfilestat], fp: BinaryIO) -> _Tfilestat:
        """Build a filestat from an already-open file object."""
        stat = os.fstat(fp.fileno())
        return cls(stat)

    # keep identity-based hashing even though __eq__ is value-based
    __hash__ = object.__hash__

    def __eq__(self, old) -> bool:
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (
                self.stat.st_size == old.stat.st_size
                and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
                and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
            )
        except AttributeError:
            # at least one side has stat == None (or is not a filestat)
            pass
        try:
            # two "file does not exist" stats compare equal
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old: _Tfilestat) -> bool:
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime  < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime  < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime  > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime  > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
        except AttributeError:
            return False

    def avoidambig(self, path: bytes, old: _Tfilestat) -> bool:
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        # bump mtime by one second; mask keeps the value non-negative and
        # presumably within signed 32-bit range — TODO confirm storage format
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
        try:
            os.utime(path, (advanced, advanced))
        except PermissionError:
            # utime() on the file created by another user causes EPERM,
            # if a process doesn't have appropriate privileges
            return False
        return True

    def __ne__(self, other) -> bool:
        return not self == other
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class atomictempfile:
    """writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    """

    def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
        self.__name = name  # permanent name
        # all writes go to a temp copy; 'w' in mode means the original
        # content is about to be discarded, so copying it can be skipped
        self._tempname = mktempcopy(
            name,
            emptyok=(b'w' in mode),
            createmode=createmode,
            enforcewritable=(b'w' in mode),
        )

        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.writelines = self._fp.writelines
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Flush writes and atomically rename the temp copy into place."""
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # oldstat is falsy unless checkambig was requested
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Throw away all writes: remove the temp copy without renaming."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if hasattr(self, '_fp'):  # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, discard if the body raised
        if exctype is not None:
            self.discard()
        else:
            self.close()
Augie Fackler
|
r43346 | |||
Arseniy Alekseyev
|
def tryrmdir(f):
    """Prune empty directories upward from ``f``, ignoring the two
    expected failure modes (path already gone, directory not empty)."""
    try:
        removedirs(f)
    except OSError as err:
        if err.errno not in (errno.ENOENT, errno.ENOTEMPTY):
            raise
def unlinkpath(
    f: bytes, ignoremissing: bool = False, rmdir: bool = True
) -> None:
    """Unlink ``f`` and remove its directory if that leaves it empty.

    When ``ignoremissing`` is set, a file that is already gone is not an
    error; with ``rmdir`` (the default) newly-empty parent directories
    are pruned afterwards.
    """
    remove = tryunlink if ignoremissing else unlink
    remove(f)
    if not rmdir:
        return
    # best effort: the parent directory may well not be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
Ryan McElroy
|
r31539 | |||
Augie Fackler
|
r43346 | |||
Georges Racinet
|
def tryunlink(f: bytes) -> bool:
    """Remove ``f`` if it exists, swallowing FileNotFoundError.

    Returns True when a file was actually removed, False when it was
    already gone.
    """
    try:
        unlink(f)
    except FileNotFoundError:
        return False
    return True
Ryan McElroy
|
r31540 | |||
Augie Fackler
|
r43346 | |||
def makedirs(
    name: bytes, mode: Optional[int] = None, notindexed: bool = False
) -> None:
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already there: nothing to create, and mode is left alone
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        # create missing ancestors first, then retry this directory
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    # only directories actually created here get the explicit mode
    if mode is not None:
        os.chmod(name, mode)
Bryan O'Sullivan
|
r18668 | |||
Augie Fackler
|
r43346 | |||
def readfile(path: bytes) -> bytes:
    """Return the full contents of the file at ``path`` as bytes."""
    with open(path, 'rb') as reader:
        data = reader.read()
    return data
Dan Villiom Podlaski Christiansen
|
r14099 | |||
Augie Fackler
|
r43346 | |||
def writefile(path: bytes, text: bytes) -> None:
    """Create or overwrite the file at ``path`` with the bytes ``text``."""
    with open(path, 'wb') as writer:
        writer.write(text)
Augie Fackler
|
r43346 | |||
def appendfile(path: bytes, text: bytes) -> None:
    """Append the bytes ``text`` to the file at ``path``, creating it if
    necessary."""
    with open(path, 'ab') as writer:
        writer.write(text)
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class chunkbuffer:
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""

        def splitbig(chunks):
            # break >1MB chunks into 256KB pieces so the queue below never
            # holds giant strings
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2**18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk

        self.iter = splitbig(in_iter)
        # chunks pulled from self.iter but not yet fully consumed
        self._queue = collections.deque()
        # read offset into the chunk at the head of the queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            # NOTE(review): this joins only the not-yet-pulled iterator;
            # chunks already sitting in self._queue from a previous sized
            # read are not included — callers appear to use one mode or
            # the other, never both. Verify before mixing read styles.
            return b''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                # pull roughly 256KB ahead per refill
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # iterator ran dry: return what we have (may be short)
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.
            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset : offset + left])
                self._chunkoffset += left
                # drives left negative, terminating the loop
                left -= chunkremaining

        return b''.join(buf)
Matt Mackall
|
r11758 | |||
Augie Fackler
|
r43346 | |||
Mads Kiilerich
|
def filechunkiter(f, size=131072, limit=None):
    """Yield the data in file object ``f`` in chunks of ``size`` bytes
    (default 131072), reading at most ``limit`` bytes in total when a
    limit is given (default is to read everything). A chunk may be
    shorter than ``size`` at the end of the file, or when ``f`` is a
    socket or another file type that can return short reads."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits: never issue a zero-length read
        chunk = nbytes and f.read(nbytes)
        if not chunk:
            return
        if limit:
            limit -= len(chunk)
        yield chunk
Bryan O'Sullivan
|
r1320 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class cappedreader:
    """A file object proxy that allows reading up to N bytes.

    Given a source file object, instances of this type allow reading up to
    N bytes from that source file object. Attempts to read past the allowed
    limit are treated as EOF.

    It is assumed that I/O is not performed on the original file object
    in addition to I/O that is performed by this instance. If there is,
    state tracking will get out of sync and unexpected results will ensue.
    """

    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        remaining = self._left
        if not remaining:
            # cap reached: behave like EOF
            return b''
        wanted = remaining if n < 0 else min(n, remaining)
        data = self._fh.read(wanted)
        self._left = remaining - len(data)
        assert self._left >= 0
        return data

    def readinto(self, b):
        data = self.read(len(b))
        if data is None:
            return None

        count = len(data)
        b[0:count] = data
        return count
Augie Fackler
|
r43346 | |||
Bryan O'Sullivan
|
def unitcountfn(*unittable):
    """Return a function that renders a readable count of some quantity.

    Each row of ``unittable`` is (multiplier, divisor, format); the first
    row whose threshold (multiplier * divisor) the magnitude reaches
    formats the scaled value, and the last row's format is the fallback
    for values below every threshold.
    """

    def render(count):
        magnitude = abs(count)
        for multiplier, divisor, fmt in unittable:
            if magnitude >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # below all thresholds: format the raw count with the last row
        return unittable[-1][2] % count

    return render
Augie Fackler
|
r43346 | |||
def processlinerange(fromline: int, toline: int) -> Tuple[int, int]:
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    # keep this validation order: an inverted range is reported before a
    # bad fromline
    if toline < fromline:
        raise error.ParseError(_(b"line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_(b"fromline must be strictly positive"))
    return fromline - 1, toline
Augie Fackler
|
r43346 | |||
Bryan O'Sullivan
|
# render a byte count with the most readable unit (GB/MB/KB/bytes); the
# (multiplier, divisor, format) rows pick more decimal places as the
# value shrinks within each unit
bytecount = unitcountfn(
    (100, 1 << 30, _(b'%.0f GB')),
    (10, 1 << 30, _(b'%.1f GB')),
    (1, 1 << 30, _(b'%.2f GB')),
    (100, 1 << 20, _(b'%.0f MB')),
    (10, 1 << 20, _(b'%.1f MB')),
    (1, 1 << 20, _(b'%.2f MB')),
    (100, 1 << 10, _(b'%.0f KB')),
    (10, 1 << 10, _(b'%.1f KB')),
    (1, 1 << 10, _(b'%.2f KB')),
    (1, 1, _(b'%.0f bytes')),
)
Matt Mackall
|
r16397 | |||
Matt Harbison
|
class transformingwriter(typelib.BinaryIO_Proxy):
    """Writable file wrapper that pushes every write through an encoding
    function before handing it to the underlying file."""

    def __init__(self, fp: BinaryIO, encode: Callable[[bytes], bytes]) -> None:
        self._fp = fp
        self._encode = encode

    def write(self, data: bytes) -> int:
        # transform first, then delegate; report what the sink accepted
        transformed = self._encode(data)
        return self._fp.write(transformed)

    def flush(self) -> None:
        self._fp.flush()

    def close(self) -> None:
        self._fp.close()
Augie Fackler
|
r43346 | |||
Yuya Nishihara
|
# A single logical EOL: a run of CRs followed by LF, so CRLF (and any
# repeated CRs before the LF) collapses to one line ending. Old Macintosh
# lone-CR endings are deliberately unsupported; a stray CR is an error.
_eolre = remod.compile(br'\r*\n')


def tolf(s: bytes) -> bytes:
    """Normalize every line ending in ``s`` to a bare LF."""
    return _eolre.sub(b'\n', s)


def tocrlf(s: bytes) -> bytes:
    """Normalize every line ending in ``s`` to CRLF."""
    return _eolre.sub(b'\r\n', s)
Yuya Nishihara
|
r31776 | |||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
def _crlfwriter(fp: typelib.BinaryIO_Proxy) -> typelib.BinaryIO_Proxy:
    # wrap fp so every write is converted to CRLF line endings
    return transformingwriter(fp, tocrlf)


# bind the native-EOL helpers for this platform: on CRLF platforms
# (Windows) convert on the way in/out, elsewhere they are no-ops
if pycompat.oslinesep == b'\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity
Yuya Nishihara
|
r31777 | |||
Matt Harbison
|
if typing.TYPE_CHECKING:
    # Replace the various overloads that come along with aliasing other methods
    # with the narrow definition that we care about in the type checking phase
    # only. This ensures that both Windows and POSIX see only the definition
    # that is actually available.

    def tonativeeol(s: bytes) -> bytes:
        raise NotImplementedError

    def fromnativeeol(s: bytes) -> bytes:
        raise NotImplementedError

    def nativeeolwriter(fp: typelib.BinaryIO_Proxy) -> typelib.BinaryIO_Proxy:
        raise NotImplementedError


# TODO delete since workaround variant for Python 2 no longer needed.
def iterfile(fp):
    # identity: iterate the file object directly
    return fp
Jun Wu
|
r30395 | |||
Augie Fackler
|
r43346 | |||
def iterlines(iterator: Iterable[bytes]) -> Iterator[bytes]:
    """Flatten an iterable of byte chunks into a stream of their lines."""
    for block in iterator:
        yield from block.splitlines()
Alexander Solovyov
|
r9610 | |||
Augie Fackler
|
r43346 | |||
def expandpath(path: bytes) -> bytes:
    """Expand environment variables and a leading ``~``/``~user`` in ``path``."""
    with_vars = os.path.expandvars(path)
    return os.path.expanduser(with_vars)
Patrick Mezard
|
r10239 | |||
Augie Fackler
|
r43346 | |||
Roman Sokolov
|
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda text: text)
    alternatives = b'|'.join(mapping.keys())
    if escape_prefix:
        # a doubled prefix escapes itself: let the prefix character match,
        # and map it to itself (note: mutates the caller's mapping, as the
        # original implementation did)
        alternatives += b'|' + prefix
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(br'%s(%s)' % (prefix, alternatives))

    def _substitute(m):
        # drop the single leading prefix character from the match
        return fn(mapping[m.group()[1:]])

    return matcher.sub(_substitute, s)
Augie Fackler
|
r43346 | |||
Bryan O'Sullivan
|
# render a duration in seconds with the most readable unit (s/ms/us/ns),
# with more decimal places as the value shrinks within each unit
timecount = unitcountfn(
    (1, 1e3, _(b'%.0f s')),
    (100, 1, _(b'%.1f s')),
    (10, 1, _(b'%.2f s')),
    (1, 1, _(b'%.3f s')),
    (100, 0.001, _(b'%.1f ms')),
    (10, 0.001, _(b'%.2f ms')),
    (1, 0.001, _(b'%.3f ms')),
    (100, 0.000001, _(b'%.1f us')),
    (10, 0.000001, _(b'%.2f us')),
    (1, 0.000001, _(b'%.3f us')),
    (100, 0.000000001, _(b'%.1f ns')),
    (10, 0.000000001, _(b'%.2f ns')),
    (1, 0.000000001, _(b'%.3f ns')),
)
Bryan O'Sullivan
|
r18736 | |||
Martijn Pieters
|
@attr.s
class timedcmstats:
    """Stats information produced by the timedcm context manager on entering."""

    # the starting value of the timer as a float (meaning and resolution is
    # platform dependent, see util.timer)
    start = attr.ib(default=attr.Factory(lambda: timer()))
    # the number of seconds as a floating point value; starts at 0, updated when
    # the context is exited.
    elapsed = attr.ib(default=0)
    # the number of nested timedcm context managers.
    level = attr.ib(default=1)

    def __bytes__(self):
        # '<unknown>' while still inside the context (elapsed not set yet)
        return timecount(self.elapsed) if self.elapsed else b'<unknown>'

    __str__ = encoding.strmethod(__bytes__)
Augie Fackler
|
r43346 | |||
Martijn Pieters
|
@contextlib.contextmanager
def timedcm(whencefmt, *whenceargs):
    """A context manager that produces timing information for a given context.

    On entering a timedcmstats instance is produced.

    This context manager is reentrant.

    """
    # track nested context managers
    timedcm._nested += 1
    timing_stats = timedcmstats(level=timedcm._nested)
    try:
        with tracing.log(whencefmt, *whenceargs):
            yield timing_stats
    finally:
        # fill in the elapsed time (and unwind the nesting counter) even
        # when the body raised
        timing_stats.elapsed = timer() - timing_stats.start
        timedcm._nested -= 1


# nesting depth counter used above; stored on the function itself
timedcm._nested = 0
Bryan O'Sullivan
|
r18736 | |||
Augie Fackler
|
r43346 | |||
Bryan O'Sullivan
|
def timed(func):
    """Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    """

    def wrapper(*args, **kwargs):
        with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
            result = func(*args, **kwargs)
        stderr = procutil.stderr
        # indent by nesting level so nested timed calls read as a tree
        stderr.write(
            b'%s%s: %s\n'
            % (
                b' ' * time_stats.level * 2,
                pycompat.bytestr(func.__name__),
                time_stats,
            )
        )
        return result

    return wrapper
Bryan O'Sullivan
|
r19194 | |||
Augie Fackler
|
r43346 | |||
# suffix -> multiplier table for sizetoint; checked in order, so the bare
# b'b' entry must come after the two-character suffixes that end in 'b'
_sizeunits = (
    (b'm', 2**20),
    (b'k', 2**10),
    (b'g', 2**30),
    (b'kb', 2**10),
    (b'mb', 2**20),
    (b'gb', 2**30),
    (b'b', 1),
)


def sizetoint(s: bytes) -> int:
    """Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    """
    value = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if value.endswith(suffix):
                return int(float(value[: -len(suffix)]) * multiplier)
        # no recognized suffix: the whole string must be a plain integer
        return int(value)
    except ValueError:
        raise error.ParseError(_(b"couldn't parse size: %s") % s)
Bryan O'Sullivan
|
r19211 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class hooks:
    """A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources."""

    def __init__(self):
        # list of (source, hook) pairs; kept unsorted until invocation
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort by source name so invocation order is deterministic
        self._hooks.sort(key=lambda entry: entry[0])
        return [hook(*args) for _source, hook in self._hooks]
Mads Kiilerich
|
r20244 | |||
Augie Fackler
|
r43346 | |||
Augie Fackler
|
def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
    """Yields lines for a nicely formatted stacktrace.

    Skips the 'skip' last entries, then return the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.

    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    """
    # drop the 'skip' innermost frames plus this function's own frame
    frames = traceback.extract_stack()[: -skip - 1]
    entries = []
    for fname, lineno, funcname, _text in frames:
        location = fileline % (pycompat.sysbytes(fname), lineno)
        entries.append((location, pycompat.sysbytes(funcname)))
    # keep only the 'depth' outermost entries (depth == 0 keeps everything)
    entries = entries[-depth:]
    if not entries:
        return
    fnmax = max(len(location) for location, _funcname in entries)
    for fnln, func in entries:
        if line is None:
            yield (fnmax, fnln, func)
        else:
            yield line % (fnmax, fnln, func)
Augie Fackler
|
r43346 | |||
def debugstacktrace(
    msg=b'stacktrace',
    skip=0,
    f=procutil.stderr,
    otherf=procutil.stdout,
    depth=0,
    prefix=b'',
):
    """Writes a message to f (stderr) with a nicely formatted stacktrace.

    Skips the 'skip' entries closest to the call, then show 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    """
    if otherf:
        # flush the companion stream first so the trace is not interleaved
        # with pending stdout output
        otherf.flush()
    f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
    # skip + 1 hides this function's own frame from the trace
    for frameline in getstackframes(skip + 1, depth=depth):
        f.write(prefix + frameline)
    f.flush()


# convenient shortcut
dst = debugstacktrace
Mark Thomas
|
r34555 | |||
Augie Fackler
|
r43346 | |||
Mark Thomas
|
r34555 | def safename(f, tag, ctx, others=None): | ||
""" | ||||
Generate a name that it is safe to rename f to in the given context. | ||||
f: filename to rename | ||||
tag: a string tag that will be included in the new name | ||||
ctx: a context, in which the new name must not exist | ||||
others: a set of other filenames that the new name must not be in | ||||
Returns a file name of the form oldname~tag[~number] which does not exist | ||||
in the provided context and is not in the set of other names. | ||||
""" | ||||
if others is None: | ||||
others = set() | ||||
Augie Fackler
|
r43347 | fn = b'%s~%s' % (f, tag) | ||
Mark Thomas
|
r34555 | if fn not in ctx and fn not in others: | ||
return fn | ||||
for n in itertools.count(1): | ||||
Augie Fackler
|
r43347 | fn = b'%s~%s~%s' % (f, tag, n) | ||
Mark Thomas
|
r34555 | if fn not in ctx and fn not in others: | ||
return fn | ||||
Boris Feld
|
r35772 | |||
Augie Fackler
|
r43346 | |||
Boris Feld
|
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) >= n:
        return data
    # a short read means the stream ended prematurely; surface that to the
    # user rather than returning truncated data
    raise error.Abort(
        _(b"stream ended unexpectedly (got %d bytes, expected %d)")
        % (len(data), n)
    )
Gregory Szorc
|
r35773 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the 2's complement representation, least significant group
    first.

    >>> uvarintencode(0)
    b'\\x00'
    >>> uvarintencode(1)
    b'\\x01'
    >>> uvarintencode(127)
    b'\\x7f'
    >>> uvarintencode(1337)
    b'\\xb9\\n'
    >>> uvarintencode(65536)
    b'\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
    ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
    # emit 7 bits per byte, least significant group first; the high bit of
    # each byte except the last marks "more bytes follow"
    bits = value & 0x7F
    value >>= 7
    chunks = []  # renamed from 'bytes' to avoid shadowing the builtin
    while value:
        chunks.append(pycompat.bytechr(0x80 | bits))
        bits = value & 0x7F
        value >>= 7
    chunks.append(pycompat.bytechr(bits))
    return b''.join(chunks)
Gregory Szorc
|
r35773 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
        ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    value = 0
    shift = 0
    while True:
        # each byte contributes its low 7 bits; the high bit says whether
        # another byte follows
        current = ord(readexactly(fh, 1))
        value |= (current & 0x7F) << shift
        if not (current & 0x80):
            return value
        shift += 7
Manuel Jacob
|
r45550 | |||
# Passing the '' locale means that the locale should be set according to the
# user settings (environment variables).
# Python sometimes avoids setting the global locale settings. When interfacing
# with C code (e.g. the curses module or the Subversion bindings), the global
# locale settings must be initialized correctly. Python 2 does not initialize
# the global locale settings on interpreter startup. Python 3 sometimes
# initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
# explicitly initialize it to get consistent behavior if it's not already
# initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
# LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
# if we can remove this code.
@contextlib.contextmanager
def with_lc_ctype():
    previous = locale.setlocale(locale.LC_CTYPE, None)
    if previous != 'C':
        # LC_CTYPE already initialized; nothing to do
        yield
        return
    try:
        try:
            locale.setlocale(locale.LC_CTYPE, '')
        except locale.Error:
            # The likely case is that the locale from the environment
            # variables is unknown.
            pass
        yield
    finally:
        # always restore the interpreter-wide setting on exit
        locale.setlocale(locale.LC_CTYPE, previous)
Joerg Sonnenberger
|
r45621 | |||
def _estimatememory() -> Optional[int]:
    """Provide an estimate for the available system memory in Bytes.

    If no estimate can be provided on the platform, returns None.
    """
    # NOTE(review): the two branches measure different quantities — the
    # Windows branch returns *available* physical memory (ullAvailPhys)
    # while the sysconf branch returns *total* physical memory
    # (page size * physical pages); confirm whether that asymmetry is
    # intentional for callers of this estimate.
    if pycompat.sysplatform.startswith(b'win'):
        # On Windows, use the GlobalMemoryStatusEx kernel function directly.
        # noinspection PyPep8Naming
        from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
        # ctypes.wintypes re-exports ctypes names (byref, sizeof, windll),
        # which is why they can be imported from there
        from ctypes.wintypes import (  # pytype: disable=import-error
            Structure,
            byref,
            sizeof,
            windll,
        )

        # mirrors the Win32 MEMORYSTATUSEX struct layout expected by
        # GlobalMemoryStatusEx
        class MEMORYSTATUSEX(Structure):
            _fields_ = [
                ('dwLength', DWORD),
                ('dwMemoryLoad', DWORD),
                ('ullTotalPhys', DWORDLONG),
                ('ullAvailPhys', DWORDLONG),
                ('ullTotalPageFile', DWORDLONG),
                ('ullAvailPageFile', DWORDLONG),
                ('ullTotalVirtual', DWORDLONG),
                ('ullAvailVirtual', DWORDLONG),
                ('ullExtendedVirtual', DWORDLONG),
            ]

        x = MEMORYSTATUSEX()
        # dwLength must be set to the struct size before the call, per the
        # GlobalMemoryStatusEx contract
        x.dwLength = sizeof(x)
        windll.kernel32.GlobalMemoryStatusEx(byref(x))
        return x.ullAvailPhys
    # On newer Unix-like systems and Mac OSX, the sysconf interface
    # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
    # seems to be implemented on most systems.
    try:
        pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
        pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
        return pagesize * pages
    except OSError:  # sysconf can fail
        pass
    except KeyError:  # unknown parameter
        pass
    # falls through to the implicit None documented above