# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import annotations

import collections
import contextlib
import os
import stat
import uuid

from typing import (
    Any,
    Dict,
    Iterable,
    Iterator,
    List,
    Optional,
    Tuple,
)

from .i18n import _

from hgdemandimport import tracing

from . import (
    dirstatemap,
    encoding,
    error,
    match as matchmod,
    node,
    pathutil,
    policy,
    pycompat,
    scmutil,
    txnutil,
    util,
)

from .dirstateutils import (
    timestamp,
)

from .interfaces import (
    dirstate as intdirstate,
)

parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

HAS_FAST_DIRSTATE_V2 = rustmod is not None

propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = dirstatemap.DirstateItem


class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        return obj._opener.join(fname)


class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        return obj._join(fname)


def check_invalidated(func):
    """check that the func is called with a non-invalidated dirstate

    The dirstate is in an "invalidated state" after an error occurred during
    its modification and remains so until we exit the top level scope that
    framed such change.
    """

    def wrap(self, *args, **kwargs):
        if self._invalidated_context:
            msg = 'calling `%s` after the dirstate was invalidated'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap


def requires_changing_parents(func):
    def wrap(self, *args, **kwargs):
        if not self.is_changing_parents:
            msg = 'calling `%s` outside of a changing_parents context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return check_invalidated(wrap)


def requires_changing_files(func):
    def wrap(self, *args, **kwargs):
        if not self.is_changing_files:
            msg = 'calling `%s` outside of a `changing_files` context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return check_invalidated(wrap)


def requires_changing_any(func):
    def wrap(self, *args, **kwargs):
        if not self.is_changing_any:
            msg = 'calling `%s` outside of a changing context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return check_invalidated(wrap)


def requires_changing_files_or_status(func):
    def wrap(self, *args, **kwargs):
        if not (self.is_changing_files or self._running_status > 0):
            msg = (
                'calling `%s` outside of a changing_files '
                'or running_status context'
            )
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return check_invalidated(wrap)


CHANGE_TYPE_PARENTS = "parents"
CHANGE_TYPE_FILES = "files"


class dirstate(intdirstate.idirstate):
    # used by largefile to avoid overwriting the transaction callback
    _tr_key_suffix = b''

    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
        use_tracked_hint=False,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._use_tracked_hint = use_tracked_hint
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        # Either build a sparse-matcher or None if sparse is disabled
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        # True if any internal state may be different
        self._dirty = False
        # True if the set of tracked files may be different
        self._dirty_tracked_set = False
        self._ui = ui
        self._filecache = {}
        # nesting level of `changing_parents` context
        self._changing_level = 0
        # the change currently underway
        self._change_type = None
        # number of open _running_status contexts
        self._running_status = 0
        # True if the current dirstate changing operations have been
        # invalidated (used to make sure all nested contexts have been exited)
        self._invalidated_context = False
        self._attached_to_a_transaction = False
        self._filename = b'dirstate'
        self._filename_th = b'dirstate-tracked-hint'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd

    def refresh(self):
        # XXX if this happens, you likely did not enter the `changing_xxx`
        # context using `repo.dirstate`, so a later `repo.dirstate` access
        # might call `refresh`.
        if self.is_changing_any:
            msg = "refreshing the dirstate in the middle of a change"
            raise error.ProgrammingError(msg)
        if '_branch' in vars(self):
            del self._branch
        if '_map' in vars(self) and self._map.may_need_refresh():
            self.invalidate()

    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        self._pl

    @contextlib.contextmanager
    @check_invalidated
    def running_status(self, repo):
        """Wrap a status operation

        This context is not mutually exclusive with the `changing_*` contexts.
        It also does not require the `wlock` to be taken.

        If the wlock is taken, this context will behave in a simple way, and
        ensure the data are scheduled for write when leaving the top level
        context.

        If the lock is not taken, it will only warrant that the data are either
        committed (written) or rolled back (invalidated) when exiting the top
        level context. The write/invalidate action must be performed by the
        wrapped code.

        The expected logic is:

        A: read the dirstate
        B: run status
           This might make the dirstate dirty by updating caches,
           especially in Rust.
        C: do more "post status" fixup if relevant
        D: try to take the w-lock (this will invalidate the changes if they were raced)
        E0: if dirstate changed on disk → discard changes (done by dirstate internal)
        E1: elif lock was acquired → write the changes
        E2: else → discard the changes
        """
        has_lock = repo.currentwlock() is not None
        is_changing = self.is_changing_any
        tr = repo.currenttransaction()
        has_tr = tr is not None
        nested = bool(self._running_status)
        first_and_alone = not (is_changing or has_tr or nested)

        # enforce that no change happened outside of a proper context.
        if first_and_alone and self._dirty:
            has_tr = repo.currenttransaction() is not None
            if not has_tr and self._changing_level == 0 and self._dirty:
                msg = "entering a status context, but dirstate is already dirty"
                raise error.ProgrammingError(msg)

        should_write = has_lock and not (nested or is_changing)
        self._running_status += 1
        try:
            yield
        except Exception:
            self.invalidate()
            raise
        finally:
            self._running_status -= 1
            if self._invalidated_context:
                should_write = False
                self.invalidate()

        if should_write:
            assert repo.currenttransaction() is tr
            self.write(tr)
        elif not has_lock:
            if self._dirty:
                msg = b'dirstate dirty while exiting an isolated status context'
                repo.ui.develwarn(msg)
                self.invalidate()

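    # A minimal usage sketch for `running_status` (hedged: `repo` stands for
    # any localrepository-like object; whether the wlock is held decides who
    # is responsible for writing):
    #
    #     with repo.dirstate.running_status(repo):
    #         st = repo.status()        # may dirty the dirstate (cache updates)
    #         # ... post-status fixups ...
    #     # with the wlock held, the dirstate is written when the top level
    #     # context exits; without it, the wrapped code must write itself or
    #     # the changes are discarded with a devel warning.
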
    @contextlib.contextmanager
    @check_invalidated
    def _changing(self, repo, change_type):
        if repo.currentwlock() is None:
            msg = b"trying to change the dirstate without holding the wlock"
            raise error.ProgrammingError(msg)

        has_tr = repo.currenttransaction() is not None
        if not has_tr and self._changing_level == 0 and self._dirty:
            msg = b"entering a changing context, but dirstate is already dirty"
            repo.ui.develwarn(msg)

        assert self._changing_level >= 0
        # different types of change are mutually exclusive
        if self._change_type is None:
            assert self._changing_level == 0
            self._change_type = change_type
        elif self._change_type != change_type:
            msg = (
                'trying to open "%s" dirstate-changing context while a "%s" is'
                ' already open'
            )
            msg %= (change_type, self._change_type)
            raise error.ProgrammingError(msg)
        should_write = False
        self._changing_level += 1
        try:
            yield
        except:  # re-raises
            self.invalidate()  # this will set `_invalidated_context`
            raise
        finally:
            assert self._changing_level > 0
            self._changing_level -= 1
            # If the dirstate is being invalidated, call invalidate again.
            # This will throw away anything added by an upper context and
            # reset the `_invalidated_context` flag when relevant
            if self._changing_level <= 0:
                self._change_type = None
                assert self._changing_level == 0
            if self._invalidated_context:
                # make sure we invalidate anything an upper context might
                # have changed.
                self.invalidate()
            else:
                should_write = self._changing_level <= 0

        tr = repo.currenttransaction()
        if has_tr != (tr is not None):
            if has_tr:
                m = "transaction vanished while changing dirstate"
            else:
                m = "transaction appeared while changing dirstate"
            raise error.ProgrammingError(m)
        if should_write:
            self.write(tr)

    @contextlib.contextmanager
    def changing_parents(self, repo):
        """Wrap a dirstate change related to a change of working copy parents

        This context scopes a series of dirstate modifications that match an
        update of the working copy parents (typically `hg update`, `hg merge`,
        etc.).

        The dirstate's methods that perform this kind of modification require
        this context to be present before being called.
        Such methods are decorated with `@requires_changing_parents`.

        The new dirstate contents will be written to disk when the top-most
        `changing_parents` context exits successfully. If an exception is
        raised during a `changing_parents` context of any level, all changes
        are invalidated. If this context is open within an open transaction,
        the dirstate writing is delayed until that transaction is successfully
        committed (and the dirstate is invalidated on transaction abort).

        The `changing_parents` operation is mutually exclusive with the
        `changing_files` one.
        """
        with self._changing(repo, CHANGE_TYPE_PARENTS) as c:
            yield c

    @contextlib.contextmanager
    def changing_files(self, repo):
        """Wrap a dirstate change related to the set of tracked files

        This context scopes a series of dirstate modifications that change the
        set of tracked files (typically `hg add`, `hg remove`, etc.) or some
        dirstate-stored information (like `hg rename --after`), but preserve
        the working copy parents.

        The dirstate's methods that perform this kind of modification require
        this context to be present before being called.
        Such methods are decorated with `@requires_changing_files`.

        The new dirstate contents will be written to disk when the top-most
        `changing_files` context exits successfully. If an exception is raised
        during a `changing_files` context of any level, all changes are
        invalidated. If this context is open within an open transaction, the
        dirstate writing is delayed until that transaction is successfully
        committed (and the dirstate is invalidated on transaction abort).

        The `changing_files` operation is mutually exclusive with the
        `changing_parents` one.
        """
        with self._changing(repo, CHANGE_TYPE_FILES) as c:
            yield c

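    # A minimal usage sketch for the `changing_*` contexts (hedged: `repo` is
    # any localrepository-like object and b'added-file' is a made-up path):
    #
    #     with repo.wlock():
    #         with repo.dirstate.changing_files(repo):
    #             # methods decorated with @requires_changing_files may now
    #             # be called; the dirstate is written (or handed to the
    #             # current transaction) when the outermost context exits.
    #             repo.dirstate.set_tracked(b'added-file')
    #
    # `changing_parents(repo)` is used the same way around working-copy
    # parent updates, and the two kinds of context cannot be nested together.
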
    # here to help migration to the new code
    def parentchange(self):
        msg = (
            "Mercurial 6.4 and later requires call to "
            "`dirstate.changing_parents(repo)`"
        )
        raise error.ProgrammingError(msg)

    @property
    def is_changing_any(self) -> bool:
        """Returns true if the dirstate is in the middle of a set of changes.

        This returns True for any kind of change.
        """
        return self._changing_level > 0

    @property
    def is_changing_parents(self) -> bool:
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        if self._changing_level <= 0:
            return False
        return self._change_type == CHANGE_TYPE_PARENTS

    @property
    def is_changing_files(self) -> bool:
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the files tracked or their sources.
        """
        if self._changing_level <= 0:
            return False
        return self._change_type == CHANGE_TYPE_FILES

    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        return self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.

        When sparse is disabled, return None.
        """
        if self._sparsematchfn is None:
            return None
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()

    @repocache(b'branch')
    def _branch(self):
        f = None
        data = b''
        try:
            f, mode = txnutil.trypending(self._root, self._opener, b'branch')
            data = f.read().strip()
        except FileNotFoundError:
            pass
        finally:
            if f is not None:
                f.close()
        if not data:
            return b"default"
        return data

    @property
    def _pl(self):
        return self._map.parents()

    def hasdir(self, d: bytes) -> bool:
        return self._map.hastrackeddir(d)

    @rootcache(b'.hgignore')
    def _ignore(self) -> matchmod.basematcher:
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self) -> bool:
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self) -> bool:
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(
        self, buildfallback: intdirstate.FlagFuncFallbackT
    ) -> intdirstate.FlagFuncReturnT:
        """build a callable that returns flags associated with a filename

        The information is extracted from three possible layers:
        1. the file system if it supports the information
        2. the "fallback" information stored in the dirstate if any
        3. a more expensive mechanism inferring the flags from the parents.
        """

        # small hack to cache the result of buildfallback()
        fallback_func = []

        def get_flags(x: bytes) -> bytes:
            entry = None
            fallback_value = None
            try:
                st = os.lstat(self._join(x))
            except OSError:
                return b''

            if self._checklink:
                if util.statislink(st):
                    return b'l'
            else:
                entry = self.get_entry(x)
                if entry.has_fallback_symlink:
                    if entry.fallback_symlink:
                        return b'l'
                else:
                    if not fallback_func:
                        fallback_func.append(buildfallback())
                    fallback_value = fallback_func[0](x)
                    if b'l' in fallback_value:
                        return b'l'

            if self._checkexec:
                if util.statisexec(st):
                    return b'x'
            else:
                if entry is None:
                    entry = self.get_entry(x)
                if entry.has_fallback_exec:
                    if entry.fallback_exec:
                        return b'x'
                else:
                    if fallback_value is None:
                        if not fallback_func:
                            fallback_func.append(buildfallback())
                        fallback_value = fallback_func[0](x)
                    if b'x' in fallback_value:
                        return b'x'
            return b''

        return get_flags

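    # Hedged sketch of the callable built by `flagfunc` (the fallback builder
    # shown here is a stand-in, not a real API; paths are made up):
    #
    #     get_flags = repo.dirstate.flagfunc(lambda: (lambda path: b''))
    #     get_flags(b'bin/script')   # -> b'x' if executable on disk
    #     get_flags(b'some-link')    # -> b'l' for a symlink
    #     get_flags(b'plain-file')   # -> b'' otherwise
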
    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()

    def getcwd(self) -> bytes:
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path

    def get_entry(self, path: bytes) -> intdirstate.DirstateItemT:
        """return a DirstateItem for the associated path"""
        entry = self._map.get(path)
        if entry is None:
            return DirstateItem()
        return entry

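    # Hedged sketch of reading per-file state through `get_entry` (attribute
    # names follow DirstateItem as used in this module; b'some/file' is a
    # made-up path):
    #
    #     item = repo.dirstate.get_entry(b'some/file')
    #     if item.tracked and item.added:
    #         ...  # file is scheduled for addition
    #     # an unknown path yields a blank DirstateItem rather than None
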
    def __contains__(self, key: Any) -> bool:
        return key in self._map

    def __iter__(self) -> Iterator[bytes]:
        return iter(sorted(self._map))

    def items(self) -> Iterator[Tuple[bytes, intdirstate.DirstateItemT]]:
        return self._map.items()

    iteritems = items

    def parents(self) -> List[bytes]:
        return [self._validate(p) for p in self._pl]

    def p1(self) -> bytes:
        return self._validate(self._pl[0])

    def p2(self) -> bytes:
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self) -> bytes:
        return encoding.tolocal(self._branch)

    @requires_changing_parents
    def setparents(self, p1: bytes, p2: Optional[bytes] = None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal, and previous copy records are discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._changing_level == 0:
            raise ValueError(
                "cannot set dirstate parent outside of "
                "dirstate.changing_parents context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)

    def setbranch(
        self, branch: bytes, transaction: Optional[intdirstate.TransactionT]
    ) -> None:
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        if transaction is not None:
            self._setup_tr_abort(transaction)
            transaction.addfilegenerator(
                b'dirstate-3-branch%s' % self._tr_key_suffix,
                (b'branch',),
                self._write_branch,
                location=b'plain',
                post_finalize=True,
            )
            return

        vfs = self._opener
        with vfs(b'branch', b'w', atomictemp=True, checkambig=True) as f:
            self._write_branch(f)
            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            #
            # XXX do we actually need this?
            # refreshing the attribute is quite cheap
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()

    def _write_branch(self, file_obj):
        file_obj.write(self._branch + b'\n')

    def invalidate(self) -> None:
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._dirty = False
        self._dirty_tracked_set = False
        self._invalidated_context = bool(
            self._changing_level > 0
            or self._attached_to_a_transaction
            or self._running_status
        )
        self._origpl = None

    @requires_changing_any
    def copy(self, source: Optional[bytes], dest: bytes) -> None:
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._check_sparse(source)
            self._map.copymap[dest] = source
        else:
            self._map.copymap.pop(dest, None)

    def copied(self, file: bytes) -> Optional[bytes]:
        return self._map.copymap.get(file, None)

    def copies(self) -> Dict[bytes, bytes]:
        return self._map.copymap

    @requires_changing_files
    def set_tracked(self, filename, reset_copy=False):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of the "update/merge" case. For
        example by a command like `hg add X`.

        if reset_copy is set, any existing copy information will be dropped.

        return True if the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            self._check_new_tracked_filename(filename)
        pre_tracked = self._map.set_tracked(filename)
        if reset_copy:
            self._map.copymap.pop(filename, None)
        if pre_tracked:
            self._dirty_tracked_set = True
        return pre_tracked

    @requires_changing_files
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of the "update/merge" case. For
        example by a command like `hg remove X`.

        return True if the file was previously tracked, False otherwise.
        """
        ret = self._map.set_untracked(filename)
        if ret:
            self._dirty = True
            self._dirty_tracked_set = True
        return ret

    @requires_changing_files_or_status
    def set_clean(self, filename, parentfiledata):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        (mode, size, mtime) = parentfiledata
        self._map.set_clean(filename, mode, size, mtime)

    @requires_changing_files_or_status
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._map.set_possibly_dirty(filename)

self._map.set_possibly_dirty(filename) | ||||
r50857 | @requires_changing_parents | |||
r48493 | def update_file_p1( | |||
r48392 | self, | |||
filename, | ||||
p1_tracked, | ||||
): | ||||
"""Set a file as tracked in the parent (or not) | ||||
This is to be called when adjust the dirstate to a new parent after an history | ||||
rewriting operation. | ||||
It should not be called during a merge (p2 != nullid) and only within | ||||
r50855 | a `with dirstate.changing_parents(repo):` context. | |||
r48392 | """ | |||
if self.in_merge: | ||||
Matt Harbison
|
r52612 | msg = 'update_file_reference should not be called when merging' | ||
r48392 | raise error.ProgrammingError(msg) | |||
entry = self._map.get(filename) | ||||
if entry is None: | ||||
wc_tracked = False | ||||
else: | ||||
wc_tracked = entry.tracked | ||||
r48952 | if not (p1_tracked or wc_tracked): | |||
r48392 | # the file is no longer relevant to anyone | |||
r48813 | if self._map.get(filename) is not None: | |||
self._map.reset_state(filename) | ||||
r48802 | self._dirty = True | |||
r48392 | elif (not p1_tracked) and wc_tracked: | |||
r48494 | if entry is not None and entry.added: | |||
return # avoid dropping copy information (maybe?) | ||||
r48392 | ||||
r48494 | self._map.reset_state( | |||
filename, | ||||
wc_tracked, | ||||
p1_tracked, | ||||
r48952 | # the underlying reference might have changed, we will have to | |||
# check it. | ||||
has_meaningful_mtime=False, | ||||
r48494 | ) | |||
    @requires_changing_parents
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parent changes to keep track
        of the file's situation with regard to the working copy and its parent.

        This function must be called within a `dirstate.changing_parents`
        context.

        note: the API is at an early stage and we might need to adjust it
        depending on what information ends up being relevant and useful to
        other processing.
        """
        self._update_file(
            filename=filename,
            wc_tracked=wc_tracked,
            p1_tracked=p1_tracked,
            p2_info=p2_info,
            possibly_dirty=possibly_dirty,
            parentfiledata=parentfiledata,
        )

    def hacky_extension_update_file(self, *args, **kwargs):
        """NEVER USE THIS, YOU DO NOT NEED IT

        This function is a variant of "update_file" to be called by a small set
        of extensions; it also adjusts the internal state of the file, but can
        be called outside a `changing_parents` context.

        A very small number of extensions meddle with the working copy content
        in a way that requires adjusting the dirstate accordingly. At the time
        this is written they are:
        - keyword,
        - largefile,
        PLEASE DO NOT GROW THIS LIST ANY FURTHER.

        This function could probably be replaced by a more semantic one (like
        "adjust expected size" or "always revalidate file content", etc.),
        however at the time this is written that is too much of a detour to be
        considered.
        """
        if not (self._changing_level > 0 or self._running_status > 0):
            msg = "requires a changes context"
            raise error.ProgrammingError(msg)
        self._update_file(
            *args,
            **kwargs,
        )

    def _update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees
        self._dirty = True
        old_entry = self._map.get(filename)
        if old_entry is None:
            prev_tracked = False
        else:
            prev_tracked = old_entry.tracked
        if prev_tracked != wc_tracked:
            self._dirty_tracked_set = True

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )

    def _check_new_tracked_filename(self, filename):
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)
        self._check_sparse(filename)

    def _check_sparse(self, filename):
        """Check that a filename is inside the sparse profile"""
        sparsematch = self._sparsematcher
        if sparsematch is not None and not sparsematch.always():
            if not sparsematch(filename):
                msg = _(b"cannot add '%s' - it is outside the sparse checkout")
                hint = _(
                    b'include file with `hg debugsparse --include <pattern>` or use '
                    b'`hg add -s <file>` to include file directory while adding'
                )
                raise error.Abort(msg % filename, hint=hint)

    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded

    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded

    def normalize(
        self, path: bytes, isknown: bool = False, ignoremissing: bool = False
    ) -> bytes:
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing paths are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path

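    # Hedged sketch of `normalize` on a case-insensitive filesystem (paths are
    # made up): if the dirstate or the disk already knows the file as
    # b'Makefile', then
    #
    #     repo.dirstate.normalize(b'MAKEFILE')   # -> b'Makefile'
    #
    # while on a case-sensitive filesystem the path is returned unchanged.
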
    # XXX this method is barely used, as a result:
    # - its semantics are unclear
    # - do we really need it?
    @requires_changing_parents
    def clear(self) -> None:
        self._map.clear()
        self._dirty = True

    @requires_changing_parents
    def rebuild(
        self,
        parent: bytes,
        allfiles: Iterable[bytes],  # TODO: more than iterable? (uses len())
        changedfiles: Optional[Iterable[bytes]] = None,
    ) -> None:
        matcher = self._sparsematcher
        if matcher is not None and not matcher.always():
            # should not add non-matching files
            allfiles = [f for f in allfiles if matcher(f)]
            if changedfiles:
                changedfiles = [f for f in changedfiles if matcher(f)]

            if changedfiles is not None:
                # these files will be deleted from the dirstate when they are
                # not found to be in allfiles
                dirstatefilestoremove = {f for f in self if not matcher(f)}
                changedfiles = dirstatefilestoremove.union(changedfiles)

        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            self.clear()
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True

    def _setup_tr_abort(self, tr):
        """make sure we invalidate the current change on abort"""
        if tr is None:
            return

        def on_abort(tr):
            self._attached_to_a_transaction = False
            self.invalidate()

        tr.addabort(
            b'dirstate-invalidate%s' % self._tr_key_suffix,
            on_abort,
        )

    def write(self, tr: Optional[intdirstate.TransactionT]) -> None:
        if not self._dirty:
            return
        # make sure we don't request a write of invalidated content
        # XXX move before the dirty check once `unlock` stops calling `write`
        assert not self._invalidated_context

        write_key = self._use_tracked_hint and self._dirty_tracked_set
        if tr:
            self._setup_tr_abort(tr)
            self._attached_to_a_transaction = True

            def on_success(f):
                self._attached_to_a_transaction = False
                self._writedirstate(tr, f)

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate-1-main%s' % self._tr_key_suffix,
                (self._filename,),
                on_success,
                location=b'plain',
                post_finalize=True,
            )
            if write_key:
                tr.addfilegenerator(
                    b'dirstate-2-key-post%s' % self._tr_key_suffix,
                    (self._filename_th,),
                    lambda f: self._write_tracked_hint(tr, f),
                    location=b'plain',
                    post_finalize=True,
                )
            return

        file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
        with file(self._filename) as f:
            self._writedirstate(tr, f)
        if write_key:
            # we update the key-file after writing to make sure readers have a
            # key that matches the newly written content
            with file(self._filename_th) as f:
                self._write_tracked_hint(tr, f)

    def delete_tracked_hint(self):
        """remove the tracked_hint file

        To be used by format downgrade operations"""
        self._opener.unlink(self._filename_th)
        self._use_tracked_hint = False

    def addparentchangecallback(
        self, category: bytes, callback: intdirstate.AddParentChangeCallbackT
    ) -> None:
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

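    # Hedged sketch of registering a parent-change callback (the category name
    # and the callback body are made up for illustration):
    #
    #     def _log_parents(dirstate, old, new):
    #         dirstate._ui.debug(b'parents moved from %r to %r\n' % (old, new))
    #
    #     repo.dirstate.addparentchangecallback(b'my-ext', _log_parents)
    #
    # The callback fires from `_writedirstate` when the recorded parents
    # differ from the original ones.
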
    def _writedirstate(self, tr, st):
        # make sure we don't write invalidated content
        assert not self._invalidated_context
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(self._plchangecallbacks.items()):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        self._map.write(tr, st)
        self._dirty = False
        self._dirty_tracked_set = False

    def _write_tracked_hint(self, tr, f):
        key = node.hex(uuid.uuid4().bytes)
        f.write(b"1\n%s\n" % key)  # 1 is the format version

    def _dirignore(self, f):
        if self._ignore(f):
            return True
        for p in pathutil.finddirs(f):
            if self._ignore(p):
                return True
        return False

    def _ignorefiles(self) -> List[bytes]:
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files

    def _ignorefileandline(self, f: bytes) -> intdirstate.IgnoreFileAndLineT:
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")

    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).

        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue
            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:
                # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename.  Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in results.items():
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in normed.items():
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

Matt Harbison
|
    def walk(
        self,
        match: matchmod.basematcher,
        subrepos: Any,
        unknown: bool,
        ignored: bool,
        full: bool = True,
    ) -> intdirstate.WalkReturnT:
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).
        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

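        # In the returned mapping, a path maps to its lstat result when it was
        # actually seen on disk, and to None when it is only known to the
        # dirstate (missing on disk, replaced by a directory, or dropped as a
        # case-folding duplicate).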
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always
        if self._sparsematchfn is not None:
            em = matchmod.exact(match.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            match = matchmod.intersectmatchers(match, sm)
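        # The sparse intersection above narrows the walk to the sparse
        # profile, while the union with an exact matcher keeps explicitly
        # named files visible even when they fall outside of it.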
        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True
        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]
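        # `work` now holds (normalized, as-given) directory pairs discovered
        # from the explicit arguments; ignored directories were pruned just
        # above so step 2 never descends into them.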
        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except (PermissionError, FileNotFoundError) as inst:
                    match.bad(
                        self.pathto(nd), encoding.strtolocal(inst.strerror)
                    )
                    continue
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

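        # The b'.hg' entry and the subrepo roots were only kept as sentinels
        # to stop walks from straying outside the repository; they are not
        # working-copy files, so they are dropped before reporting.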
        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results

    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        if self._sparsematchfn is not None:
            em = matchmod.exact(matcher.files())
            sm = matchmod.unionmatcher([self._sparsematcher, em])
            matcher = matchmod.intersectmatchers(matcher, sm)

        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
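        # For instance, running with --config worker.numcpus=4 exports
        # RAYON_NUM_THREADS=4 for the Rust call below (unless the variable is
        # already set), while worker.enabled=False pins it to a single thread.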
        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

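        # The Rust call may have updated cached data in the dirstate map
        # (hence the returned dirty flag); propagate it so a later write
        # persists those updates.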
        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for fn, message in sorted(bad):
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)

    def status(
        self,
        match: matchmod.basematcher,
        subrepos: bool,
        ignored: bool,
        clean: bool,
        unknown: bool,
    ) -> intdirstate.StatusReturnT:
        """Determine the status of the working copy relative to the
        dirstate and return a tuple of (unsure, status, mtime_boundary), where
        status is of type scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        if not self._running_status:
            msg = "Calling `status` outside a `running_status` context"
            raise error.ProgrammingError(msg)
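        # In other words, callers must enter the dirstate's running_status
        # context before asking for a status; doing otherwise is a bug in the
        # caller, not a user-facing error.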
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False

        # Get the time from the filesystem so we can disambiguate files that
        # appear modified in the present or future.
        try:
            mtime_boundary = timestamp.get_fs_now(self._opener)
        except OSError:
            # In largefiles or readonly context
            mtime_boundary = None
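        # A file whose mtime is not reliably older than this boundary cannot
        # be trusted as clean, since it may still be changing within the
        # filesystem's timestamp resolution; such files end up in `lookup`
        # ("unsure") rather than `clean` below.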
        if use_rust:
            try:
                res = self._rust_status(
                    match, listclean, listignored, listunknown
                )
                return res + (mtime_boundary,)
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

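        # Pure Python fallback: bind the hot attribute lookups to locals so
        # the per-file loop below stays cheap.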
        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        checklink = self._checklink
        copymap = self._map.copymap

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
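        # Classify every walked file: paths unknown to the dirstate become
        # ignored/unknown, tracked paths missing on disk become deleted, and
        # the remaining tracked paths are sorted into modified, added,
        # removed, lookup ("unsure") or clean from the dirstate entry and the
        # on-disk stat data.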
        for fn, st in self.walk(
            match, subrepos, listunknown, listignored, full=full
        ).items():
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            t = dget(fn)
            mode = t.mode
            size = t.size

            if not st and t.tracked:
                dadd(fn)
            elif t.p2_info:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if not checklink and t.has_fallback_symlink:
                    # If the file system does not support symlink, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif not checkexec and t.has_fallback_exec:
                    # If the file system does not support exec bits, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                else:
                    reliable = None
                    if mtime_boundary is not None:
                        reliable = timestamp.reliable_mtime_of(
                            st, mtime_boundary
                        )
                    elif t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                        # We can't compute the current fs time, so we're in
                        # a readonly fs or a LFS context.
                        cadd(fn)
                        continue
                    if reliable is None or not t.mtime_likely_equal_to(
                        reliable
                    ):
                        # There might be a change in the future if for example
                        # the internal clock is off, but this is a case where
                        # the issues the user would face would be a lot worse
                        # and there is nothing we can really do.
                        ladd(fn)
                    elif listclean:
                        cadd(fn)

        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status, mtime_boundary)

    def matches(self, match: matchmod.basematcher) -> Iterable[bytes]:
        """
        return files in the dirstate (in whatever state) filtered by match
        """
        dmap = self._map
        if rustmod is not None:
            dmap = self._map._map

        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files
            # is much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just
            # return that
            return list(files)
        return [f for f in dmap if match(f)]

    def all_file_names(self):
        """list all filenames currently used by this dirstate

        This is only used to do `hg rollback` related backups in the
        transaction
        """
        files = [b'branch']
        if self._opener.exists(self._filename):
            files.append(self._filename)
        if self._use_dirstate_v2:
            files.append(self._map.docket.data_filename())
        return tuple(files)

    def verify(
        self, m1, m2, p1: bytes, narrow_matcher: Optional[Any] = None
    ) -> Iterator[bytes]:
        """
        check the dirstate contents against the parent manifest and yield errors
        """
        missing_from_p1 = _(
            b"%s marked as tracked in p1 (%s) but not in manifest1\n"
        )
        unexpected_in_p1 = _(b"%s marked as added, but also in manifest1\n")
        missing_from_ps = _(
            b"%s marked as modified, but not in either manifest\n"
        )
        missing_from_ds = _(
            b"%s in manifest1, but not marked as tracked in p1 (%s)\n"
        )
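        # Two passes: first check every dirstate entry against the manifests,
        # then check that everything in manifest1 is tracked in the dirstate.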
        for f, entry in self.items():
            if entry.p1_tracked:
                if entry.modified and f not in m1 and f not in m2:
                    yield missing_from_ps % f
                elif f not in m1:
                    yield missing_from_p1 % (f, node.short(p1))
            if entry.added and f in m1:
                yield unexpected_in_p1 % f
        for f in m1:
            if narrow_matcher is not None and not narrow_matcher(f):
                continue
            entry = self.get_entry(f)
            if not entry.p1_tracked:
                yield missing_from_ds % (f, node.short(p1))