# repository.py - Interfaces and base classes for repositories and peers.
# coding: utf-8
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import annotations

import abc
import typing

from typing import (
    Any,
    Collection,
    Protocol,
)

from ..i18n import _
from .. import error
from . import util as interfaceutil

if typing.TYPE_CHECKING:
    # Almost all mercurial modules are only imported in the type checking phase
    # to avoid circular imports
    from ..utils import (
        urlutil,
    )

    # TODO: make a protocol class for this
    NodeConstants = Any

    # TODO: create a Protocol class, since importing uimod here causes a cycle
    # that confuses pytype.
    Ui = Any

    # TODO: make a protocol class for this
    Vfs = Any

# Local repository feature string.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'

# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'

# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'

# Repository supports being stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'

# Repository supports (at least) some sidedata to be stored
REPO_FEATURE_SIDE_DATA = b'side-data'

# Files storage may lack data for all ancestors.
REPO_FEATURE_SHALLOW_FILE_STORAGE = b'shallowfilestorage'

REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
REVISION_FLAG_HASCOPIESINFO = 1 << 12

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED
    | REVISION_FLAG_ELLIPSIS
    | REVISION_FLAG_EXTSTORED
    | REVISION_FLAG_HASCOPIESINFO
)

CG_DELTAMODE_STD = b'default'
CG_DELTAMODE_PREV = b'previous'
CG_DELTAMODE_FULL = b'fulltext'
CG_DELTAMODE_P1 = b'p1'

## Cache related constants:
#
# Used to control which cache should be warmed in a repo.updatecaches(…) call.

# Warm branchmaps of all known repoview's filter-level
CACHE_BRANCHMAP_ALL = b"branchmap-all"

# Warm branchmaps of repoview's filter-level used by server
CACHE_BRANCHMAP_SERVED = b"branchmap-served"

# Warm internal changelog cache (eg: persistent nodemap)
CACHE_CHANGELOG_CACHE = b"changelog-cache"

# Check if a branchmap can use the "pure topo" mode
CACHE_BRANCHMAP_DETECT_PURE_TOPO = b"branchmap-detect-pure-topo"

# Warm full manifest cache
CACHE_FULL_MANIFEST = b"full-manifest"

# Warm file-node-tags cache
CACHE_FILE_NODE_TAGS = b"file-node-tags"

# Warm internal manifestlog cache (eg: persistent nodemap)
CACHE_MANIFESTLOG_CACHE = b"manifestlog-cache"

# Warm rev branch cache
CACHE_REV_BRANCH = b"rev-branch-cache"

# Warm tags' cache for default repoview
CACHE_TAGS_DEFAULT = b"tags-default"

# Warm tags' cache for repoview's filter-level used by server
CACHE_TAGS_SERVED = b"tags-served"

# The caches to warm by default after a simple transaction
# (this is a mutable set to let extensions update it)
CACHES_DEFAULT = {
    CACHE_BRANCHMAP_SERVED,
}

# The caches to warm when warming all of them
# (this is a mutable set to let extensions update it)
CACHES_ALL = {
    CACHE_BRANCHMAP_SERVED,
    CACHE_BRANCHMAP_ALL,
    CACHE_BRANCHMAP_DETECT_PURE_TOPO,
    CACHE_REV_BRANCH,
    CACHE_CHANGELOG_CACHE,
    CACHE_FILE_NODE_TAGS,
    CACHE_FULL_MANIFEST,
    CACHE_MANIFESTLOG_CACHE,
    CACHE_TAGS_DEFAULT,
    CACHE_TAGS_SERVED,
}

# The caches to warm by default after a clone
# (this is a mutable set to let extensions update it)
CACHES_POST_CLONE = CACHES_ALL.copy()
CACHES_POST_CLONE.discard(CACHE_FILE_NODE_TAGS)
CACHES_POST_CLONE.discard(CACHE_REV_BRANCH)
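
# Illustrative sketch (not part of the interface definitions, shown commented
# out so it never runs at import time): because the ``CACHES_*`` sets above
# are mutable, an extension can register its own cache key so it gets warmed
# alongside the built-in ones. ``CACHE_MY_EXT`` is a hypothetical name used
# only for this example.
#
#     CACHE_MY_EXT = b"my-ext-cache"
#     CACHES_DEFAULT.add(CACHE_MY_EXT)     # warm after simple transactions
#     CACHES_ALL.add(CACHE_MY_EXT)         # warm when everything is warmed
#     CACHES_POST_CLONE.add(CACHE_MY_EXT)  # warm right after a clone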


class _ipeerconnection(Protocol):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """

    ui: Ui
    """ui.ui instance"""

    path: urlutil.path | None
    """a urlutil.path instance or None"""

    def url(self):
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local(self):
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def canpush(self):
        """Returns a boolean indicating if this peer can be pushed to."""

    def close(self):
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """


class ipeercapabilities(Protocol):
    """Peer sub-interface related to capabilities."""

    def capable(self, name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(self, name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """


class ipeercommands(Protocol):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap(self):
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities(self):
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def get_cached_bundle_inline(self, path):
        """Retrieve a clonebundle across the wire.

        Returns a chunkbuffer.
        """

    def clonebundles(self):
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(self, source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads(self):
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(self, nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(self, namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(self, key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(self, namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out(self):
        """Obtain streaming clone data.

        A successful result should be a generator of data chunks.
        """

    def unbundle(self, bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """


class ipeerlegacycommands(Protocol):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(self, pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(self, nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(self, nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(self, bases, heads, source):
        pass


class ipeercommandexecutor(Protocol):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(self, name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands(self):
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close(self):
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """


class ipeerrequests(Protocol):
    """Interface for executing commands on a peer."""

    limitedarguments: bool
    """True if the peer cannot receive large argument values for commands."""

    def commandexecutor(self):
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """
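
# Illustrative sketch of the executor pattern described above (commented out;
# not part of the interface definitions): issue one or more commands inside
# the context manager, then read ``result()`` once all requests have been
# made. ``remote`` stands for any object implementing ``ipeerrequests``;
# command availability depends on the peer.
#
#     with remote.commandexecutor() as e:
#         heads_fut = e.callcommand(b'heads', {})
#         known_fut = e.callcommand(b'known', {b'nodes': some_nodes})
#     heads = heads_fut.result()
#     known = known_fut.result()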


class peer(_ipeerconnection, ipeercapabilities, ipeerrequests, Protocol):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """

    limitedarguments: bool = False

    def __init__(self, ui, path=None, remotehidden=False):
        self.ui = ui
        self.path = path

    def capable(self, name):
        # TODO: this class should maybe subclass ipeercommands too, otherwise it
        # is assuming whatever uses this as a mixin also has this interface.
        caps = self.capabilities()  # pytype: disable=attribute-error

        if name in caps:
            return True

        name = b'%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name) :]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _(
                b'cannot %s; remote repository does not support the '
                b'\'%s\' capability'
            )
            % (purpose, name)
        )
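
# Illustrative sketch of how ``capable()``/``requirecap()`` behave (commented
# out; not part of the interface definitions): boolean capabilities yield
# True/False, while ``name=value`` capabilities yield the text after the
# ``=``. ``remote`` stands for any ``peer`` instance and ``somefeature`` is a
# hypothetical capability name used only for this example.
#
#     if remote.capable(b'somefeature'):
#         ...  # True, or the b'...' value for a name=value capability
#     remote.requirecap(b'somefeature', b'perform this operation')
#     # raises error.CapabilityError if 'somefeature' is unsupported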


class iverifyproblem(Protocol):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """

    warning: bytes | None
    """Message indicating a non-fatal problem."""

    error: bytes | None
    """Message indicating a fatal problem."""

    node: bytes | None
    """Revision encountering the problem.

    ``None`` means the problem doesn't apply to a single revision.
    """


class irevisiondelta(Protocol):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node: bytes
    """20 byte node of this revision."""

    p1node: bytes
    """20 byte node of 1st parent of this revision."""

    p2node: bytes
    """20 byte node of 2nd parent of this revision."""

    # TODO: is this really optional? revlog.revlogrevisiondelta defaults to None
    linknode: bytes | None
    """20 byte node of the changelog revision this node is linked to."""

    flags: int
    """2 bytes of integer flags that apply to this revision.

    This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
    """

    basenode: bytes
    """20 byte node of the revision this data is a delta against.

    ``nullid`` indicates that the revision is a full revision and not
    a delta.
    """

    baserevisionsize: int | None
    """Size of base revision this delta is against.

    May be ``None`` if ``basenode`` is ``nullid``.
    """

    # TODO: is this really optional? (Seems possible in
    # storageutil.emitrevisions()).
    revision: bytes | None
    """Raw fulltext of revision data for this node."""

    delta: bytes | None
    """Delta between ``basenode`` and ``node``.

    Stored in the bdiff delta format.
    """

    sidedata: bytes | None
    """Raw sidedata bytes for the given revision."""

    protocol_flags: int
    """Single byte of integer flags that can influence the protocol.

    This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
    """
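
# Illustrative sketch of consuming an ``irevisiondelta`` (commented out; not
# part of the interface definitions): exactly one of ``revision`` and
# ``delta`` is expected to be populated, and ``flags`` is checked bitwise
# against the ``REVISION_FLAG_*`` constants defined above. ``rdelta`` stands
# for any object conforming to the interface.
#
#     if rdelta.flags & REVISION_FLAG_CENSORED:
#         ...  # revision content was censored
#     if rdelta.delta is not None:
#         ...  # apply the bdiff delta on top of ``rdelta.basenode``
#     elif rdelta.revision is not None:
#         ...  # full revision text is available directly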


class ifilerevisionssequence(Protocol):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__(self):
        """The total number of revisions."""

    def __getitem__(self, rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
            Contains the offset and flags for the revision. 64-bit unsigned
            integer where first 6 bytes are the offset and the next 2 bytes
            are flags. The offset can be 0 if it is not used by the store.
        compressed size
            Size of the revision data in the store. It can be 0 if it isn't
            needed by the store.
        uncompressed size
            Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
            Revision number of revision the delta for storage is encoded
            against. -1 indicates not encoded against a base revision.
        link revision
            Revision number of changelog revision this entry is related to.
        p1 revision
            Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
            Revision number of 2nd parent. -1 if no 2nd parent.
        node
            Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(self, rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""
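
# Illustrative sketch of reading one entry from such a sequence (commented
# out; not part of the interface definitions), unpacking the 8-tuple
# documented in ``__getitem__``. ``index`` stands for any object implementing
# ``ifilerevisionssequence``; the offset/flags split assumes the packing
# described above (high 6 bytes offset, low 2 bytes flags).
#
#     entry = index[rev]
#     offset_flags, comp_size, uncomp_size, base, link, p1, p2, node = entry
#     offset = offset_flags >> 16    # upper 6 bytes
#     flags = offset_flags & 0xFFFF  # lower 2 bytes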


class ifileindex(Protocol):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """

    nullid: bytes
    """node for the null revision for use as delta base."""

    def __len__(self):
        """Obtain the number of revisions stored for this file."""

    def __iter__(self):
        """Iterate over revision numbers for this file."""

    def hasnode(self, node):
        """Returns a bool indicating if a node is known to this store.

        Implementations must only return True for full, binary node values:
        hex nodes, revision numbers, and partial node matches must be
        rejected.

        The null node is never present.
        """

    def revs(self, start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(self, node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(self, rev):
        """Like parents() but operates on revision numbers."""

    def rev(self, node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(self, rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(self, node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(self, rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(self, rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(self, node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(self, revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(self, start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(self, node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """
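
# Illustrative sketch of walking ancestry with the index interface above
# (commented out; not part of the interface definitions): translate a node to
# a revision number, then follow first parents via ``parentrevs()`` until the
# null revision (-1) is reached. ``fl`` stands for any ``ifileindex``
# implementation, such as a filelog.
#
#     rev = fl.rev(node)
#     while rev != -1:
#         p1, p2 = fl.parentrevs(rev)
#         ...  # inspect fl.node(rev), fl.linkrev(rev), etc.
#         rev = p1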


class ifiledata(Protocol):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """

    def size(self, rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def revision(self, node):
        """Obtain fulltext data for a node.

        Any storage transformations are applied before the data is returned.
        Use ``rawdata()`` to obtain the data with non-raw storage
        transformations not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def rawdata(self, node):
        """Obtain raw data for a node."""

    def read(self, node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(self, node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(self, node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=CG_DELTAMODE_STD,
    ):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and behavior
        of this mode is storage-dependent.) ``nodes`` ordering can force
        revisions to be emitted before their ancestors, so consumers should
        use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if needed.

        If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
        all revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against its
        1st parent.
        """
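
# Illustrative sketch of driving ``emitrevisions()`` (commented out; not part
# of the interface definitions): request revision data for a set of nodes and
# handle each emitted ``irevisiondelta`` as in the sketch following that
# class above. ``store`` stands for any ``ifiledata`` implementation.
#
#     for rdelta in store.emitrevisions(
#         nodes,
#         revisiondata=True,
#         assumehaveparentrevisions=True,
#     ):
#         ...  # rdelta.delta or rdelta.revision is populated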


class ifilemutation(Protocol):
    """Storage interface for mutation events of a tracked file."""

    def add(self, filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(
        self,
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=0,
        cachedelta=None,
    ):
        """Add a new revision to the store and return its number.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage. ``flags`` is a bitwise value composed
        of the various ``REVISION_FLAG_*`` constants.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        maybemissingparents=False,
    ):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``alwayscache`` instructs the lower layers to cache the content of the
        newly added revision, even if it needs to be explicitly computed.
        This used to be the default when ``addrevisioncb`` was provided up to
        Mercurial 5.8.

        ``addrevisioncb`` should be called for each new rev as it is committed.
        ``duplicaterevisioncb`` should be called for all revs with a
        pre-existing node.

        ``maybemissingparents`` is a bool indicating whether the incoming
        data may reference parents/ancestor revisions that aren't present.
        This flag is set when receiving data into a "shallow" store that
        doesn't hold all history.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

    def censorrevision(self, tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(self, minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revisions numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(self, minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """
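
# Illustrative sketch of the ``deltas`` entries consumed by ``addgroup()``
# above (commented out; not part of the interface definitions): each entry is
# the documented 7-tuple, with ``delta`` holding mpatch data against
# ``deltabase``. All variable names are placeholders.
#
#     deltas = [
#         (node, p1node, p2node, linknode, deltabase, mpatch_data, flags),
#         ...
#     ]
#     store.addgroup(deltas, linkmapper, transaction)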


class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    def files(self):
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same name. The arguments are
        bools indicating whether to calculate and obtain that data.

        exclusivefiles
            Iterable of (vfs, path) describing files that are exclusively
            used to back storage for this tracked path.

        sharedfiles
            Iterable of (vfs, path) describing files that are used to back
            storage for this tracked path. Those files may also provide storage
            for other stored entities.

        revisionscount
            Number of revisions available for retrieval.

        trackedsize
            Total size in bytes of all tracked revisions. This is a sum of the
            length of the fulltext of all revisions.

        storedsize
            Total size in bytes used to store data for all tracked revisions.
            This is commonly less than ``trackedsize`` due to internal usage
            of deltas rather than fulltext revisions.

        Not all storage backends may support all queries or have a reasonable
        value to use. In that case, the value should be set to ``None`` and
        callers are expected to handle this special value.
        """

    def verifyintegrity(self, state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        If individual revisions cannot have their revision content resolved,
        the method is expected to set the ``skipread`` key to a set of nodes
        that encountered problems. If set, the method can also add the node(s)
        to ``safe_renamed`` in order to indicate nodes that may perform the
        rename checks with currently accessible data.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """


class idirs(Protocol):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(self, path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(self, path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__(self):
        """Iterate over the directories in this collection of paths."""

    def __contains__(self, path):
        """Whether a specific directory is in this collection."""


class imanifestdict(Protocol):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(self, path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(self, path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__(self):
        """Return the number of entries in the manifest."""

    def __nonzero__(self):
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def set(self, path, node, flags):
        """Define the node value and flags for a path in the manifest.

        Equivalent to __setitem__ followed by setflag, but can be more efficient.
        """

    def __setitem__(self, path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(self, path):
        """Whether a path exists in the manifest."""

    def __delitem__(self, path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__(self):
        """Iterate over paths in the manifest."""

    def iterkeys(self):
        """Iterate over paths in the manifest."""

    def keys(self):
        """Obtain a list of paths in the manifest."""

    def filesnotin(self, other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs(self):
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(self, dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def walk(self, match):
        """Generator of paths in manifest satisfying a matcher.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(self, other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

    def setflag(self, path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(self, path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(self, path):
        """Return the flags value for a path (default: empty bytestring)."""

    def copy(self):
        """Return a copy of this manifest."""

    def items(self):
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems(self):
        """Identical to items()."""

    def iterentries(self):
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text(self):
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(self, base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.

        If this manifest implementation can't support ``fastdelta()``,
        raise ``mercurial.manifest.FastdeltaUnavailable``.
        """
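
# Illustrative sketch of consuming ``imanifestdict.diff()`` (commented out;
# not part of the interface definitions): the result maps each differing path
# to ``((node1, flag1), (node2, flag2))``; the node is expected to be None on
# the side where the path is absent. ``m1`` and ``m2`` stand for any two
# ``imanifestdict`` instances.
#
#     for path, ((node1, flag1), (node2, flag2)) in m1.diff(m2).items():
#         if node1 is None:
#             ...  # path only exists in m2
#         elif node2 is None:
#             ...  # path only exists in m1
#         else:
#             ...  # path changed (node and/or flags differ)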


class imanifestrevisionbase(Protocol):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def copy(self):
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read(self):
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """


class imanifestrevisionstored(imanifestrevisionbase, Protocol):
    """Interface representing a manifest revision committed to storage."""

    @abc.abstractmethod
    def node(self) -> bytes:
        """The binary node for this manifest."""

    parents: list[bytes]
    """List of binary nodes that are parents for this manifest revision."""

    @abc.abstractmethod
    def readdelta(self, shallow: bool = False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest
        representing those differences is constructed.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag. This only applies if the underlying manifest supports it.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    @abc.abstractmethod
    def read_any_fast_delta(
        self,
        valid_bases: Collection[int] | None = None,
        *,
        shallow: bool = False,
    ):
        """Read some manifest information as fast as possible.

        This might return a "delta", a manifest object containing only the
        files changed compared to another revision. The `valid_bases` argument
        controls the set of revisions that might be used as a base.

        If no delta can be retrieved quickly, a full read of the manifest will
        be performed instead.

        The function returns a tuple with two elements. The first one is the
        delta base used (or None if we did a full read), the second one is the
        manifest information.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag. This only applies if the underlying manifest supports it.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    @abc.abstractmethod
    def read_delta_parents(self, *, shallow: bool = False, exact: bool = True):
        """Return a diff from this revision against both parents.

        If `exact` is False, this might return a superset of the diff,
        containing files that are actually present as-is in one of the parents.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag. This only applies if the underlying manifest supports it.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    @abc.abstractmethod
    def read_delta_new_entries(self, *, shallow: bool = False):
        """Return a manifest containing just the entries that might be new to
        the repository.

        This is often equivalent to a diff against both parents, but without
        guarantee. For performance reasons, it might contain more files in
        some cases.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag. This only applies if the underlying manifest supports it.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    @abc.abstractmethod
    def readfast(self, shallow: bool = False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    @abc.abstractmethod
    def find(self, key: bytes) -> tuple[bytes, bytes]:
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """
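
# Illustrative sketch of using ``read_any_fast_delta()`` as documented above
# (commented out; not part of the interface definitions): the returned
# 2-tuple carries the delta base actually used (or None for a full read) and
# the manifest information itself. ``mctx`` stands for any
# ``imanifestrevisionstored`` implementation and ``known_revs`` is a
# placeholder for the caller's set of acceptable base revisions.
#
#     base, mdict = mctx.read_any_fast_delta(valid_bases=known_revs)
#     if base is None:
#         ...  # mdict is a full manifest read
#     else:
#         ...  # mdict only describes changes relative to revision ``base``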


class imanifestrevisionwritable(imanifestrevisionbase, Protocol):
    """Interface representing a manifest revision that can be committed."""

    @abc.abstractmethod
    def write(
        self, transaction, linkrev, p1node, p2node, added, removed, match=None
    ):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to provide
        the full manifest in the future for any directories written (these
        manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """
Matt Harbison
|
r53342 | class imanifeststorage(Protocol): | ||
Pulkit Goyal
|
r43078 | """Storage interface for manifest data.""" | ||
Matt Harbison
|
r53375 | nodeconstants: NodeConstants | ||
"""nodeconstants used by the current repository.""" | ||||
tree: bytes | ||||
"""The path to the directory this manifest tracks. | ||||
The empty bytestring represents the root manifest. | ||||
""" | ||||
index: ifilerevisionssequence | ||||
"""An ``ifilerevisionssequence`` instance.""" | ||||
opener: Vfs | ||||
"""VFS opener to use to access underlying files used for storage. | ||||
TODO this is revlog specific and should not be exposed. | ||||
""" | ||||
# TODO: finish type hints | ||||
fulltextcache: dict | ||||
"""Dict with cache of fulltexts. | ||||
TODO this doesn't feel appropriate for the storage interface. | ||||
""" | ||||
Pulkit Goyal
|
r43078 | |||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def __len__(self): | ||
Pulkit Goyal
|
r43078 | """Obtain the number of revisions stored for this manifest.""" | ||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def __iter__(self): | ||
Pulkit Goyal
|
r43078 | """Iterate over revision numbers for this manifest.""" | ||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def rev(self, node): | ||
Pulkit Goyal
|
r43078 | """Obtain the revision number given a binary node. | ||
Raises ``error.LookupError`` if the node is not known. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def node(self, rev): | ||
Pulkit Goyal
|
r43078 | """Obtain the node value given a revision number. | ||
Raises ``error.LookupError`` if the revision is not known. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def lookup(self, value): | ||
Pulkit Goyal
|
r43078 | """Attempt to resolve a value to a node. | ||
Value can be a binary node, hex node, revision number, or a bytes | ||||
that can be converted to an integer. | ||||
Raises ``error.LookupError`` if a ndoe could not be resolved. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def parents(self, node): | ||
Pulkit Goyal
|
r43078 | """Returns a 2-tuple of parent nodes for a node. | ||
Values will be ``nullid`` if the parent is empty. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def parentrevs(self, rev): | ||
Pulkit Goyal
|
r43078 | """Like parents() but operates on revision numbers.""" | ||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def linkrev(self, rev): | ||
Pulkit Goyal
|
r43078 | """Obtain the changeset revision number a revision is linked to.""" | ||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def revision(self, node): | ||
Pulkit Goyal
|
r43078 | """Obtain fulltext data for a node.""" | ||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def rawdata(self, node): | ||
Pulkit Goyal
|
r43078 | """Obtain raw data for a node.""" | ||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def revdiff(self, rev1, rev2): | ||
Pulkit Goyal
|
r43078 | """Obtain a delta between two revision numbers. | ||
The returned data is the result of ``bdiff.bdiff()`` on the raw | ||||
revision data. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def cmp(self, node, fulltext): | ||
Pulkit Goyal
|
r43078 | """Compare fulltext to another revision. | ||
Returns True if the fulltext is different from what is stored. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Augie Fackler
|
r43346 | def emitrevisions( | ||
Matt Harbison
|
r53343 | self, | ||
Augie Fackler
|
r43346 | nodes, | ||
nodesorder=None, | ||||
revisiondata=False, | ||||
assumehaveparentrevisions=False, | ||||
): | ||||
Pulkit Goyal
|
r43078 | """Produce ``irevisiondelta`` describing revisions. | ||
See the documentation for ``ifiledata`` for more. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Joerg Sonnenberger
|
r46373 | def addgroup( | ||
Matt Harbison
|
r53343 | self, | ||
Joerg Sonnenberger
|
r46373 | deltas, | ||
linkmapper, | ||||
transaction, | ||||
addrevisioncb=None, | ||||
duplicaterevisioncb=None, | ||||
): | ||||
Pulkit Goyal
|
r43078 | """Process a series of deltas for storage. | ||
See the documentation in ``ifilemutation`` for more. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def rawsize(self, rev): | ||
Pulkit Goyal
|
r43078 | """Obtain the size of tracked data. | ||
Is equivalent to ``len(m.rawdata(node))``. | ||||
TODO this method is only used by upgrade code and may be removed. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def getstrippoint(self, minlink): | ||
Pulkit Goyal
|
r43078 | """Find minimum revision that must be stripped to strip a linkrev. | ||
See the documentation in ``ifilemutation`` for more. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def strip(self, minlink, transaction): | ||
Pulkit Goyal
|
r43078 | """Remove storage of items starting at a linkrev. | ||
See the documentation in ``ifilemutation`` for more. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def checksize(self): | ||
Pulkit Goyal
|
r43078 | """Obtain the expected sizes of backing files. | ||
TODO this is used by verify and it should not be part of the interface. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def files(self): | ||
Pulkit Goyal
|
r43078 | """Obtain paths that are backing storage for this manifest. | ||
TODO this is used by verify and there should probably be a better API | ||||
for this functionality. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def deltaparent(self, rev): | ||
Pulkit Goyal
|
r43078 | """Obtain the revision that a revision is delta'd against. | ||
TODO delta encoding is an implementation detail of storage and should | ||||
not be exposed to the storage interface. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def clone(self, tr, dest, **kwargs): | ||
Pulkit Goyal
|
r43078 | """Clone this instance to another.""" | ||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def clearcaches(self, clear_persisted_data=False): | ||
Pulkit Goyal
|
r43078 | """Clear any caches associated with this instance.""" | ||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def dirlog(self, d): | ||
Pulkit Goyal
|
r43078 | """Obtain a manifest storage instance for a tree.""" | ||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Augie Fackler
|
r43346 | def add( | ||
Matt Harbison
|
r53343 | self, | ||
m, | ||||
transaction, | ||||
link, | ||||
p1, | ||||
p2, | ||||
added, | ||||
removed, | ||||
readtree=None, | ||||
match=None, | ||||
Augie Fackler
|
r43346 | ): | ||
Pulkit Goyal
|
r43078 | """Add a revision to storage. | ||
``m`` is an object conforming to ``imanifestdict``. | ||||
``link`` is the linkrev revision number. | ||||
``p1`` and ``p2`` are the parent revision numbers. | ||||
``added`` and ``removed`` are iterables of added and removed paths, | ||||
respectively. | ||||
``readtree`` is a function that can be used to read the child tree(s) | ||||
when recursively writing the full tree structure when using | ||||
tree manifests. | ||||
``match`` is a matcher that can be used to hint to storage that not all | ||||
paths must be inspected; this is an optimization and can be safely | ||||
ignored. Note that the storage must still be able to reproduce a full | ||||
manifest including files that did not match. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Augie Fackler
|
r43346 | def storageinfo( | ||
Matt Harbison
|
r53343 | self, | ||
Augie Fackler
|
r43346 | exclusivefiles=False, | ||
sharedfiles=False, | ||||
revisionscount=False, | ||||
trackedsize=False, | ||||
storedsize=False, | ||||
): | ||||
Pulkit Goyal
|
r43078 | """Obtain information about storage for this manifest's data. | ||
See ``ifilestorage.storageinfo()`` for a description of this method. | ||||
This one behaves the same way, except for manifest data. | ||||
""" | ||||
Matt Harbison
|
r53376 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def get_revlog(self): | ||
r51530 | """return an actual revlog instance if any | |||
This exist because a lot of code leverage the fact the underlying | ||||
storage is a revlog for optimization, so giving simple way to access | ||||
the revlog instance helps such code. | ||||
""" | ||||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
r53342 | class imanifestlog(Protocol): | ||
Pulkit Goyal
|
r43078 | """Interface representing a collection of manifest snapshots. | ||
Represents the root manifest in a repository. | ||||
Also serves as a means to access nested tree manifests and to cache | ||||
tree manifests. | ||||
""" | ||||
Matt Harbison
|
r53379 | nodeconstants: NodeConstants | ||
"""nodeconstants used by the current repository.""" | ||||
narrowed: bool | ||||
"""True, is the manifest is narrowed by a matcher""" | ||||
r52678 | ||||
Matt Harbison
|
r53380 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def __getitem__(self, node): | ||
Pulkit Goyal
|
r43078 | """Obtain a manifest instance for a given binary node. | ||
Equivalent to calling ``self.get('', node)``. | ||||
The returned object conforms to the ``imanifestrevisionstored`` | ||||
interface. | ||||
""" | ||||
Matt Harbison
|
r53380 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def get(self, tree, node, verify=True): | ||
Pulkit Goyal
|
r43078 | """Retrieve the manifest instance for a given directory and binary node. | ||
``node`` always refers to the node of the root manifest (which will be | ||||
the only manifest if flat manifests are being used). | ||||
If ``tree`` is the empty string, the root manifest is returned. | ||||
Otherwise the manifest for the specified directory will be returned | ||||
(requires tree manifests). | ||||
If ``verify`` is True, ``LookupError`` is raised if the node is not | ||||
known. | ||||
The returned object conforms to the ``imanifestrevisionstored`` | ||||
interface. | ||||
""" | ||||
Matt Harbison
|
r53380 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def getstorage(self, tree): | ||
Pulkit Goyal
|
r43078 | """Retrieve an interface to storage for a particular tree. | ||
If ``tree`` is the empty bytestring, storage for the root manifest will | ||||
be returned. Otherwise storage for a tree manifest is returned. | ||||
TODO formalize interface for returned object. | ||||
""" | ||||
Matt Harbison
|
r53380 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def clearcaches(self, clear_persisted_data: bool = False) -> None: | ||
Pulkit Goyal
|
r43078 | """Clear caches associated with this collection.""" | ||
Matt Harbison
|
r53380 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def rev(self, node): | ||
Pulkit Goyal
|
r43078 | """Obtain the revision number for a binary node. | ||
Raises ``error.LookupError`` if the node is not known. | ||||
""" | ||||
Matt Harbison
|
r53380 | @abc.abstractmethod | ||
Matt Harbison
|
r53343 | def update_caches(self, transaction): | ||
r45291 | """update whatever cache are relevant for the used storage.""" | |||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
r53342 | class ilocalrepositoryfilestorage(Protocol): | ||
Pulkit Goyal
|
r43078 | """Local repository sub-interface providing access to tracked file storage. | ||
This interface defines how a repository accesses storage for a single | ||||
tracked file path. | ||||
""" | ||||
Matt Harbison
|
r53343 | def file(self, f): | ||
Pulkit Goyal
|
r43078 | """Obtain a filelog for a tracked path. | ||
The returned type conforms to the ``ifilestorage`` interface. | ||||
""" | ||||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
r53342 | class ilocalrepositorymain(Protocol): | ||
Pulkit Goyal
|
r43078 | """Main interface for local repositories. | ||
This currently captures the reality of things - not how things should be. | ||||
""" | ||||
Joerg Sonnenberger
|
r47538 | nodeconstants = interfaceutil.Attribute( | ||
"""Constant nodes matching the hash function used by the repository.""" | ||||
) | ||||
nullid = interfaceutil.Attribute( | ||||
"""null revision for the hash function used by the repository.""" | ||||
) | ||||
Pulkit Goyal
|
r43078 | supported = interfaceutil.Attribute( | ||
Augie Fackler
|
r43346 | """Set of requirements that this repo is capable of opening.""" | ||
) | ||||
Pulkit Goyal
|
r43078 | |||
requirements = interfaceutil.Attribute( | ||||
Augie Fackler
|
r43346 | """Set of requirements this repo uses.""" | ||
) | ||||
Pulkit Goyal
|
r43078 | |||
features = interfaceutil.Attribute( | ||||
"""Set of "features" this repository supports. | ||||
A "feature" is a loosely-defined term. It can refer to a feature | ||||
in the classical sense or can describe an implementation detail | ||||
of the repository. For example, a ``readonly`` feature may denote | ||||
the repository as read-only. Or a ``revlogfilestore`` feature may | ||||
denote that the repository is using revlogs for file storage. | ||||
The intent of features is to provide a machine-queryable mechanism | ||||
for repo consumers to test for various repository characteristics. | ||||
Features are similar to ``requirements``. The main difference is that | ||||
requirements are stored on-disk and represent requirements to open the | ||||
repository. Features are more run-time capabilities of the repository | ||||
and more granular capabilities (which may be derived from requirements). | ||||
Augie Fackler
|
r43346 | """ | ||
) | ||||
Pulkit Goyal
|
r43078 | |||
filtername = interfaceutil.Attribute( | ||||
Augie Fackler
|
r43346 | """Name of the repoview that is active on this repo.""" | ||
) | ||||
Pulkit Goyal
|
r43078 | |||
r51189 | vfs_map = interfaceutil.Attribute( | |||
"""a bytes-key → vfs mapping used by transaction and others""" | ||||
) | ||||
Pulkit Goyal
|
r43078 | wvfs = interfaceutil.Attribute( | ||
Augie Fackler
|
r43346 | """VFS used to access the working directory.""" | ||
) | ||||
Pulkit Goyal
|
r43078 | |||
vfs = interfaceutil.Attribute( | ||||
"""VFS rooted at the .hg directory. | ||||
Used to access repository data not in the store. | ||||
Augie Fackler
|
r43346 | """ | ||
) | ||||
Pulkit Goyal
|
r43078 | |||
svfs = interfaceutil.Attribute( | ||||
"""VFS rooted at the store. | ||||
Used to access repository data in the store. Typically .hg/store. | ||||
But can point elsewhere if the store is shared. | ||||
Augie Fackler
|
r43346 | """ | ||
) | ||||
Pulkit Goyal
|
r43078 | |||
root = interfaceutil.Attribute( | ||||
Augie Fackler
|
r43346 | """Path to the root of the working directory.""" | ||
) | ||||
path = interfaceutil.Attribute("""Path to the .hg directory.""") | ||||
Pulkit Goyal
|
r43078 | |||
origroot = interfaceutil.Attribute( | ||||
Augie Fackler
|
r43346 | """The filesystem path that was used to construct the repo.""" | ||
) | ||||
Pulkit Goyal
|
r43078 | |||
auditor = interfaceutil.Attribute( | ||||
"""A pathauditor for the working directory. | ||||
This checks if a path refers to a nested repository. | ||||
Operates on the filesystem. | ||||
Augie Fackler
|
r43346 | """ | ||
) | ||||
Pulkit Goyal
|
r43078 | |||
nofsauditor = interfaceutil.Attribute( | ||||
"""A pathauditor for the working directory. | ||||
This is like ``auditor`` except it doesn't do filesystem checks. | ||||
Augie Fackler
|
r43346 | """ | ||
) | ||||
Pulkit Goyal
|
r43078 | |||
baseui = interfaceutil.Attribute( | ||||
Augie Fackler
|
r43346 | """Original ui instance passed into constructor.""" | ||
) | ||||
ui = interfaceutil.Attribute("""Main ui instance for this instance.""") | ||||
Pulkit Goyal
|
r43078 | |||
sharedpath = interfaceutil.Attribute( | ||||
Augie Fackler
|
r43346 | """Path to the .hg directory of the repo this repo was shared from.""" | ||
) | ||||
store = interfaceutil.Attribute("""A store instance.""") | ||||
spath = interfaceutil.Attribute("""Path to the store.""") | ||||
sjoin = interfaceutil.Attribute("""Alias to self.store.join.""") | ||||
Pulkit Goyal
|
r43078 | |||
cachevfs = interfaceutil.Attribute( | ||||
"""A VFS used to access the cache directory. | ||||
Typically .hg/cache. | ||||
Augie Fackler
|
r43346 | """ | ||
) | ||||
Pulkit Goyal
|
r43078 | |||
wcachevfs = interfaceutil.Attribute( | ||||
"""A VFS used to access the cache directory dedicated to working copy | ||||
Typically .hg/wcache. | ||||
Augie Fackler
|
r43346 | """ | ||
) | ||||
Pulkit Goyal
|
r43078 | |||
filteredrevcache = interfaceutil.Attribute( | ||||
Augie Fackler
|
r43346 | """Holds sets of revisions to be filtered.""" | ||
) | ||||
names = interfaceutil.Attribute("""A ``namespaces`` instance.""") | ||||
Pulkit Goyal
|
r43078 | |||
r43412 | filecopiesmode = interfaceutil.Attribute( | |||
"""The way files copies should be dealt with in this repo.""" | ||||
) | ||||
Matt Harbison
|
r53343 | def close(self): | ||
Pulkit Goyal
|
r43078 | """Close the handle on this repository.""" | ||
Matt Harbison
|
r53343 | def peer(self, path=None): | ||
Pulkit Goyal
|
r43078 | """Obtain an object conforming to the ``peer`` interface.""" | ||
Matt Harbison
|
r53343 | def unfiltered(self): | ||
Pulkit Goyal
|
r43078 | """Obtain an unfiltered/raw view of this repo.""" | ||
Matt Harbison
|
r53343 | def filtered(self, name, visibilityexceptions=None): | ||
Pulkit Goyal
|
r43078 | """Obtain a named view of this repository.""" | ||
Augie Fackler
|
r43346 | obsstore = interfaceutil.Attribute("""A store of obsolescence data.""") | ||
changelog = interfaceutil.Attribute("""A handle on the changelog revlog.""") | ||||
Pulkit Goyal
|
r43078 | |||
manifestlog = interfaceutil.Attribute( | ||||
"""An instance conforming to the ``imanifestlog`` interface. | ||||
Provides access to manifests for the repository. | ||||
Augie Fackler
|
r43346 | """ | ||
) | ||||
dirstate = interfaceutil.Attribute("""Working directory state.""") | ||||
Pulkit Goyal
|
r43078 | |||
narrowpats = interfaceutil.Attribute( | ||||
Augie Fackler
|
r43346 | """Matcher patterns for this repository's narrowspec.""" | ||
) | ||||
Pulkit Goyal
|
r43078 | |||
Matt Harbison
|
r53343 | def narrowmatch(self, match=None, includeexact=False): | ||
Pulkit Goyal
|
r43078 | """Obtain a matcher for the narrowspec.""" | ||
Matt Harbison
|
r53343 | def setnarrowpats(self, newincludes, newexcludes): | ||
Pulkit Goyal
|
r43078 | """Define the narrowspec for this repository.""" | ||
Matt Harbison
|
r53343 | def __getitem__(self, changeid): | ||
Pulkit Goyal
|
r43078 | """Try to resolve a changectx.""" | ||
Matt Harbison
|
r53343 | def __contains__(self, changeid): | ||
Pulkit Goyal
|
r43078 | """Whether a changeset exists.""" | ||
Matt Harbison
|
r53343 | def __nonzero__(self): | ||
Pulkit Goyal
|
r43078 | """Always returns True.""" | ||
return True | ||||
__bool__ = __nonzero__ | ||||
Matt Harbison
|
r53343 | def __len__(self): | ||
Pulkit Goyal
|
r43078 | """Returns the number of changesets in the repo.""" | ||
Matt Harbison
|
r53343 | def __iter__(self): | ||
Pulkit Goyal
|
r43078 | """Iterate over revisions in the changelog.""" | ||
Matt Harbison
|
r53343 | def revs(self, expr, *args): | ||
Pulkit Goyal
|
r43078 | """Evaluate a revset. | ||
Emits revisions. | ||||
""" | ||||
Matt Harbison
|
r53343 | def set(self, expr, *args): | ||
Pulkit Goyal
|
r43078 | """Evaluate a revset. | ||
Emits changectx instances. | ||||
""" | ||||
Matt Harbison
|
r53343 | def anyrevs(self, specs, user=False, localalias=None): | ||
Pulkit Goyal
|
r43078 | """Find revisions matching one of the given revsets.""" | ||
Matt Harbison
|
r53343 | def url(self): | ||
Pulkit Goyal
|
r43078 | """Returns a string representing the location of this repo.""" | ||
Matt Harbison
|
r53343 | def hook(self, name, throw=False, **args): | ||
Pulkit Goyal
|
r43078 | """Call a hook.""" | ||
Matt Harbison
|
r53343 | def tags(self): | ||
Pulkit Goyal
|
r43078 | """Return a mapping of tag to node.""" | ||
Matt Harbison
|
r53343 | def tagtype(self, tagname): | ||
Pulkit Goyal
|
r43078 | """Return the type of a given tag.""" | ||
Matt Harbison
|
r53343 | def tagslist(self): | ||
Pulkit Goyal
|
r43078 | """Return a list of tags ordered by revision.""" | ||
Matt Harbison
|
r53343 | def nodetags(self, node): | ||
Pulkit Goyal
|
r43078 | """Return the tags associated with a node.""" | ||
Matt Harbison
|
r53343 | def nodebookmarks(self, node): | ||
Pulkit Goyal
|
r43078 | """Return the list of bookmarks pointing to the specified node.""" | ||
Matt Harbison
|
r53343 | def branchmap(self): | ||
Pulkit Goyal
|
r43078 | """Return a mapping of branch to heads in that branch.""" | ||
Matt Harbison
|
r53343 | def revbranchcache(self): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def register_changeset(self, rev, changelogrevision): | ||
Joerg Sonnenberger
|
r47083 | """Extension point for caches for new nodes. | ||
Multiple consumers are expected to need parts of the changelogrevision, | ||||
so it is provided as an optimization to avoid duplicate lookups. A simple | ||||
cache would be fragile when other revisions are accessed, too.""" | ||||
pass | ||||
Matt Harbison
|
r53343 | def branchtip(self, branchtip, ignoremissing=False): | ||
Pulkit Goyal
|
r43078 | """Return the tip node for a given branch.""" | ||
Matt Harbison
|
r53343 | def lookup(self, key): | ||
Pulkit Goyal
|
r43078 | """Resolve the node for a revision.""" | ||
Matt Harbison
|
r53343 | def lookupbranch(self, key): | ||
Pulkit Goyal
|
r43078 | """Look up the branch name of the given revision or branch name.""" | ||
Matt Harbison
|
r53343 | def known(self, nodes): | ||
Pulkit Goyal
|
r43078 | """Determine whether a series of nodes is known. | ||
Returns a list of bools. | ||||
""" | ||||
Matt Harbison
|
r53343 | def local(self): | ||
Pulkit Goyal
|
r43078 | """Whether the repository is local.""" | ||
return True | ||||
Matt Harbison
|
r53343 | def publishing(self): | ||
Pulkit Goyal
|
r43078 | """Whether the repository is a publishing repository.""" | ||
Matt Harbison
|
r53343 | def cancopy(self): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def shared(self): | ||
Pulkit Goyal
|
r43078 | """The type of shared repository or None.""" | ||
Matt Harbison
|
r53343 | def wjoin(self, f, *insidef): | ||
Pulkit Goyal
|
r43078 | """Calls self.vfs.reljoin(self.root, f, *insidef)""" | ||
Matt Harbison
|
r53343 | def setparents(self, p1, p2): | ||
Pulkit Goyal
|
r43078 | """Set the parent nodes of the working directory.""" | ||
Matt Harbison
|
r53343 | def filectx(self, path, changeid=None, fileid=None): | ||
Pulkit Goyal
|
r43078 | """Obtain a filectx for the given file revision.""" | ||
Matt Harbison
|
r53343 | def getcwd(self): | ||
Pulkit Goyal
|
r43078 | """Obtain the current working directory from the dirstate.""" | ||
Matt Harbison
|
r53343 | def pathto(self, f, cwd=None): | ||
Pulkit Goyal
|
r43078 | """Obtain the relative path to a file.""" | ||
Matt Harbison
|
r53343 | def adddatafilter(self, name, fltr): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def wread(self, filename): | ||
Pulkit Goyal
|
r43078 | """Read a file from wvfs, using data filters.""" | ||
Matt Harbison
|
r53343 | def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs): | ||
Pulkit Goyal
|
r43078 | """Write data to a file in the wvfs, using data filters.""" | ||
Matt Harbison
|
r53343 | def wwritedata(self, filename, data): | ||
Pulkit Goyal
|
r43078 | """Resolve data for writing to the wvfs, using data filters.""" | ||
Matt Harbison
|
r53343 | def currenttransaction(self): | ||
Pulkit Goyal
|
r43078 | """Obtain the current transaction instance or None.""" | ||
Matt Harbison
|
r53343 | def transaction(self, desc, report=None): | ||
Pulkit Goyal
|
r43078 | """Open a new transaction to write to the repository.""" | ||
Matt Harbison
|
r53343 | def undofiles(self): | ||
Pulkit Goyal
|
r43078 | """Returns a list of (vfs, path) for files to undo transactions.""" | ||
Matt Harbison
|
r53343 | def recover(self): | ||
Pulkit Goyal
|
r43078 | """Roll back an interrupted transaction.""" | ||
Matt Harbison
|
r53343 | def rollback(self, dryrun=False, force=False): | ||
Pulkit Goyal
|
r43078 | """Undo the last transaction. | ||
DANGEROUS. | ||||
""" | ||||
Matt Harbison
|
r53343 | def updatecaches(self, tr=None, full=False, caches=None): | ||
Pulkit Goyal
|
r43078 | """Warm repo caches.""" | ||
Matt Harbison
|
r53343 | def invalidatecaches(self): | ||
Pulkit Goyal
|
r43078 | """Invalidate cached data due to the repository mutating.""" | ||
Matt Harbison
|
r53343 | def invalidatevolatilesets(self): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def invalidatedirstate(self): | ||
Pulkit Goyal
|
r43078 | """Invalidate the dirstate.""" | ||
Matt Harbison
|
r53343 | def invalidate(self, clearfilecache=False): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def invalidateall(self): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def lock(self, wait=True): | ||
Pulkit Goyal
|
r43078 | """Lock the repository store and return a lock instance.""" | ||
Matt Harbison
|
r53343 | def currentlock(self): | ||
r51250 | """Return the lock if it's held or None.""" | |||
Matt Harbison
|
r53343 | def wlock(self, wait=True): | ||
Pulkit Goyal
|
r43078 | """Lock the non-store parts of the repository.""" | ||
Matt Harbison
|
r53343 | def currentwlock(self): | ||
Pulkit Goyal
|
r43078 | """Return the wlock if it's held or None.""" | ||
Matt Harbison
|
r53343 | def checkcommitpatterns(self, wctx, match, status, fail): | ||
Pulkit Goyal
|
r43078 | pass | ||
Augie Fackler
|
r43346 | def commit( | ||
Matt Harbison
|
r53343 | self, | ||
Augie Fackler
|
r43347 | text=b'', | ||
Augie Fackler
|
r43346 | user=None, | ||
date=None, | ||||
match=None, | ||||
force=False, | ||||
editor=False, | ||||
extra=None, | ||||
): | ||||
Pulkit Goyal
|
r43078 | """Add a new revision to the repository.""" | ||
Matt Harbison
|
r53343 | def commitctx(self, ctx, error=False, origctx=None): | ||
Pulkit Goyal
|
r43078 | """Commit a commitctx instance to the repository.""" | ||
Matt Harbison
|
r53343 | def destroying(self): | ||
Pulkit Goyal
|
r43078 | """Inform the repository that nodes are about to be destroyed.""" | ||
Matt Harbison
|
r53343 | def destroyed(self): | ||
Pulkit Goyal
|
r43078 | """Inform the repository that nodes have been destroyed.""" | ||
Augie Fackler
|
r43346 | def status( | ||
Matt Harbison
|
r53343 | self, | ||
Augie Fackler
|
r43347 | node1=b'.', | ||
Augie Fackler
|
r43346 | node2=None, | ||
match=None, | ||||
ignored=False, | ||||
clean=False, | ||||
unknown=False, | ||||
listsubrepos=False, | ||||
): | ||||
Pulkit Goyal
|
r43078 | """Convenience method to call repo[x].status().""" | ||
Matt Harbison
|
r53343 | def addpostdsstatus(self, ps): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def postdsstatus(self): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def clearpostdsstatus(self): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def heads(self, start=None): | ||
Pulkit Goyal
|
r43078 | """Obtain list of nodes that are DAG heads.""" | ||
Matt Harbison
|
r53343 | def branchheads(self, branch=None, start=None, closed=False): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def branches(self, nodes): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def between(self, pairs): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def checkpush(self, pushop): | ||
Pulkit Goyal
|
r43078 | pass | ||
Augie Fackler
|
r43346 | prepushoutgoinghooks = interfaceutil.Attribute("""util.hooks instance.""") | ||
Pulkit Goyal
|
r43078 | |||
Matt Harbison
|
r53343 | def pushkey(self, namespace, key, old, new): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def listkeys(self, namespace): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def debugwireargs(self, one, two, three=None, four=None, five=None): | ||
Pulkit Goyal
|
r43078 | pass | ||
Matt Harbison
|
r53343 | def savecommitmessage(self, text): | ||
Pulkit Goyal
|
r43078 | pass | ||
Raphaël Gomès
|
r47846 | def register_sidedata_computer( | ||
Matt Harbison
|
r53343 | self, kind, category, keys, computer, flags, replace=False | ||
Raphaël Gomès
|
r47846 | ): | ||
Raphaël Gomès
|
r47447 | pass | ||
Matt Harbison
|
r53343 | def register_wanted_sidedata(self, category): | ||
Raphaël Gomès
|
r47447 | pass | ||
Augie Fackler
|
r43346 | |||
class completelocalrepository( | ||||
ilocalrepositorymain, ilocalrepositoryfilestorage | ||||
): | ||||
Pulkit Goyal
|
r43078 | """Complete interface for a local repository.""" | ||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
r53342 | class iwireprotocolcommandcacher(Protocol): | ||
Pulkit Goyal
|
r43078 | """Represents a caching backend for wire protocol commands. | ||
Wire protocol version 2 supports transparent caching of many commands. | ||||
To leverage this caching, servers can activate objects that cache | ||||
command responses. Objects handle both cache writing and reading. | ||||
This interface defines how that response caching mechanism works. | ||||
Wire protocol version 2 commands emit a series of objects that are | ||||
serialized and sent to the client. The caching layer exists between | ||||
the invocation of the command function and the sending of its output | ||||
objects to an output layer. | ||||
Instances of this interface represent a binding to a cache that | ||||
can serve a response (in place of calling a command function) and/or | ||||
write responses to a cache for subsequent use. | ||||
When a command request arrives, the following happens with regard | ||||
to this interface: | ||||
1. The server determines whether the command request is cacheable. | ||||
2. If it is, an instance of this interface is spawned. | ||||
3. The cacher is activated in a context manager (``__enter__`` is called). | ||||
4. A cache *key* for that request is derived. This will call the | ||||
instance's ``adjustcachekeystate()`` method so the derivation | ||||
can be influenced. | ||||
5. The cacher is informed of the derived cache key via a call to | ||||
``setcachekey()``. | ||||
6. The cacher's ``lookup()`` method is called to test for presence of | ||||
the derived key in the cache. | ||||
7. If ``lookup()`` returns a hit, that cached result is used in place | ||||
of invoking the command function. ``__exit__`` is called and the instance | ||||
is discarded. | ||||
8. The command function is invoked. | ||||
9. ``onobject()`` is called for each object emitted by the command | ||||
function. | ||||
10. After the final object is seen, ``onfinished()`` is called. | ||||
11. ``__exit__`` is called to signal the end of use of the instance. | ||||
Cache *key* derivation can be influenced by the instance. | ||||
Cache keys are initially derived by a deterministic representation of | ||||
the command request. This includes the command name, arguments, protocol | ||||
version, etc. This initial key derivation is performed by CBOR-encoding a | ||||
data structure and feeding that output into a hasher. | ||||
Instances of this interface can influence this initial key derivation | ||||
via ``adjustcachekeystate()``. | ||||
The instance is informed of the derived cache key via a call to | ||||
``setcachekey()``. The instance must store the key locally so it can | ||||
be consulted on subsequent operations that may require it. | ||||
When constructed, the instance has access to a callable that can be used | ||||
for encoding response objects. This callable receives as its single | ||||
argument an object emitted by a command function. It returns an iterable | ||||
of bytes chunks representing the encoded object. Unless the cacher is | ||||
caching native Python objects in memory or has a way of reconstructing | ||||
the original Python objects, implementations typically call this function | ||||
to produce bytes from the output objects and then store those bytes in | ||||
the cache. When it comes time to re-emit those bytes, they are wrapped | ||||
in a ``wireprototypes.encodedresponse`` instance to tell the output | ||||
layer that they are pre-encoded. | ||||
When receiving the objects emitted by the command function, instances | ||||
can choose what to do with those objects. The simplest thing to do is | ||||
re-emit the original objects. They will be forwarded to the output | ||||
layer and will be processed as if the cacher did not exist. | ||||
Implementations could also choose to not emit objects - instead locally | ||||
buffering objects or their encoded representation. They could then emit | ||||
a single "coalesced" object when ``onfinished()`` is called. In | ||||
this way, the implementation would function as a filtering layer of | ||||
sorts. | ||||
When caching objects, typically the encoded form of the object will | ||||
be stored. Keep in mind that if the original object is forwarded to | ||||
the output layer, it will need to be encoded there as well. For large | ||||
output, this redundant encoding could add overhead. Implementations | ||||
could wrap the encoded object data in ``wireprototypes.encodedresponse`` | ||||
instances to avoid this overhead. | ||||
""" | ||||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
r53343 | def __enter__(self): | ||
Pulkit Goyal
|
r43078 | """Marks the instance as active. | ||
Should return self. | ||||
""" | ||||
Matt Harbison
|
r53343 | def __exit__(self, exctype, excvalue, exctb): | ||
Pulkit Goyal
|
r43078 | """Called when cacher is no longer used. | ||
This can be used by implementations to perform cleanup actions (e.g. | ||||
disconnecting network sockets, aborting a partially cached response). | ||||
""" | ||||
Matt Harbison
|
r53343 | def adjustcachekeystate(self, state): | ||
Pulkit Goyal
|
r43078 | """Influences cache key derivation by adjusting state to derive key. | ||
A dict defining the state used to derive the cache key is passed. | ||||
Implementations can modify this dict to record additional state that | ||||
is wanted to influence key derivation. | ||||
Implementations are *highly* encouraged to not modify or delete | ||||
existing keys. | ||||
""" | ||||
Matt Harbison
|
r53343 | def setcachekey(self, key): | ||
Pulkit Goyal
|
r43078 | """Record the derived cache key for this request. | ||
Instances may mutate the key for internal usage, as desired. e.g. | ||||
instances may wish to prepend the repo name, introduce path | ||||
components for filesystem or URL addressing, etc. Behavior is up to | ||||
the cache. | ||||
Returns a bool indicating if the request is cacheable by this | ||||
instance. | ||||
""" | ||||
Matt Harbison
|
r53343 | def lookup(self): | ||
Pulkit Goyal
|
r43078 | """Attempt to resolve an entry in the cache. | ||
The instance is instructed to look for the cache key that it was | ||||
informed about via the call to ``setcachekey()``. | ||||
If there's no cache hit or the cacher doesn't wish to use the cached | ||||
entry, ``None`` should be returned. | ||||
Else, a dict defining the cached result should be returned. The | ||||
dict may have the following keys: | ||||
objs | ||||
An iterable of objects that should be sent to the client. That | ||||
iterable of objects is expected to be what the command function | ||||
would return if invoked or an equivalent representation thereof. | ||||
""" | ||||
Matt Harbison
|
r53343 | def onobject(self, obj): | ||
Pulkit Goyal
|
r43078 | """Called when a new object is emitted from the command function. | ||
Receives as its argument the object that was emitted from the | ||||
command function. | ||||
This method returns an iterator of objects to forward to the output | ||||
layer. The easiest implementation is a generator that just | ||||
``yield obj``. | ||||
""" | ||||
Matt Harbison
|
r53343 | def onfinished(self): | ||
Pulkit Goyal
|
r43078 | """Called after all objects have been emitted from the command function. | ||
Implementations should return an iterator of objects to forward to | ||||
the output layer. | ||||
This method can be a generator. | ||||
""" | ||||