# parsers.py - Python implementation of parsers.c
#
# Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
r27339 | from __future__ import absolute_import | ||
import struct | ||||
import zlib | ||||
Joerg Sonnenberger
|
r47771 | from ..node import ( | ||
nullrev, | ||||
sha1nodeconstants, | ||||
) | ||||
Augie Fackler
|
r48366 | from ..thirdparty import attr | ||
r43925 | from .. import ( | |||
r48009 | error, | |||
r43925 | pycompat, | |||
r48186 | revlogutils, | |||
r43925 | util, | |||
) | ||||
Augie Fackler
|
r43346 | |||
r44486 | from ..revlogutils import nodemap as nodemaputil | |||
r47616 | from ..revlogutils import constants as revlog_constants | |||
r44486 | ||||
Gregory Szorc
|
stringio = pycompat.bytesio


# local aliases of frequently used functions (historically kept for speed)
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress

# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = -1

# a special value used internally for `time` if the time is ambiguous
AMBIGUOUS_TIME = -1
@attr.s(slots=True, init=False)
class DirstateItem(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    # tracking booleans: working copy, first parent, second parent
    _wc_tracked = attr.ib()
    _p1_tracked = attr.ib()
    _p2_tracked = attr.ib()
    # the three items above should probably be combined
    #
    # However it is unclear if they properly cover some of the most advanced
    # merge cases. So we should probably wait on this to be settled.
    _merged = attr.ib()
    _clean_p1 = attr.ib()
    _clean_p2 = attr.ib()
    _possibly_dirty = attr.ib()

    # cached stat information (each may be None when unknown)
    _mode = attr.ib()
    _size = attr.ib()
    _mtime = attr.ib()

    def __init__(
        self,
        wc_tracked=False,
        p1_tracked=False,
        p2_tracked=False,
        merged=False,
        clean_p1=False,
        clean_p2=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        # a "merged" item cannot also be clean relative to a single parent
        if merged and (clean_p1 or clean_p2):
            msg = b'`merged` argument incompatible with `clean_p1`/`clean_p2`'
            raise error.ProgrammingError(msg)

        self._wc_tracked = wc_tracked
        self._p1_tracked = p1_tracked
        self._p2_tracked = p2_tracked
        self._merged = merged
        self._clean_p1 = clean_p1
        self._clean_p2 = clean_p2
        self._possibly_dirty = possibly_dirty
        if parentfiledata is None:
            self._mode = None
            self._size = None
            self._mtime = None
        else:
            # parentfiledata is a (mode, size, mtime) triple
            self._mode = parentfiledata[0]
            self._size = parentfiledata[1]
            self._mtime = parentfiledata[2]

    @classmethod
    def new_added(cls):
        """constructor to help legacy API to build a new "added" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False
        instance._p2_tracked = False
        return instance

    @classmethod
    def new_merged(cls):
        """constructor to help legacy API to build a new "merged" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True  # might not be True because of rename ?
        instance._p2_tracked = True  # might not be True because of rename ?
        instance._merged = True
        return instance

    @classmethod
    def new_from_p2(cls):
        """constructor to help legacy API to build a new "from_p2" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = False  # might actually be True
        instance._p2_tracked = True
        instance._clean_p2 = True
        return instance

    @classmethod
    def new_possibly_dirty(cls):
        """constructor to help legacy API to build a new "possibly_dirty" item

        Should eventually be removed
        """
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._possibly_dirty = True
        return instance

    @classmethod
    def new_normal(cls, mode, size, mtime):
        """constructor to help legacy API to build a new "normal" item

        Should eventually be removed
        """
        # the sentinel sizes are reserved and must not leak into a real entry
        assert size != FROM_P2
        assert size != NONNORMAL
        instance = cls()
        instance._wc_tracked = True
        instance._p1_tracked = True
        instance._mode = mode
        instance._size = size
        instance._mtime = mtime
        return instance

    @classmethod
    def from_v1_data(cls, state, mode, size, mtime):
        """Build a new DirstateItem object from V1 data

        Since the dirstate-v1 format is frozen, the signature of this function
        is not expected to change, unlike the __init__ one.
        """
        if state == b'm':
            return cls.new_merged()
        elif state == b'a':
            return cls.new_added()
        elif state == b'r':
            instance = cls()
            instance._wc_tracked = False
            if size == NONNORMAL:
                # removed while a merge was in progress
                instance._merged = True
                instance._p1_tracked = (
                    True  # might not be True because of rename ?
                )
                instance._p2_tracked = (
                    True  # might not be True because of rename ?
                )
            elif size == FROM_P2:
                instance._clean_p2 = True
                instance._p1_tracked = (
                    False  # We actually don't know (file history)
                )
                instance._p2_tracked = True
            else:
                instance._p1_tracked = True
            return instance
        elif state == b'n':
            if size == FROM_P2:
                return cls.new_from_p2()
            elif size == NONNORMAL:
                return cls.new_possibly_dirty()
            elif mtime == AMBIGUOUS_TIME:
                # the 42 is a placeholder; the mtime is cleared right below
                instance = cls.new_normal(mode, size, 42)
                instance._mtime = None
                instance._possibly_dirty = True
                return instance
            else:
                return cls.new_normal(mode, size, mtime)
        else:
            raise RuntimeError(b'unknown state: %s' % state)

    def set_possibly_dirty(self):
        """Mark a file as "possibly dirty"

        This means the next status call will have to actually check its content
        to make sure it is correct.
        """
        self._possibly_dirty = True

    def set_clean(self, mode, size, mtime):
        """mark a file as "clean", cancelling a potential "possibly dirty" call

        Note: this function is a descendant of `dirstate.normal` and is
        currently expected to be called on "normal" entries only. There is no
        reason for this not to change in the future as long as the code is
        updated to preserve the proper state of the non-normal files.
        """
        self._wc_tracked = True
        self._p1_tracked = True
        self._p2_tracked = False  # this might be wrong
        self._merged = False
        self._clean_p2 = False
        self._possibly_dirty = False
        self._mode = mode
        self._size = size
        self._mtime = mtime

    def set_tracked(self):
        """mark a file as tracked in the working copy

        This will ultimately be called by command like `hg add`.
        """
        self._wc_tracked = True
        # `set_tracked` is replacing various `normallookup` call. So we set
        # "possibly dirty" to stay on the safe side.
        #
        # Consider dropping this in the future in favor of something less broad.
        self._possibly_dirty = True

    def set_untracked(self):
        """mark a file as untracked in the working copy

        This will ultimately be called by command like `hg remove`.
        """
        # only the working-copy flag and the cached stat data are cleared;
        # the parent-tracking flags are left as a backup of the previous
        # state (useful for merge)
        self._wc_tracked = False
        self._mode = None
        self._size = None
        self._mtime = None

    def drop_merge_data(self):
        """remove all "merge-only" information from a DirstateItem

        This is to be called by the dirstatemap code when the second parent is dropped
        """
        if not (self.merged or self.from_p2):
            return
        self._p1_tracked = self.merged  # why is this not already properly set ?

        self._merged = False
        self._clean_p1 = False
        self._clean_p2 = False
        self._p2_tracked = False
        self._possibly_dirty = True
        self._mode = None
        self._size = None
        self._mtime = None

    @property
    def mode(self):
        return self.v1_mode()

    @property
    def size(self):
        return self.v1_size()

    @property
    def mtime(self):
        return self.v1_mtime()

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        return self.v1_state()

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._wc_tracked

    @property
    def added(self):
        """True if the file has been added"""
        return self._wc_tracked and not (self._p1_tracked or self._p2_tracked)

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self._wc_tracked and self._merged

    @property
    def from_p2(self):
        """True if the file have been fetched from p2 during the current merge

        This is only True if the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        if not self._wc_tracked:
            return False
        return self._clean_p2 or (not self._p1_tracked and self._p2_tracked)

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self.removed and self._clean_p2

    @property
    def removed(self):
        """True if the file has been removed"""
        return not self._wc_tracked and (self._p1_tracked or self._p2_tracked)

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self.removed and self._merged

    @property
    def dm_nonnormal(self):
        """True if the entry is non-normal in the dirstatemap sense

        There is no reason for any code, but the dirstatemap one to use this.
        """
        return self.v1_state() != b'n' or self.v1_mtime() == AMBIGUOUS_TIME

    @property
    def dm_otherparent(self):
        """True if the entry is `otherparent` in the dirstatemap sense

        There is no reason for any code, but the dirstatemap one to use this.
        """
        return self.v1_size() == FROM_P2

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed:
            return b'r'
        elif self.merged:
            return b'm'
        elif self.added:
            return b'a'
        else:
            return b'n'

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode if self._mode is not None else 0

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.merged_removed:
            return NONNORMAL
        elif self.from_p2_removed:
            return FROM_P2
        elif self.removed:
            return 0
        elif self.merged:
            return FROM_P2
        elif self.added:
            return NONNORMAL
        elif self.from_p2:
            return FROM_P2
        elif self._possibly_dirty:
            return self._size if self._size is not None else NONNORMAL
        else:
            return self._size

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        if not (self._p1_tracked or self._p2_tracked or self._wc_tracked):
            # the object has no state to record, this is -currently-
            # unsupported
            raise RuntimeError('untracked item')
        elif self.removed:
            return 0
        elif self._possibly_dirty:
            return AMBIGUOUS_TIME
        elif self.merged:
            return AMBIGUOUS_TIME
        elif self.added:
            return AMBIGUOUS_TIME
        elif self.from_p2:
            return AMBIGUOUS_TIME
        else:
            return self._mtime if self._mtime is not None else 0

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self.v1_state() == b'n' and self.v1_mtime() == now
r48321 | ||||
Augie Fackler
|
r43346 | |||
Maciej Fijalkowski
|
def gettype(q):
    """Return the type stored in the low 16 bits of ``q``."""
    low_bits = q & 0xFFFF
    return int(low_bits)
Matt Mackall
|
r7945 | |||
Augie Fackler
|
r43346 | |||
Maciej Fijalkowski
|
class BaseIndexObject(object):
    """Shared behavior for the pure-Python revlog index objects."""

    # Can I be passed to an algorithm implemented in Rust ?
    rust_ext_compat = 0

    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1

    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        """size in bytes of one serialized index entry"""
        return self.index_format.size

    @property
    def nodemap(self):
        # deprecated accessor, kept for compatibility; emits a dev warning
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily build the node -> rev mapping by scanning the whole index
        # (tuple slot 7 of each entry holds the node)
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # drop stripped revisions from the cached node map, if it was built
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        # entries parsed from the immutable data, plus the appended ones
        return self._lgt + len(self._extra)

    def append(self, tup):
        # keep the node map in sync if it has already been computed
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        # v1 stores no sidedata: those two fields must be zero and only the
        # first 8 fields are serialized
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # rebuild entry 0's first field with a zero offset, keeping only
            # its type bits (on disk that field overlaps the index header,
            # see entry_binary below)
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        # v1 entries carry no sidedata information; extend the tuple with
        # default sidedata offset/length and compression modes
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # the first entry's leading bytes are occupied by the header on
            # disk, so they are not part of its binary representation
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
Augie Fackler
|
r43346 | |||
Maciej Fijalkowski
|
class IndexObject(BaseIndexObject):
    """Revlog index backed by a flat buffer of fixed-size entries."""

    def __init__(self, data):
        remainder = len(data) % self.entry_size
        assert remainder == 0, (len(data), self.entry_size, remainder)
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        # entries have a fixed size, so the byte offset is a multiplication
        return i * self.entry_size

    def __delitem__(self, i):
        # only a trailing slice delete (strip) is supported
        valid = isinstance(i, slice) and i.stop == -1 and i.step is None
        if not valid:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        start = i.start
        self._check_index(start)
        self._stripnodes(start)
        if start >= self._lgt:
            # the cut lands in the appended entries only
            self._extra = self._extra[: start - self._lgt]
        else:
            # the cut lands in the immutable buffer: truncate it and drop
            # every appended entry
            self._data = self._data[: start * self.entry_size]
            self._lgt = start
            self._extra = []
Maciej Fijalkowski
|
r29133 | |||
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the in-memory state is consumed by this call
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
r44801 | ||||
r44794 | ||||
Maciej Fijalkowski
|
class InlinedIndexObject(BaseIndexObject):
    """Revlog index whose entries are interleaved with revision data."""

    def __init__(self, data, inline=0):
        self._data = data
        # first pass counts the entries, second pass records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        # Walk the inline data and return the number of entries found.
        # When `lgt` is not None, also fill `self._offsets` with the byte
        # offset of each of the `lgt` entries.
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            # read the 32-bit field following the first big_int_size bytes
            # of the entry; the scan skips that many extra bytes after the
            # entry (presumably the inlined chunk length — matches the
            # on-disk v1 layout)
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
                count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only a trailing slice delete (strip) is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        # offsets are irregular (entries are followed by revision data), so
        # they are precomputed by `_inline_scan`
        return self._offsets[i]
Martin Geisler
|
r7700 | |||
Augie Fackler
|
r43346 | |||
Raphaël Gomès
|
def parse_index2(data, inline, revlogv2=False):
    """Build an index object from raw index data.

    Returns an ``(index, cache)`` pair; the cache tuple is only populated
    for inline indexes.
    """
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    if revlogv2:
        return IndexObject2(data), None
    return IndexObject(data), None
def parse_index_cl_v2(data):
    """Build a changelog-v2 index object (never inline) from raw data."""
    index = IndexChangelogV2(data)
    return index, None
class IndexObject2(IndexObject):
    # revlog-v2 entries carry sidedata and compression information
    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # only entries appended within this transaction (stored in
            # self._extra) may be rewritten
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        # the last packed field holds both compression modes:
        # bits 0-1 for the data, bits 2-3 for the sidedata
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        # fold the two compression modes into a single field (see
        # _unpack_entry for the bit layout)
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        # v2 keeps the version information in the docket, not the index file
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
Raphaël Gomès
|
r47438 | |||
class IndexChangelogV2(IndexObject2):
    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        # NOTE(review): `r` is never read and is absent from the base class
        # signature — presumably leftover; confirm before removing.
        items = self.index_format.unpack(data)
        # tuple slots 3 and 4 are not stored on disk for the changelog;
        # they are filled with the revision number itself
        entry = items[:3] + (rev, rev) + items[3:8]
        # bits 0-1: data compression mode; bits 2-3: sidedata one
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        # slots 3 and 4 must equal the revision number; they are dropped
        # from the serialized form (see _unpack_entry)
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None
Martin Geisler
|
def parse_dirstate(dmap, copymap, st):
    """Parse a dirstate-v1 blob ``st`` into ``dmap``/``copymap``.

    Returns the two parent nodes stored in the first 40 bytes.
    """
    parents = [st[:20], st[20:40]]
    entry_fmt = b">cllll"
    entry_size = struct.calcsize(entry_fmt)
    offset = 40
    end = len(st)
    # scan entry after entry until the end of the blob
    while offset < end:
        name_start = offset + entry_size
        fields = _unpack(b">cllll", st[offset:name_start])  # a literal here is faster
        # fields[4] is the length of the filename that follows the entry
        offset = name_start + fields[4]
        fname = st[name_start:offset]
        if b'\0' in fname:
            # the filename embeds its copy source after a NUL byte
            fname, copy_source = fname.split(b'\0')
            copymap[fname] = copy_source
        dmap[fname] = DirstateItem.from_v1_data(*fields[:4])
    return parents
Siddharth Agarwal
|
r18567 | |||
Augie Fackler
|
r43346 | |||
Siddharth Agarwal
|
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize a dirstate map back to its on-disk v1 representation.

    ``pl`` is the pair of parent nodes, ``now`` the current wall-clock time
    used to detect entries with an ambiguous mtime.
    """
    now = int(now)
    buf = stringio()
    buf.write(b"".join(pl))
    for fname, entry in pycompat.iteritems(dmap):
        if entry.need_delay(now):
            # This file was last modified "simultaneously" with the current
            # dirstate write (i.e. within the same second on filesystems
            # with 1s granularity) — a common situation on 'update'. Its
            # content could change again within that second without its
            # size changing, so invalidate the recorded mtime: future
            # 'status' calls will then compare file contents when the size
            # matches, instead of wrongly reporting the file as clean.
            entry.set_possibly_dirty()

        if fname in copymap:
            # the copy source is appended to the name after a NUL byte
            fname = b"%s\0%s" % (fname, copymap[fname])
        record = _pack(
            b">cllll",
            entry.v1_state(),
            entry.v1_mode(),
            entry.v1_size(),
            entry.v1_mtime(),
            len(fname),
        )
        buf.write(record)
        buf.write(fname)
    return buf.getvalue()