parsers.py
568 lines
| 16.6 KiB
| text/x-python
|
PythonLexer
Martin Geisler
|
r7700 | # parsers.py - Python implementation of parsers.c | ||
# | ||||
Raphaël Gomès
|
r47575 | # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others | ||
Martin Geisler
|
r7700 | # | ||
Martin Geisler
|
r8225 | # This software may be used and distributed according to the terms of the | ||
Matt Mackall
|
r10263 | # GNU General Public License version 2 or any later version. | ||
Martin Geisler
|
r7700 | |||
Gregory Szorc
|
r27339 | from __future__ import absolute_import | ||
import struct | ||||
import zlib | ||||
Joerg Sonnenberger
|
r47771 | from ..node import ( | ||
nullrev, | ||||
sha1nodeconstants, | ||||
) | ||||
r43925 | from .. import ( | |||
r48009 | error, | |||
r43925 | pycompat, | |||
r48186 | revlogutils, | |||
r43925 | util, | |||
) | ||||
Augie Fackler
|
r43346 | |||
r44486 | from ..revlogutils import nodemap as nodemaputil | |||
r47616 | from ..revlogutils import constants as revlog_constants | |||
r44486 | ||||
Gregory Szorc
|
# BytesIO factory used by pack_dirstate below
stringio = pycompat.bytesio


# local aliases of the struct/zlib entry points, so the hot parsing loops
# below get fast LOAD_GLOBAL lookups of short names
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress

# a special value used internally for `size` if the file comes from the other parent
FROM_P2 = -2

# a special value used internally for `size` if the file is modified/merged/added
NONNORMAL = -1
r48303 | ||||
class dirstatetuple(object):
    """represent a dirstate entry

    It contains:

    - state (one of 'n', 'a', 'r', 'm')
    - mode,
    - size,
    - mtime,
    """

    # fix: this was previously spelled `__slot__`, which Python ignores,
    # silently giving every instance a `__dict__` and defeating the point
    # of declaring slots for these numerous small objects
    __slots__ = ('_state', '_mode', '_size', '_mtime')

    def __init__(self, state, mode, size, mtime):
        self._state = state
        self._mode = mode
        self._size = size
        self._mtime = mtime

    def __getitem__(self, idx):
        # tuple-like access (positive and negative indexes) for callers
        # that still treat a dirstate entry as a 4-tuple
        if idx == 0 or idx == -4:
            return self._state
        elif idx == 1 or idx == -3:
            return self._mode
        elif idx == 2 or idx == -2:
            return self._size
        elif idx == 3 or idx == -1:
            return self._mtime
        else:
            raise IndexError(idx)

    @property
    def mode(self):
        return self._mode

    @property
    def size(self):
        return self._size

    @property
    def mtime(self):
        return self._mtime

    @property
    def state(self):
        """
        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition

        XXX This "state" is a bit obscure and mostly a direct expression of the
        dirstatev1 format. It would make sense to ultimately deprecate it in
        favor of the more "semantic" attributes.
        """
        return self._state

    @property
    def tracked(self):
        """True if the file is tracked in the working copy"""
        return self._state in b"nma"

    @property
    def added(self):
        """True if the file has been added"""
        return self._state == b'a'

    @property
    def merged(self):
        """True if the file has been merged

        Should only be set if a merge is in progress in the dirstate
        """
        return self._state == b'm'

    @property
    def from_p2(self):
        """True if the file has been fetched from p2 during the current merge

        This is only True if the file is currently tracked.

        Should only be set if a merge is in progress in the dirstate
        """
        return self._state == b'n' and self._size == FROM_P2

    @property
    def from_p2_removed(self):
        """True if the file has been removed, but was "from_p2" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self._state == b'r' and self._size == FROM_P2

    @property
    def removed(self):
        """True if the file has been removed"""
        return self._state == b'r'

    @property
    def merged_removed(self):
        """True if the file has been removed, but was "merged" initially

        This property seems like an abstraction leakage and should probably be
        dealt in this class (or maybe the dirstatemap) directly.
        """
        return self._state == b'r' and self._size == NONNORMAL

    def v1_state(self):
        """return a "state" suitable for v1 serialization"""
        return self._state

    def v1_mode(self):
        """return a "mode" suitable for v1 serialization"""
        return self._mode

    def v1_size(self):
        """return a "size" suitable for v1 serialization"""
        return self._size

    def v1_mtime(self):
        """return a "mtime" suitable for v1 serialization"""
        return self._mtime

    def need_delay(self, now):
        """True if the stored mtime would be ambiguous with the current time"""
        return self._state == b'n' and self._mtime == now
Augie Fackler
|
r43346 | |||
Maciej Fijalkowski
|
def gettype(q):
    """extract the type stored in the low 16 bits of `q`"""
    low_bits = q & 0xFFFF
    return int(low_bits)
Matt Mackall
|
r7945 | |||
Augie Fackler
|
r43346 | |||
Maciej Fijalkowski
|
class BaseIndexObject(object):
    """Shared implementation of the pure-Python revlog index classes.

    Concrete subclasses provide `_data` (the raw index buffer), `_lgt`
    (the number of entries stored in `_data`) and `_calculate_index`
    (byte offset of entry `i` inside `_data`).  Entries appended after
    load are kept pre-packed in the `_extra` list.
    """

    # Can I be passed to an algorithm implemented in Rust ?
    rust_ext_compat = 0

    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1

    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')

    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')

    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (
        0,
        0,
        0,
        -1,
        -1,
        -1,
        -1,
        sha1nodeconstants.nullid,
        0,
        0,
        revlog_constants.COMP_MODE_INLINE,
        revlog_constants.COMP_MODE_INLINE,
    )

    @util.propertycache
    def entry_size(self):
        # byte size of one packed index entry
        return self.index_format.size

    @property
    def nodemap(self):
        # deprecated accessor kept for backward compatibility
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily-built node -> rev mapping, dropped by clearcaches()
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            # field 7 of an index entry is the node id
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exists in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # drop stripped revisions from the cached nodemap, but only when the
        # cache has actually been built
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        # forget the cached nodemap; it will be rebuilt on next access
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        # keep the cached nodemap in sync when it already exists
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(len(self), tup)
        self._extra.append(data)

    def _pack_entry(self, rev, entry):
        # v1 entries cannot carry sidedata information (fields 8 and 9)
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            # entry appended after load: stored pre-packed in `_extra`
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(i, data)
        if self._lgt and i == 0:
            # the first on-disk entry reuses part of the offset field for
            # header/flag bits: rebuild it with offset 0, keeping only the
            # type bits
            offset = revlogutils.offset_type(0, gettype(r[0]))
            r = (offset,) + r[1:]
        return r

    def _unpack_entry(self, rev, data):
        r = self.index_format.unpack(data)
        # pad the v1 tuple with the sidedata and compression-mode fields
        # expected by newer code paths
        r = r + (
            0,
            0,
            revlog_constants.COMP_MODE_INLINE,
            revlog_constants.COMP_MODE_INLINE,
        )
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # rev 0's first bytes overlap the index header on disk
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
Augie Fackler
|
r43346 | |||
Maciej Fijalkowski
|
class IndexObject(BaseIndexObject):
    """Revlog index backed by a contiguous buffer of fixed-size entries."""

    def __init__(self, data):
        entry_sz = self.entry_size
        remainder = len(data) % entry_sz
        # the buffer must hold a whole number of entries
        assert remainder == 0, (len(data), entry_sz, remainder)
        self._data = data
        self._lgt = len(data) // entry_sz
        self._extra = []

    def _calculate_index(self, i):
        # fixed-size entries make the offset a simple multiplication
        return i * self.entry_size

    def __delitem__(self, i):
        # only truncation from some revision through the end is supported
        bad_slice = (
            not isinstance(i, slice) or i.stop != -1 or i.step is not None
        )
        if bad_slice:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            # truncation reaches into the on-disk portion: shrink the raw
            # buffer and discard all appended entries
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            # only appended entries are affected
            self._extra = self._extra[: i - self._lgt]
Maciej Fijalkowski
|
r29133 | |||
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            # no previously-loaded data to compute a delta against
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the incremental state is consumed by this call; reset it
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                # empty/unusable data: clear all incremental state
                self._nm_root = self._nm_max_idx = self._nm_docket = None
r44801 | ||||
r44794 | ||||
Maciej Fijalkowski
|
class InlinedIndexObject(BaseIndexObject):
    """Revlog index for inline revlogs, where each index entry is followed
    by its revision data in the same buffer."""

    def __init__(self, data, inline=0):
        self._data = data
        # first scan counts the entries, second scan records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        # walk the buffer entry by entry; `s` below is the length of the
        # revision data trailing each entry, read from the 4-byte field
        # that follows the leading 8-byte field of the entry
        off = 0
        if lgt is not None:
            # second pass: pre-size the offsets table
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            # skip over the entry and its trailing revision data
            off += self.entry_size + s
        if off != len(self._data):
            # the walk must land exactly on the end of the buffer
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only truncation from some revision through the end is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            # truncation reaches into the scanned portion
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            # only appended entries are affected
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        # offsets are irregular (entries are interleaved with data), so
        # they were recorded during _inline_scan
        return self._offsets[i]
Martin Geisler
|
r7700 | |||
Augie Fackler
|
r43346 | |||
Raphaël Gomès
|
def parse_index2(data, inline, revlogv2=False):
    """build an index object from raw index `data`

    Returns an (index, cache) pair; the cache is only populated for inline
    revlogs, mirroring the C parser's interface.
    """
    if inline:
        return InlinedIndexObject(data, inline), (0, data)
    index_cls = IndexObject2 if revlogv2 else IndexObject
    return index_cls(data), None
def parse_index_cl_v2(data):
    """build a changelog-v2 index object from raw index `data`"""
    index = IndexChangelogV2(data)
    return index, None
class IndexObject2(IndexObject):
    """Revlog-v2 index: entries carry sidedata and compression-mode fields."""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self,
        rev,
        sidedata_offset,
        sidedata_length,
        offset_flags,
        compression_mode,
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        if rev < self._lgt:
            # entries already written to disk are immutable here; only
            # entries still held in `_extra` may be rewritten
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)
        else:
            entry = list(self[rev])
            entry[0] = offset_flags
            entry[8] = sidedata_offset
            entry[9] = sidedata_length
            entry[11] = compression_mode
            entry = tuple(entry)
            new = self._pack_entry(rev, entry)
            self._extra[rev - self._lgt] = new

    def _unpack_entry(self, rev, data):
        data = self.index_format.unpack(data)
        entry = data[:10]
        # the two compression modes share one packed field: data mode in
        # the low 2 bits, sidedata mode in the next 2 bits
        data_comp = data[10] & 3
        sidedata_comp = (data[10] & (3 << 2)) >> 2
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        data = entry[:10]
        # fold the two 2-bit compression modes back into one field
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        return self._pack_entry(rev, entry)

    def pack_header(self, header):
        """pack header information as binary"""
        # revlog-v2 keeps version information in the docket file, so
        # calling this is a programming error
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
Raphaël Gomès
|
r47438 | |||
class IndexChangelogV2(IndexObject2):
    """Changelog flavor of the v2 index.

    Fields 3 and 4 of an entry always equal the revision number for the
    changelog (see the asserts in `_pack_entry`), so they are not stored
    on disk and are re-synthesized when unpacking.
    """

    index_format = revlog_constants.INDEX_ENTRY_CL_V2

    def _unpack_entry(self, rev, data, r=True):
        # NOTE(review): parameter `r` appears unused and is absent from the
        # base-class signature — confirm before removing.
        items = self.index_format.unpack(data)
        entry = items[:3] + (rev, rev) + items[3:8]
        # the two compression modes share one packed field: data mode in
        # the low 2 bits, sidedata mode in the next 2 bits
        data_comp = items[8] & 3
        sidedata_comp = (items[8] >> 2) & 3
        return entry + (data_comp, sidedata_comp)

    def _pack_entry(self, rev, entry):
        assert entry[3] == rev, entry[3]
        assert entry[4] == rev, entry[4]
        data = entry[:3] + entry[5:10]
        data_comp = entry[10] & 3
        sidedata_comp = (entry[11] & 3) << 2
        data += (data_comp | sidedata_comp,)
        return self.index_format.pack(*data)
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject

    `inline` is accepted for signature compatibility but ignored.
    """
    return PersistentNodeMapIndexObject(data), None
Martin Geisler
|
def parse_dirstate(dmap, copymap, st):
    """fill `dmap` and `copymap` from the raw v1 dirstate bytes `st`

    Returns the pair of parent nodes stored in the first 40 bytes.
    """
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    entry_format = b">cllll"
    header_size = struct.calcsize(entry_format)
    end = len(st)
    cursor = 40
    # the inner loop
    while cursor < end:
        name_start = cursor + header_size
        e = _unpack(b">cllll", st[cursor:name_start])  # a literal here is faster
        # e[4] is the length of the filename field that follows the header
        cursor = name_start + e[4]
        f = st[name_start:cursor]
        if b'\0' in f:
            # the filename field also carries the copy source
            f, source = f.split(b'\0')
            copymap[f] = source
        dmap[f] = dirstatetuple(*e[:4])
    return parents
Siddharth Agarwal
|
r18567 | |||
Augie Fackler
|
r43346 | |||
Siddharth Agarwal
|
def pack_dirstate(dmap, copymap, pl, now):
    """serialize the dirstate map `dmap` (and `copymap`) to v1 binary format

    `pl` is the pair of parent nodes; `now` is the current time, used to
    invalidate mtimes that would be ambiguous on future `status` runs.
    May update `dmap` in place (see below).
    """
    # normalize to whole seconds, matching the stored mtime granularity
    now = int(now)
    cs = stringio()
    # bind the method once; this loop runs for every dirstate entry
    write = cs.write
    write(b"".join(pl))
    for f, e in pycompat.iteritems(dmap):
        if e[0] == b'n' and e[3] == now:
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e = dirstatetuple(e[0], e[1], e[2], -1)
            dmap[f] = e

        if f in copymap:
            # copies are encoded by appending the source after a NUL byte
            f = b"%s\0%s" % (f, copymap[f])
        e = _pack(
            b">cllll",
            e.v1_state(),
            e.v1_mode(),
            e.v1_size(),
            e.v1_mtime(),
            len(f),
        )
        write(e)
        write(f)
    return cs.getvalue()