sqlitestore: support for storing revisions without their parents

This commit kinda/sorta implements the equivalent of ellipsis nodes for the
SQLite storage backend. Without implementing full-blown ellipsis nodes (and
the necessary support for them in the wire protocol), we instead teach the
store to rewrite the p1 and p2 nodes to nullid when the incoming parent isn't
in the local store. This allows servers to remain dumb and send the real
parent, leaving clients to deal with the missing-parent problem.

This obviously isn't ideal, because a benefit of ellipsis nodes is that we can
insert a fake parent to elide missing changesets. But neither solution is
ideal, because the original parent is dropped from storage. We could probably
teach the SQLite store to retain the original parent and handle missing
parents at read time. However, parent revisions are stored as integers, and it
isn't yet trivial to store an "empty" revision in the store, which would be
necessary to represent the missing parent.

The store is somewhat intelligent in trying to remove the missing-parents
metadata when the revision is re-added. But revision numbers will be all
messed up in that case, so I'm not sure it is worth it.

At some point we'll likely want to remove the concept of revision numbers from
the database and have the store invent them at index generation time. Or, even
better, we can do away with revision numbers in the file storage interface
completely. We'll get there eventually...

Differential Revision: https://phab.mercurial-scm.org/D5168
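A minimal sketch of the parent-rewriting idea described above, assuming a
hypothetical store object with a hasnode() lookup (the helper name and the
metadata keys are illustrative, not the actual sqlitestore API):

    from mercurial.node import nullid

    def _rewritemissingparents(store, p1, p2):
        # Record nullid for any parent that isn't known locally, remembering
        # the original node so it could be restored if that parent is added
        # to the store later.
        missing = {}
        if p1 != nullid and not store.hasnode(p1):
            missing[b'p1'] = p1
            p1 = nullid
        if p2 != nullid and not store.hasnode(p2):
            missing[b'p2'] = p2
            p2 = nullid
        return p1, p2, missing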

# osutil.py - CFFI version of osutil.c
#
# Copyright 2016 Maciej Fijalkowski <fijall@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import os
import stat as statmod
from ..pure.osutil import *
from .. import (
pycompat,
)
if pycompat.isdarwin:
from . import _osutil
ffi = _osutil.ffi
lib = _osutil.lib
listdir_batch_size = 4096
# tweakable number, only affects performance: it sets the size of the
# byte chunks we get back from each getattrlistbulk call
attrkinds = [None] * 20 # we need the max no for enum VXXX, 20 is plenty
attrkinds[lib.VREG] = statmod.S_IFREG
attrkinds[lib.VDIR] = statmod.S_IFDIR
attrkinds[lib.VLNK] = statmod.S_IFLNK
attrkinds[lib.VBLK] = statmod.S_IFBLK
attrkinds[lib.VCHR] = statmod.S_IFCHR
attrkinds[lib.VFIFO] = statmod.S_IFIFO
attrkinds[lib.VSOCK] = statmod.S_IFSOCK
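# stat_res mimics the few os.stat_result fields (st_mode, st_mtime, st_size)
# that callers of listdir(stat=True) rely on.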
class stat_res(object):
def __init__(self, st_mode, st_mtime, st_size):
self.st_mode = st_mode
self.st_mtime = st_mtime
self.st_size = st_size
tv_sec_ofs = ffi.offsetof("struct timespec", "tv_sec")
buf = ffi.new("char[]", listdir_batch_size)
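# Drain getattrlistbulk() results for the open directory fd, decoding each
# variable-length val_attrs_t record in the shared buffer into (name, kind)
# or (name, kind, stat_res) tuples.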
def listdirinternal(dfd, req, stat, skip):
ret = []
while True:
r = lib.getattrlistbulk(dfd, req, buf, listdir_batch_size, 0)
if r == 0:
break
if r == -1:
raise OSError(ffi.errno, os.strerror(ffi.errno))
cur = ffi.cast("val_attrs_t*", buf)
for i in range(r):
lgt = cur.length
assert lgt == ffi.cast('uint32_t*', cur)[0]
ofs = cur.name_info.attr_dataoffset
str_lgt = cur.name_info.attr_length
base_ofs = ffi.offsetof('val_attrs_t', 'name_info')
name = str(ffi.buffer(ffi.cast("char*", cur) + base_ofs + ofs,
str_lgt - 1))
tp = attrkinds[cur.obj_type]
if name == "." or name == "..":
continue
if skip == name and tp == statmod.S_IFDIR:
return []
if stat:
mtime = cur.mtime.tv_sec
mode = (cur.accessmask & ~lib.S_IFMT) | tp
ret.append((name, tp, stat_res(st_mode=mode, st_mtime=mtime,
st_size=cur.datalength)))
else:
ret.append((name, tp))
cur = ffi.cast("val_attrs_t*", int(ffi.cast("intptr_t", cur))
+ lgt)
return ret
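# listdir(path, stat=False, skip=None) mirrors mercurial.pure.osutil.listdir:
# it returns a list of (name, kind) tuples, or (name, kind, stat_res) tuples
# when stat is True, and returns [] early if a directory entry named `skip`
# is found.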
def listdir(path, stat=False, skip=None):
req = ffi.new("struct attrlist*")
req.bitmapcount = lib.ATTR_BIT_MAP_COUNT
req.commonattr = (lib.ATTR_CMN_RETURNED_ATTRS |
lib.ATTR_CMN_NAME |
lib.ATTR_CMN_OBJTYPE |
lib.ATTR_CMN_ACCESSMASK |
lib.ATTR_CMN_MODTIME)
req.fileattr = lib.ATTR_FILE_DATALENGTH
dfd = lib.open(path, lib.O_RDONLY, 0)
if dfd == -1:
raise OSError(ffi.errno, os.strerror(ffi.errno))
try:
ret = listdirinternal(dfd, req, stat, skip)
finally:
try:
lib.close(dfd)
except BaseException:
pass # ignore errors from closing; there's not much we can do about them
return ret
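# Example usage (illustrative only; assumes a darwin platform where this
# listdir() replaces the pure-Python fallback, and a bytes path as used
# throughout Mercurial):
#
#     for name, kind, st in listdir(b'/tmp', stat=True):
#         if kind == statmod.S_IFDIR:
#             print(name, st.st_size)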