# osutil.py - CFFI version of osutil.c
#
# Copyright 2016 Maciej Fijalkowski <fijall@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import stat as statmod

from ..pure.osutil import *

from .. import (
    pycompat,
)

if pycompat.isdarwin:
    from . import _osutil

    ffi = _osutil.ffi
    lib = _osutil.lib

    listdir_batch_size = 4096
    # tweakable number; only affects performance. It is the size in bytes of
    # the buffer that each getattrlistbulk() call fills with directory entries.

    # map the kernel's vnode type enum (lib.VREG, lib.VDIR, ...) to stat's
    # S_IF* constants; 20 slots is more than the highest enum value we need
    attrkinds = [None] * 20

    attrkinds[lib.VREG] = statmod.S_IFREG
    attrkinds[lib.VDIR] = statmod.S_IFDIR
    attrkinds[lib.VLNK] = statmod.S_IFLNK
    attrkinds[lib.VBLK] = statmod.S_IFBLK
    attrkinds[lib.VCHR] = statmod.S_IFCHR
    attrkinds[lib.VFIFO] = statmod.S_IFIFO
    attrkinds[lib.VSOCK] = statmod.S_IFSOCK
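
    # lightweight stand-in for os.stat_result: listdir() below only ever
    # fills in the mode, mtime and size fields, so that is all we carry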
    class stat_res(object):
        def __init__(self, st_mode, st_mtime, st_size):
            self.st_mode = st_mode
            self.st_mtime = st_mtime
            self.st_size = st_size
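
    # byte offset of tv_sec inside struct timespec, and the shared buffer
    # that getattrlistbulk() fills with one batch of entries per call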
    tv_sec_ofs = ffi.offsetof("struct timespec", "tv_sec")
    buf = ffi.new("char[]", listdir_batch_size)
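
    # Decode the stream of packed val_attrs_t records that getattrlistbulk()
    # writes into buf: every record begins with its own total length, which
    # is also how we step from one entry to the next.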
    def listdirinternal(dfd, req, stat, skip):
        ret = []
        while True:
            r = lib.getattrlistbulk(dfd, req, buf, listdir_batch_size, 0)
            if r == 0:
                break
            if r == -1:
                raise OSError(ffi.errno, os.strerror(ffi.errno))
            cur = ffi.cast("val_attrs_t*", buf)
            for i in range(r):
                lgt = cur.length
                assert lgt == ffi.cast('uint32_t*', cur)[0]
                ofs = cur.name_info.attr_dataoffset
                str_lgt = cur.name_info.attr_length
                base_ofs = ffi.offsetof('val_attrs_t', 'name_info')
                name = str(ffi.buffer(ffi.cast("char*", cur) + base_ofs + ofs,
                                      str_lgt - 1))
                tp = attrkinds[cur.obj_type]
                if name == "." or name == "..":
                    continue
                if skip == name and tp == statmod.S_IFDIR:
                    return []
                if stat:
                    mtime = cur.mtime.tv_sec
                    mode = (cur.accessmask & ~lib.S_IFMT) | tp
                    ret.append((name, tp, stat_res(st_mode=mode, st_mtime=mtime,
                                                   st_size=cur.datalength)))
                else:
                    ret.append((name, tp))
                cur = ffi.cast("val_attrs_t*", int(ffi.cast("intptr_t", cur))
                               + lgt)
        return ret
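
    # listdir() mirrors the C osutil implementation: describe the attributes
    # we want back via an attrlist request, open the directory read-only and
    # let listdirinternal() do the batched reads, always closing the fd.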
    def listdir(path, stat=False, skip=None):
        req = ffi.new("struct attrlist*")
        req.bitmapcount = lib.ATTR_BIT_MAP_COUNT
        req.commonattr = (lib.ATTR_CMN_RETURNED_ATTRS |
                          lib.ATTR_CMN_NAME |
                          lib.ATTR_CMN_OBJTYPE |
                          lib.ATTR_CMN_ACCESSMASK |
                          lib.ATTR_CMN_MODTIME)
        req.fileattr = lib.ATTR_FILE_DATALENGTH
        dfd = lib.open(path, lib.O_RDONLY, 0)
        if dfd == -1:
            raise OSError(ffi.errno, os.strerror(ffi.errno))

        try:
            ret = listdirinternal(dfd, req, stat, skip)
        finally:
            try:
                lib.close(dfd)
            except BaseException:
                pass  # ignore errors from closing; there is not much we
                      # can do about them
        return ret
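
# Example usage (a minimal sketch, assuming macOS with the _osutil CFFI
# extension built so the names above are defined, and bytes paths as used
# throughout Mercurial):
#
#   from mercurial.cffi import osutil
#   for name, kind, st in osutil.listdir(b'/tmp', stat=True):
#       # kind is a statmod.S_IF* constant; st exposes st_mode, st_mtime
#       # and st_size
#       ...
#
# With stat=False each entry is just a (name, kind) pair, and passing skip a
# directory name makes the call return [] as soon as a subdirectory with that
# name is seen.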