
# osutil.py - CFFI version of osutil.c
#
# Copyright 2016 Maciej Fijalkowski <fijall@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import stat as statmod

from ..pure.osutil import *

from .. import (
    pycompat,
)
if pycompat.isdarwin:
    from . import _osutil

    ffi = _osutil.ffi
    lib = _osutil.lib

    listdir_batch_size = 4096
    # tweakable number; only affects performance: it sets the size of the
    # byte chunks we get back from getattrlistbulk
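
    # Map Darwin vnode type constants (VREG, VDIR, ...) reported in obj_type
    # to the corresponding stat S_IF* mode bits, so entries come back with
    # the same "kind" values the pure-Python listdir returns.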
    attrkinds = [None] * 20  # we need the max no for enum VXXX, 20 is plenty

    attrkinds[lib.VREG] = statmod.S_IFREG
    attrkinds[lib.VDIR] = statmod.S_IFDIR
    attrkinds[lib.VLNK] = statmod.S_IFLNK
    attrkinds[lib.VBLK] = statmod.S_IFBLK
    attrkinds[lib.VCHR] = statmod.S_IFCHR
    attrkinds[lib.VFIFO] = statmod.S_IFIFO
    attrkinds[lib.VSOCK] = statmod.S_IFSOCK
    class stat_res(object):
        def __init__(self, st_mode, st_mtime, st_size):
            self.st_mode = st_mode
            self.st_mtime = st_mtime
            self.st_size = st_size

    tv_sec_ofs = ffi.offsetof("struct timespec", "tv_sec")
    buf = ffi.new("char[]", listdir_batch_size)
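
    # Read directory entries in batches via getattrlistbulk(): each call
    # fills `buf` with `r` variable-length val_attrs_t records, a return
    # of 0 means the directory is exhausted, and -1 signals an error
    # reported through errno.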
    def listdirinternal(dfd, req, stat, skip):
        ret = []
        while True:
            r = lib.getattrlistbulk(dfd, req, buf, listdir_batch_size, 0)
            if r == 0:
                break
            if r == -1:
                raise OSError(ffi.errno, os.strerror(ffi.errno))
            cur = ffi.cast("val_attrs_t*", buf)
            for i in range(r):
                lgt = cur.length
                assert lgt == ffi.cast('uint32_t*', cur)[0]
                ofs = cur.name_info.attr_dataoffset
                str_lgt = cur.name_info.attr_length
                base_ofs = ffi.offsetof('val_attrs_t', 'name_info')
                name = str(ffi.buffer(ffi.cast("char*", cur) + base_ofs + ofs,
                                      str_lgt - 1))
                tp = attrkinds[cur.obj_type]
                if name == "." or name == "..":
                    continue
                if skip == name and tp == statmod.S_IFDIR:
                    return []
                if stat:
                    mtime = cur.mtime.tv_sec
                    mode = (cur.accessmask & ~lib.S_IFMT) | tp
                    ret.append((name, tp, stat_res(st_mode=mode, st_mtime=mtime,
                                                   st_size=cur.datalength)))
                else:
                    ret.append((name, tp))
                cur = ffi.cast("val_attrs_t*", int(ffi.cast("intptr_t", cur))
                               + lgt)
        return ret
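
    # Build the attrlist request (name, object type, access mask, mtime and
    # file data length), open the directory read-only via the C library's
    # open(), and let listdirinternal() walk it, always closing the
    # descriptor afterwards.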
    def listdir(path, stat=False, skip=None):
        req = ffi.new("struct attrlist*")
        req.bitmapcount = lib.ATTR_BIT_MAP_COUNT
        req.commonattr = (lib.ATTR_CMN_RETURNED_ATTRS |
                          lib.ATTR_CMN_NAME |
                          lib.ATTR_CMN_OBJTYPE |
                          lib.ATTR_CMN_ACCESSMASK |
                          lib.ATTR_CMN_MODTIME)
        req.fileattr = lib.ATTR_FILE_DATALENGTH
        dfd = lib.open(path, lib.O_RDONLY, 0)
        if dfd == -1:
            raise OSError(ffi.errno, os.strerror(ffi.errno))

        try:
            ret = listdirinternal(dfd, req, stat, skip)
        finally:
            try:
                lib.close(dfd)
            except BaseException:
                pass  # we ignore all the errors from closing, not
                      # much we can do about that
        return ret
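
# A minimal usage sketch, assuming the _osutil CFFI extension is built and we
# are running on macOS (pycompat.isdarwin), with b'/tmp' as a purely
# illustrative path; it only exercises the listdir() defined above:
#
#     for name, kind in listdir(b'/tmp'):
#         print(name, kind)
#     for name, kind, st in listdir(b'/tmp', stat=True):
#         print(name, oct(st.st_mode), st.st_mtime, st.st_size)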