# worker.py - master-slave parallelism support
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import signal
import sys
import threading
import time

try:
import selectors
selectors.BaseSelector
except ImportError:
from .thirdparty import selectors2 as selectors

from .i18n import _
from . import (
encoding,
error,
pycompat,
scmutil,
util,
)

def countcpus():
'''try to count the number of CPUs on the system'''
# posix
try:
n = int(os.sysconf(r'SC_NPROCESSORS_ONLN'))
if n > 0:
return n
except (AttributeError, ValueError):
pass
# windows
try:
n = int(encoding.environ['NUMBER_OF_PROCESSORS'])
if n > 0:
return n
except (KeyError, ValueError):
pass
return 1

def _numworkers(ui):
s = ui.config('worker', 'numcpus')
if s:
try:
n = int(s)
if n >= 1:
return n
except ValueError:
raise error.Abort(_('number of cpus must be an integer'))
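    # default: clamp the detected CPU count to the range [4, 32]
    # (illustrative, not in the original source: a 2-core machine still
    # gets 4 workers, while a 64-core machine is capped at 32)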
return min(max(countcpus(), 4), 32)

if pycompat.isposix or pycompat.iswindows:
_STARTUP_COST = 0.01
# The Windows worker is thread based. If tasks are CPU bound, threads
# in the presence of the GIL result in excessive context switching and
# this overhead can slow down execution.
_DISALLOW_THREAD_UNSAFE = pycompat.iswindows
else:
_STARTUP_COST = 1e30
_DISALLOW_THREAD_UNSAFE = False

def worthwhile(ui, costperop, nops, threadsafe=True):
'''try to determine whether the benefit of multiple processes can
outweigh the cost of starting them'''
if not threadsafe and _DISALLOW_THREAD_UNSAFE:
return False
linear = costperop * nops
workers = _numworkers(ui)
benefit = linear - (_STARTUP_COST * workers + linear / workers)
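    # illustrative arithmetic (not in the original source): on POSIX,
    # _STARTUP_COST is 0.01, so with costperop=0.01, nops=100 and
    # 4 workers, linear = 1.0 and benefit = 1.0 - (0.01 * 4 + 1.0 / 4)
    # = 0.71 >= 0.15, so multiple processes are judged worthwhile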
return benefit >= 0.15

def worker(ui, costperarg, func, staticargs, args, threadsafe=True):
    '''run a function, possibly in parallel in multiple worker
    processes.

    returns a progress iterator

    costperarg - cost of a single task
    func - function to run
    staticargs - arguments to pass to every invocation of the function
    args - arguments to split into chunks, to pass to individual
      workers
    threadsafe - whether work items are thread safe and can be executed using
      a thread-based worker. Should be disabled for CPU heavy tasks that don't
      release the GIL.
    '''
enabled = ui.configbool('worker', 'enabled')
if enabled and worthwhile(ui, costperarg, len(args), threadsafe=threadsafe):
return _platformworker(ui, func, staticargs, args)
return func(*staticargs + (args,))
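
# A minimal usage sketch (hypothetical callable, not part of this module):
# ``func`` receives staticargs plus one chunk of args and must yield its
# results, which worker() then yields back to the caller:
#
#   def double(ui, chunk):
#       for item in chunk:
#           yield item * 2
#
#   for res in worker(ui, 0.01, double, (ui,), list(range(1000))):
#       pass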

def _posixworker(ui, func, staticargs, args):
workers = _numworkers(ui)
oldhandler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, signal.SIG_IGN)
pids, problem = set(), [0]
def killworkers():
# unregister SIGCHLD handler as all children will be killed. This
# function shouldn't be interrupted by another SIGCHLD; otherwise pids
# could be updated while iterating, which would cause inconsistency.
signal.signal(signal.SIGCHLD, oldchldhandler)
# if one worker bails, there's no good reason to wait for the rest
for p in pids:
try:
os.kill(p, signal.SIGTERM)
except OSError as err:
if err.errno != errno.ESRCH:
raise
def waitforworkers(blocking=True):
for pid in pids.copy():
p = st = 0
while True:
try:
p, st = os.waitpid(pid, (0 if blocking else os.WNOHANG))
break
except OSError as e:
if e.errno == errno.EINTR:
continue
elif e.errno == errno.ECHILD:
                        # child has already been reaped, but pids not yet
                        # updated (maybe interrupted just after waitpid)
pids.discard(pid)
break
else:
raise
if not p:
                # skip subsequent steps, because the child process
                # should still be running in this case
continue
pids.discard(p)
st = _exitstatus(st)
if st and not problem[0]:
problem[0] = st
def sigchldhandler(signum, frame):
waitforworkers(blocking=False)
if problem[0]:
killworkers()
oldchldhandler = signal.signal(signal.SIGCHLD, sigchldhandler)
ui.flush()
parentpid = os.getpid()
pipes = []
for pargs in partition(args, workers):
# Every worker gets its own pipe to send results on, so we don't have to
# implement atomic writes larger than PIPE_BUF. Each forked process has
# its own pipe's descriptors in the local variables, and the parent
# process has the full list of pipe descriptors (and it doesn't really
# care what order they're in).
rfd, wfd = os.pipe()
pipes.append((rfd, wfd))
# make sure we use os._exit in all worker code paths. otherwise the
# worker may do some clean-ups which could cause surprises like
# deadlock. see sshpeer.cleanup for example.
# override error handling *before* fork. this is necessary because
# exception (signal) may arrive after fork, before "pid =" assignment
# completes, and other exception handler (dispatch.py) can lead to
# unexpected code path without os._exit.
ret = -1
try:
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGINT, oldhandler)
signal.signal(signal.SIGCHLD, oldchldhandler)
def workerfunc():
for r, w in pipes[:-1]:
os.close(r)
os.close(w)
os.close(rfd)
for result in func(*(staticargs + (pargs,))):
os.write(wfd, util.pickle.dumps(result))
return 0
ret = scmutil.callcatch(ui, workerfunc)
except: # parent re-raises, child never returns
if os.getpid() == parentpid:
raise
exctype = sys.exc_info()[0]
force = not issubclass(exctype, KeyboardInterrupt)
ui.traceback(force=force)
finally:
if os.getpid() != parentpid:
try:
ui.flush()
except: # never returns, no re-raises
pass
finally:
os._exit(ret & 255)
pids.add(pid)
selector = selectors.DefaultSelector()
for rfd, wfd in pipes:
os.close(wfd)
selector.register(os.fdopen(rfd, r'rb', 0), selectors.EVENT_READ)
def cleanup():
signal.signal(signal.SIGINT, oldhandler)
waitforworkers()
signal.signal(signal.SIGCHLD, oldchldhandler)
selector.close()
status = problem[0]
if status:
if status < 0:
os.kill(os.getpid(), -status)
sys.exit(status)
try:
openpipes = len(pipes)
while openpipes > 0:
for key, events in selector.select():
try:
yield util.pickle.load(key.fileobj)
except EOFError:
selector.unregister(key.fileobj)
key.fileobj.close()
openpipes -= 1
except IOError as e:
if e.errno == errno.EINTR:
continue
raise
except: # re-raises
killworkers()
cleanup()
raise
cleanup()

def _posixexitstatus(code):
    '''convert a posix exit status into the same form returned by
    os.spawnv

    returns None if the process was stopped instead of exiting'''
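    # illustrative mapping (not in the original source): a child that
    # called os._exit(1) maps to 1, a child killed by SIGTERM maps to
    # -signal.SIGTERM, and a stopped child falls through and returns None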
if os.WIFEXITED(code):
return os.WEXITSTATUS(code)
elif os.WIFSIGNALED(code):
return -os.WTERMSIG(code)

def _windowsworker(ui, func, staticargs, args):
class Worker(threading.Thread):
def __init__(self, taskqueue, resultqueue, func, staticargs,
group=None, target=None, name=None, verbose=None):
threading.Thread.__init__(self, group=group, target=target,
name=name, verbose=verbose)
self._taskqueue = taskqueue
self._resultqueue = resultqueue
self._func = func
self._staticargs = staticargs
self._interrupted = False
self.daemon = True
self.exception = None
def interrupt(self):
self._interrupted = True
def run(self):
try:
while not self._taskqueue.empty():
try:
args = self._taskqueue.get_nowait()
for res in self._func(*self._staticargs + (args,)):
self._resultqueue.put(res)
# threading doesn't provide a native way to
# interrupt execution. handle it manually at every
# iteration.
if self._interrupted:
return
except pycompat.queue.Empty:
break
except Exception as e:
# store the exception such that the main thread can resurface
# it as if the func was running without workers.
self.exception = e
raise
threads = []
def trykillworkers():
# Allow up to 1 second to clean worker threads nicely
cleanupend = time.time() + 1
for t in threads:
t.interrupt()
for t in threads:
remainingtime = cleanupend - time.time()
t.join(remainingtime)
if t.is_alive():
                # pass over the workers' joining failure. it is more
                # important to surface the initial exception than the
                # fact that one of the workers may be processing a large
                # task and does not get to handle the interruption.
ui.warn(_("failed to kill worker threads while "
"handling an exception\n"))
return
workers = _numworkers(ui)
resultqueue = pycompat.queue.Queue()
taskqueue = pycompat.queue.Queue()
# partition work to more pieces than workers to minimize the chance
# of uneven distribution of large tasks between the workers
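    # (illustrative: with 4 workers the work is spread over 80 queue
    # entries, so one slow chunk stalls a single thread rather than a
    # quarter of the remaining work)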
for pargs in partition(args, workers * 20):
taskqueue.put(pargs)
for _i in range(workers):
t = Worker(taskqueue, resultqueue, func, staticargs)
threads.append(t)
t.start()
try:
while len(threads) > 0:
while not resultqueue.empty():
yield resultqueue.get()
threads[0].join(0.05)
finishedthreads = [_t for _t in threads if not _t.is_alive()]
for t in finishedthreads:
if t.exception is not None:
raise t.exception
threads.remove(t)
except (Exception, KeyboardInterrupt): # re-raises
trykillworkers()
raise
while not resultqueue.empty():
yield resultqueue.get()

if pycompat.iswindows:
_platformworker = _windowsworker
else:
_platformworker = _posixworker
_exitstatus = _posixexitstatus

def partition(lst, nslices):
    '''partition a list into N slices of roughly equal size

    The current strategy takes every Nth element from the input. If
    we ever write workers that need to preserve grouping in input,
    we should consider allowing callers to specify a partition strategy.

    mpm is not a fan of this partitioning strategy when files are involved.
    In his words:

        Single-threaded Mercurial makes a point of creating and visiting
        files in a fixed order (alphabetical). When creating files in order,
        a typical filesystem is likely to allocate them on nearby regions on
        disk. Thus, when revisiting in the same order, locality is maximized
        and various forms of OS and disk-level caching and read-ahead get a
        chance to work.

        This effect can be quite significant on spinning disks. I discovered it
        circa Mercurial v0.4 when revlogs were named by hashes of filenames.
        Tarring a repo and copying it to another disk effectively randomized
        the revlog ordering on disk by sorting the revlogs by hash and suddenly
        performance of my kernel checkout benchmark dropped by ~10x because the
        "working set" of sectors visited no longer fit in the drive's cache and
        the workload switched from streaming to random I/O.

        What we should really be doing is to have workers read filenames from
        an ordered queue. This preserves locality and also keeps any worker
        from getting more than one file out of balance.
    '''
for i in range(nslices):
yield lst[i::nslices]
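
# An illustrative check of the slicing strategy (not part of the original
# module): list(partition([1, 2, 3, 4, 5, 6, 7], 3)) returns
# [[1, 4, 7], [2, 5], [3, 6]], i.e. every Nth element starting at offsets 0..2.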