# worker.py - master-slave parallelism support
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import errno, os, signal, sys, threading
import util

def countcpus():
    '''try to count the number of CPUs on the system'''

    # posix
    try:
        n = int(os.sysconf('SC_NPROCESSORS_ONLN'))
        if n > 0:
            return n
    except (AttributeError, ValueError):
        pass

    # windows
    try:
        n = int(os.environ['NUMBER_OF_PROCESSORS'])
        if n > 0:
            return n
    except (KeyError, ValueError):
        pass

    return 1

def _numworkers(ui):
    s = ui.config('worker', 'numcpus')
    if s:
        try:
            n = int(s)
            if n >= 1:
                return n
        except ValueError:
            raise util.Abort(_('number of cpus must be an integer'))
    return min(max(countcpus(), 4), 32)
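
# the default above is clamped between 4 and 32 workers; to force a
# specific count, a user can set the option _numworkers reads, e.g. in
# an hgrc:
#
#   [worker]
#   numcpus = 8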

if os.name == 'posix':
    _startupcost = 0.01
else:
    _startupcost = 1e30

def worthwhile(ui, costperop, nops):
    '''try to determine whether the benefit of multiple processes can
    outweigh the cost of starting them'''
    linear = costperop * nops
    workers = _numworkers(ui)
    benefit = linear - (_startupcost * workers + linear / workers)
    return benefit >= 0.15
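
# worked example of the heuristic above, assuming the posix _startupcost
# of 0.01 and _numworkers(ui) == 4: 100 operations at 0.05s each give
# linear = 5.0 and benefit = 5.0 - (0.01 * 4 + 5.0 / 4) = 3.71, so
# parallelism wins; 10 operations at 0.01s each give linear = 0.1 and
# benefit = 0.1 - (0.01 * 4 + 0.1 / 4) = 0.035 < 0.15, so we stay
# single-process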

def worker(ui, costperarg, func, staticargs, args):
    '''run a function, possibly in parallel in multiple worker
    processes.

    returns a progress iterator

    costperarg - cost of a single task

    func - function to run

    staticargs - arguments to pass to every invocation of the function

    args - arguments to split into chunks, to pass to individual
    workers
    '''
    if worthwhile(ui, costperarg, len(args)):
        return _platformworker(ui, func, staticargs, args)
    return func(*staticargs + (args,))
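
# usage sketch (`copyfiles`, `src`, `dst` and `filelist` are hypothetical
# names): func must be a generator yielding (progress-count, item) pairs,
# which worker() streams back whether or not it forks:
#
#   def copyfiles(src, dst, files):
#       for f in files:
#           util.copyfile(os.path.join(src, f), os.path.join(dst, f))
#           yield 1, f
#
#   copied = 0
#   for i, item in worker(ui, 0.001, copyfiles, (src, dst), filelist):
#       copied += i
#       ui.progress(_('copying'), copied, item=item, total=len(filelist))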

def _posixworker(ui, func, staticargs, args):
    rfd, wfd = os.pipe()
    workers = _numworkers(ui)
    oldhandler = signal.getsignal(signal.SIGINT)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    pids, problem = [], [0]
    for pargs in partition(args, workers):
        pid = os.fork()
        if pid == 0:
            signal.signal(signal.SIGINT, oldhandler)
            try:
                os.close(rfd)
                for i, item in func(*(staticargs + (pargs,))):
                    os.write(wfd, '%d %s\n' % (i, item))
                os._exit(0)
            except KeyboardInterrupt:
                os._exit(255)
            # other exceptions are allowed to propagate, we rely
            # on lock.py's pid checks to avoid release callbacks
        pids.append(pid)
    pids.reverse()
    os.close(wfd)
    fp = os.fdopen(rfd, 'rb', 0)
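    # each child streams progress over the pipe as plain text, one
    # '%d %s\n' record per item (e.g. the bytes '1 a/b.txt\n'); the
    # loop at the end of this function parses each record back into
    # an (int, item) pair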
    def killworkers():
        # if one worker bails, there's no good reason to wait for the rest
        for p in pids:
            try:
                os.kill(p, signal.SIGTERM)
            except OSError, err:
                if err.errno != errno.ESRCH:
                    raise
    def waitforworkers():
        for _ in pids:
            st = _exitstatus(os.wait()[1])
            if st and not problem[0]:
                problem[0] = st
                killworkers()
    t = threading.Thread(target=waitforworkers)
    t.start()
    def cleanup():
        signal.signal(signal.SIGINT, oldhandler)
        t.join()
        status = problem[0]
        if status:
            if status < 0:
                os.kill(os.getpid(), -status)
            sys.exit(status)
    try:
        for line in fp:
            l = line.split(' ', 1)
            yield int(l[0]), l[1][:-1]
    except: # re-raises
        killworkers()
        cleanup()
        raise
    cleanup()

def _posixexitstatus(code):
    '''convert a posix exit status into the same form returned by
    os.spawnv

    returns None if the process was stopped instead of exiting'''
    if os.WIFEXITED(code):
        return os.WEXITSTATUS(code)
    elif os.WIFSIGNALED(code):
        return -os.WTERMSIG(code)
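
# e.g. a child that called os._exit(255) maps to 255, while one killed
# by SIGTERM maps to -15 (minus the signal number, as with os.spawnv)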

if os.name != 'nt':
    _platformworker = _posixworker
    _exitstatus = _posixexitstatus

def partition(lst, nslices):
    '''partition a list into N slices of equal size'''
    n = len(lst)
    chunk, slop = n / nslices, n % nslices
    end = 0
    for i in xrange(nslices):
        start = end
        end = start + chunk
        if slop:
            end += 1
            slop -= 1
        yield lst[start:end]
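
# for example, ten tasks split across three workers come out as slices
# of sizes 4, 3 and 3; the first `slop` slices absorb the remainder:
#
#   >>> [len(s) for s in partition(range(10), 3)]
#   [4, 3, 3]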