admin.py
203 lines
| 6.5 KiB
| text/x-python
|
PythonLexer
# -*- coding: utf-8 -*-

# Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r0 | ||||
import logging | ||||
import os | ||||
import pkg_resources | ||||
from datetime import datetime, timedelta | ||||
import psutil | ||||
import redis | ||||
from pyramid.view import view_config | ||||
from appenlight.models import DBSession | ||||
from appenlight.models import Datastores | ||||
from appenlight.lib.redis_keys import REDIS_KEYS | ||||
def bytes2human(total):
    """Render a byte count as a short human-readable string.

    Uses binary (1024-based) units and one decimal place: ``G`` for
    gibibytes, ``M`` for mebibytes, and ``K`` for everything smaller.
    """
    # check the larger units first; fall through to kibibytes otherwise
    for power, suffix in ((3, "G"), (2, "M")):
        threshold = 1024.0 ** power
        if total >= threshold:
            return "{:0.1f}{}".format(total / threshold, suffix)
    # anything below one mebibyte (including 0) is expressed in K
    return "{:0.1f}K".format(total / 1024.0)
r0 | ||||
log = logging.getLogger(__name__) | ||||
r153 | @view_config( | |||
route_name="section_view", | ||||
match_param=["section=admin_section", "view=system"], | ||||
renderer="json", | ||||
permission="root_administration", | ||||
) | ||||
r0 | def system(request): | |||
r153 | current_time = datetime.utcnow().replace(second=0, microsecond=0) - timedelta( | |||
minutes=1 | ||||
) | ||||
r0 | # global app counter | |||
r87 | processed_reports = request.registry.redis_conn.get( | |||
r153 | REDIS_KEYS["counters"]["reports_per_minute"].format(current_time) | |||
) | ||||
r0 | processed_reports = int(processed_reports) if processed_reports else 0 | |||
r87 | processed_logs = request.registry.redis_conn.get( | |||
r153 | REDIS_KEYS["counters"]["logs_per_minute"].format(current_time) | |||
) | ||||
r0 | processed_logs = int(processed_logs) if processed_logs else 0 | |||
r87 | processed_metrics = request.registry.redis_conn.get( | |||
r153 | REDIS_KEYS["counters"]["metrics_per_minute"].format(current_time) | |||
) | ||||
r0 | processed_metrics = int(processed_metrics) if processed_metrics else 0 | |||
waiting_reports = 0 | ||||
waiting_logs = 0 | ||||
waiting_metrics = 0 | ||||
waiting_other = 0 | ||||
r153 | if "redis" in request.registry.settings["celery.broker_type"]: | |||
r0 | redis_client = redis.StrictRedis.from_url( | |||
r153 | request.registry.settings["celery.broker_url"] | |||
) | ||||
waiting_reports = redis_client.llen("reports") | ||||
waiting_logs = redis_client.llen("logs") | ||||
waiting_metrics = redis_client.llen("metrics") | ||||
waiting_other = redis_client.llen("default") | ||||
r0 | ||||
# process | ||||
def replace_inf(val): | ||||
r153 | return val if val != psutil.RLIM_INFINITY else "unlimited" | |||
r0 | ||||
p = psutil.Process() | ||||
fd = p.rlimit(psutil.RLIMIT_NOFILE) | ||||
memlock = p.rlimit(psutil.RLIMIT_MEMLOCK) | ||||
self_info = { | ||||
r153 | "fds": {"soft": replace_inf(fd[0]), "hard": replace_inf(fd[1])}, | |||
"memlock": {"soft": replace_inf(memlock[0]), "hard": replace_inf(memlock[1])}, | ||||
r0 | } | |||
# disks | ||||
disks = [] | ||||
for part in psutil.disk_partitions(all=False): | ||||
r153 | if os.name == "nt": | |||
if "cdrom" in part.opts or part.fstype == "": | ||||
r0 | continue | |||
usage = psutil.disk_usage(part.mountpoint) | ||||
r153 | disks.append( | |||
{ | ||||
"device": part.device, | ||||
"total": bytes2human(usage.total), | ||||
"used": bytes2human(usage.used), | ||||
"free": bytes2human(usage.free), | ||||
"percentage": int(usage.percent), | ||||
"mountpoint": part.mountpoint, | ||||
"fstype": part.fstype, | ||||
} | ||||
) | ||||
r0 | ||||
# memory | ||||
memory_v = psutil.virtual_memory() | ||||
memory_s = psutil.swap_memory() | ||||
memory = { | ||||
r153 | "total": bytes2human(memory_v.total), | |||
"available": bytes2human(memory_v.available), | ||||
"percentage": memory_v.percent, | ||||
"used": bytes2human(memory_v.used), | ||||
"free": bytes2human(memory_v.free), | ||||
"active": bytes2human(memory_v.active), | ||||
"inactive": bytes2human(memory_v.inactive), | ||||
"buffers": bytes2human(memory_v.buffers), | ||||
"cached": bytes2human(memory_v.cached), | ||||
"swap_total": bytes2human(memory_s.total), | ||||
"swap_used": bytes2human(memory_s.used), | ||||
r0 | } | |||
# load | ||||
system_load = os.getloadavg() | ||||
# processes | ||||
min_mem = 1024 * 1024 * 40 # 40MB | ||||
process_info = [] | ||||
for p in psutil.process_iter(): | ||||
r152 | mem_used = p.memory_info().rss | |||
r0 | if mem_used < min_mem: | |||
continue | ||||
r153 | process_info.append( | |||
{ | ||||
"owner": p.username(), | ||||
"pid": p.pid, | ||||
"cpu": round(p.cpu_percent(interval=0), 1), | ||||
"mem_percentage": round(p.memory_percent(), 1), | ||||
"mem_usage": bytes2human(mem_used), | ||||
"name": p.name(), | ||||
"command": " ".join(p.cmdline()), | ||||
} | ||||
) | ||||
process_info = sorted(process_info, key=lambda x: x["mem_percentage"], reverse=True) | ||||
r0 | ||||
# pg tables | ||||
r153 | db_size_query = """ | |||
r0 | SELECT tablename, pg_total_relation_size(tablename::text) size | |||
FROM pg_tables WHERE tablename NOT LIKE 'pg_%' AND | ||||
r153 | tablename NOT LIKE 'sql_%' ORDER BY size DESC;""" | |||
r0 | ||||
db_tables = [] | ||||
for row in DBSession.execute(db_size_query): | ||||
r153 | db_tables.append( | |||
{"size_human": bytes2human(row.size), "table_name": row.tablename} | ||||
) | ||||
r0 | ||||
# es indices | ||||
es_indices = [] | ||||
r153 | result = Datastores.es.indices.stats(metric=["store, docs"]) | |||
for ix, stats in result["indices"].items(): | ||||
size = stats["primaries"]["store"]["size_in_bytes"] | ||||
es_indices.append({"name": ix, "size": size, "size_human": bytes2human(size)}) | ||||
r0 | ||||
# packages | ||||
r153 | packages = ( | |||
{"name": p.project_name, "version": p.version} | ||||
for p in pkg_resources.working_set | ||||
) | ||||
return { | ||||
"db_tables": db_tables, | ||||
"es_indices": sorted(es_indices, key=lambda x: x["size"], reverse=True), | ||||
"process_info": process_info, | ||||
"system_load": system_load, | ||||
"disks": disks, | ||||
"memory": memory, | ||||
"packages": sorted(packages, key=lambda x: x["name"].lower()), | ||||
"current_time": current_time, | ||||
"queue_stats": { | ||||
"processed_reports": processed_reports, | ||||
"processed_logs": processed_logs, | ||||
"processed_metrics": processed_metrics, | ||||
"waiting_reports": waiting_reports, | ||||
"waiting_logs": waiting_logs, | ||||
"waiting_metrics": waiting_metrics, | ||||
"waiting_other": waiting_other, | ||||
}, | ||||
"self_info": self_info, | ||||
} | ||||