# -*- coding: utf-8 -*-
# Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Utility functions.
"""
import logging
import requests
import hashlib
import json
import copy
import uuid
import appenlight.lib.helpers as h
from collections import namedtuple
from datetime import timedelta, datetime, date
from dogpile.cache.api import NO_VALUE
from appenlight.models import Datastores
from appenlight.validators import LogSearchSchema, TagListSchema, accepted_search_params
from itsdangerous import TimestampSigner
from ziggurat_foundations.permissions import ALL_PERMISSIONS
from ziggurat_foundations.models.services.user import UserService
from dateutil.relativedelta import relativedelta
from dateutil.rrule import rrule, MONTHLY, DAILY

log = logging.getLogger(__name__)

Stat = namedtuple("Stat", "start_interval value")

def default_extractor(item):
    """
    :param item - item to extract date from
    """
    if hasattr(item, "start_interval"):
        return item.start_interval
    return item["start_interval"]


# fast gap generator
def gap_gen_default(start, step, itemiterator, end_time=None, iv_extractor=None):
    """ Generates a sequence of time/value items based on step and itemiterator.
    If there are entries missing from the iterator, time/None pairs are yielded
    instead.

    :param start - datetime - what time should we start generating our values
    :param step - timedelta - step size
    :param itemiterator - iterable - we will check this iterable for values
        corresponding to generated steps
    :param end_time - datetime - when last step is >= end_time stop iterating
    :param iv_extractor - extracts current step from iterable items
    """
    if not iv_extractor:
        iv_extractor = default_extractor
    next_step = start
    minutes = step.total_seconds() / 60.0
    while next_step.minute % minutes != 0:
        next_step = next_step.replace(minute=next_step.minute - 1)
    for item in itemiterator:
        item_start_interval = iv_extractor(item)
        # do we have a match for the current time step in our data?
        # if not, yield a tuple with None as the value
        while next_step < item_start_interval:
            yield Stat(next_step, None)
            next_step = next_step + step
        if next_step == item_start_interval:
            yield Stat(item_start_interval, item)
            next_step = next_step + step
    if end_time:
        while next_step < end_time:
            yield Stat(next_step, None)
            next_step = next_step + step
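
# Illustrative usage of gap_gen_default (a sketch, kept as a comment so it is not
# executed at import time). `rows` is a placeholder for items carrying a
# "start_interval" datetime, e.g. pre-aggregated metric rows:
#
#     rows = [{"start_interval": datetime(2017, 1, 1, 10, 5), "value": 3}]
#     series = list(
#         gap_gen_default(
#             datetime(2017, 1, 1, 10, 0),
#             timedelta(minutes=5),
#             rows,
#             end_time=datetime(2017, 1, 1, 10, 20),
#         )
#     )
#     # -> Stat(10:00, None), Stat(10:05, rows[0]), Stat(10:10, None), Stat(10:15, None)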

class DateTimeEncoder(json.JSONEncoder):
    """ Simple datetime to ISO encoder for json serialization"""

    def default(self, obj):
        if isinstance(obj, date):
            return obj.isoformat()
        if isinstance(obj, datetime):
            return obj.isoformat()
        return json.JSONEncoder.default(self, obj)
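
# Illustrative usage (not executed here): DateTimeEncoder lets json.dumps handle
# datetime/date values by emitting them as ISO 8601 strings.
#
#     json.dumps({"ts": datetime.utcnow()}, cls=DateTimeEncoder)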

def channelstream_request(
    secret, endpoint, payload, throw_exceptions=False, servers=None
):
    responses = []
    if not servers:
        servers = []

    signer = TimestampSigner(secret)
    sig_for_server = signer.sign(endpoint)
    for secret, server in [(s["secret"], s["server"]) for s in servers]:
        response = {}
        secret_headers = {
            "x-channelstream-secret": sig_for_server,
            "x-channelstream-endpoint": endpoint,
            "Content-Type": "application/json",
        }
        url = "%s%s" % (server, endpoint)
        try:
            response = requests.post(
                url,
                data=json.dumps(payload, cls=DateTimeEncoder),
                headers=secret_headers,
                verify=False,
                timeout=2,
            ).json()
        except requests.exceptions.RequestException as e:
            if throw_exceptions:
                raise
        responses.append(response)
    return responses
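
# Illustrative call (a sketch; the payload shape depends on the channelstream
# endpoint being hit, and each server entry is a dict with "secret" and
# "server" keys, typically taken from application settings):
#
#     channelstream_request(
#         "secret", "/connect", {"username": "foo"},
#         servers=[{"secret": "secret", "server": "http://127.0.0.1:8000"}],
#     )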

def add_cors_headers(response):
    # allow CORS
    response.headers.add("Access-Control-Allow-Origin", "*")
    response.headers.add("XDomainRequestAllowed", "1")
    response.headers.add("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
    # response.headers.add('Access-Control-Allow-Credentials', 'true')
    response.headers.add(
        "Access-Control-Allow-Headers",
        "Content-Type, Depth, User-Agent, X-File-Size, X-Requested-With, If-Modified-Since, X-File-Name, Cache-Control, Pragma, Origin, Connection, Referer, Cookie",
    )
    response.headers.add("Access-Control-Max-Age", "86400")


from sqlalchemy.sql import compiler
from psycopg2.extensions import adapt as sqlescape

# or use the appropriate escape function from your db driver
def compile_query(query):
    dialect = query.session.bind.dialect
    statement = query.statement
    comp = compiler.SQLCompiler(dialect, statement)
    comp.compile()
    enc = dialect.encoding
    params = {}
    for k, v in comp.params.items():
        if isinstance(v, str):
            v = v.encode(enc)
        params[k] = sqlescape(v)
    return (comp.string.encode(enc) % params).decode(enc)
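
# Illustrative usage (a sketch; assumes `query` is a SQLAlchemy ORM query bound
# to a psycopg2-backed session and `SomeModel` is a hypothetical mapped class):
#
#     query = db_session.query(SomeModel).filter(SomeModel.id == 5)
#     raw_sql = compile_query(query)  # SQL string with parameters inlined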

def convert_es_type(input_data):
    """
    This might need to convert some text or other types to corresponding ES types
    """
    return str(input_data)


ProtoVersion = namedtuple("ProtoVersion", ["major", "minor", "patch"])


def parse_proto(input_data):
    try:
        parts = [int(x) for x in input_data.split(".")]
        while len(parts) < 3:
            parts.append(0)
        return ProtoVersion(*parts)
    except Exception as e:
        log.info("Unknown protocol version: %s" % e)
    return ProtoVersion(99, 99, 99)
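
# Illustrative behaviour (not executed here):
#
#     parse_proto("0.5")      # -> ProtoVersion(major=0, minor=5, patch=0)
#     parse_proto("garbage")  # -> ProtoVersion(major=99, minor=99, patch=99)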

def es_index_name_limiter(
    start_date=None, end_date=None, months_in_past=6, ixtypes=None
):
    """
    This function limits the search to 6 months by default so we don't have to
    query, for example, 300 elasticsearch indices for 20 years of historical data
    """

    # should be cached later
    def get_possible_names():
        return list(Datastores.es.indices.get_alias("*"))

    possible_names = get_possible_names()
    es_index_types = []
    if not ixtypes:
        ixtypes = ["reports", "metrics", "logs"]
    for t in ixtypes:
        if t == "reports":
            es_index_types.append("rcae_r_%s")
        elif t == "logs":
            es_index_types.append("rcae_l_%s")
        elif t == "metrics":
            es_index_types.append("rcae_m_%s")
        elif t == "uptime":
            es_index_types.append("rcae_uptime_ce_%s")
        elif t == "slow_calls":
            es_index_types.append("rcae_sc_%s")

    if start_date:
        start_date = copy.copy(start_date)
    else:
        if not end_date:
            end_date = datetime.utcnow()
        start_date = end_date + relativedelta(months=months_in_past * -1)

    if not end_date:
        end_date = start_date + relativedelta(months=months_in_past)

    index_dates = list(
        rrule(
            MONTHLY,
            dtstart=start_date.date().replace(day=1),
            until=end_date.date(),
            count=36,
        )
    )
    index_names = []
    for ix_type in es_index_types:
        to_extend = [
            ix_type % d.strftime("%Y_%m")
            for d in index_dates
            if ix_type % d.strftime("%Y_%m") in possible_names
        ]
        index_names.extend(to_extend)
        for day in list(
            rrule(DAILY, dtstart=start_date.date(), until=end_date.date(), count=366)
        ):
            ix_name = ix_type % day.strftime("%Y_%m_%d")
            if ix_name in possible_names:
                index_names.append(ix_name)
    return index_names
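
# Illustrative usage (a sketch; this queries Elasticsearch for existing aliases,
# so it is kept as a comment):
#
#     index_names = es_index_name_limiter(
#         start_date=datetime(2017, 1, 1),
#         end_date=datetime(2017, 2, 1),
#         ixtypes=["logs"],
#     )
#     # -> e.g. ["rcae_l_2017_01", "rcae_l_2017_02"] if those indices exist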

def build_filter_settings_from_query_dict(
    request, params=None, override_app_ids=None, resource_permissions=None
):
    """
    Builds list of normalized search terms for ES from query params,
    ensuring the application list is restricted to only applications the user
    has access to

    :param params (dictionary)
    :param override_app_ids - list of application ids to use instead of
        applications the user normally has access to
    """
    params = copy.deepcopy(params)
    applications = []
    if not resource_permissions:
        resource_permissions = ["view"]

    if request.user:
        applications = UserService.resources_with_perms(
            request.user, resource_permissions, resource_types=["application"]
        )

    # CRITICAL - this ensures our resultset is limited to only the ones
    # user has view permissions
    all_possible_app_ids = set([app.resource_id for app in applications])

    # if override is present we force permission for app to be present
    # this allows users to see dashboards and applications they would
    # normally not be able to
    if override_app_ids:
        all_possible_app_ids = set(override_app_ids)

    schema = LogSearchSchema().bind(resources=all_possible_app_ids)
    tag_schema = TagListSchema()
    filter_settings = schema.deserialize(params)
    tag_list = []
    for k, v in list(filter_settings.items()):
        if k in accepted_search_params:
            continue
        tag_list.append({"name": k, "value": v, "op": "eq"})
        # remove the key from filter_settings
        filter_settings.pop(k, None)

    tags = tag_schema.deserialize(tag_list)
    filter_settings["tags"] = tags
    return filter_settings
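
# Illustrative usage (a sketch; `request` is a Pyramid request and the params
# are typically `request.GET.mixed()`):
#
#     filter_settings = build_filter_settings_from_query_dict(
#         request, request.GET.mixed()
#     )
#     # resource ids are limited to apps the user can view; query params that
#     # are not in accepted_search_params end up in filter_settings["tags"]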

def gen_uuid():
    return str(uuid.uuid4())


def gen_uuid4_sha_hex():
    return hashlib.sha1(uuid.uuid4().bytes).hexdigest()


def permission_tuple_to_dict(data):
    out = {
        "user_name": None,
        "perm_name": data.perm_name,
        "owner": data.owner,
        "type": data.type,
        "resource_name": None,
        "resource_type": None,
        "resource_id": None,
        "group_name": None,
        "group_id": None,
    }
    if data.user:
        out["user_name"] = data.user.user_name
    if data.perm_name == ALL_PERMISSIONS:
        out["perm_name"] = "__all_permissions__"
    if data.resource:
        out["resource_name"] = data.resource.resource_name
        out["resource_type"] = data.resource.resource_type
        out["resource_id"] = data.resource.resource_id
    if data.group:
        out["group_name"] = data.group.group_name
        out["group_id"] = data.group.id
    return out

def get_cached_buckets(
    request,
    stats_since,
    end_time,
    fn,
    cache_key,
    gap_gen=None,
    db_session=None,
    step_interval=None,
    iv_extractor=None,
    rerange=False,
    *args,
    **kwargs
):
    """ Takes "fn" that should return some data and tries to load the data
    dividing it into daily buckets - if stats_since and end_time give a
    delta bigger than 24 hours, then only "todays" data is computed on the fly

    :param request: (request) request object
    :param stats_since: (datetime) start date of buckets range
    :param end_time: (datetime) end date of buckets range - utcnow() if None
    :param fn: (callable) callable to use to populate buckets; should have the
        following signature:
        def get_data(request, since_when, until, *args, **kwargs):
    :param cache_key: (string) cache key that will be used to build bucket
        caches
    :param gap_gen: (callable) gap generator - should return step intervals
        to use with our `fn` callable
    :param db_session: (Session) sqlalchemy session
    :param step_interval: (timedelta) optional step interval if we want to
        override the default determined from total start/end time delta
    :param iv_extractor: (callable) used to get step intervals from data
        returned by `fn` callable
    :param rerange: (bool) handy if we want to change ranges from hours to
        days when cached data is missing - will shorten execution time if `fn`
        callable supports that and we are working with multiple rows - like metrics
    :param args:
    :param kwargs:

    :return: iterable
    """
    if not end_time:
        end_time = datetime.utcnow().replace(second=0, microsecond=0)
    delta = end_time - stats_since
    # if smaller than 3 days we want to group by 5min else by 1h,
    # for 60 min group by min
    if not gap_gen:
        gap_gen = gap_gen_default
    if not iv_extractor:
        iv_extractor = default_extractor

    # do not use custom interval if total time range with new iv would exceed
    # end time
    if not step_interval or stats_since + step_interval >= end_time:
        if delta < h.time_deltas.get("12h")["delta"]:
            step_interval = timedelta(seconds=60)
        elif delta < h.time_deltas.get("3d")["delta"]:
            step_interval = timedelta(seconds=60 * 5)
        elif delta > h.time_deltas.get("2w")["delta"]:
            step_interval = timedelta(days=1)
        else:
            step_interval = timedelta(minutes=60)

    if step_interval >= timedelta(minutes=60):
        log.info(
            "cached_buckets:{}: adjusting start time "
            "for hourly or daily intervals".format(cache_key)
        )
        stats_since = stats_since.replace(hour=0, minute=0)

    ranges = [
        i.start_interval
        for i in list(gap_gen(stats_since, step_interval, [], end_time=end_time))
    ]
    buckets = {}
    storage_key = "buckets:" + cache_key + "{}|{}"
    # this means we basically cache per hour in 3-14 day intervals but i think
    # its fine at this point - will be faster than db access anyways

    if len(ranges) >= 1:
        last_ranges = [ranges[-1]]
    else:
        last_ranges = []
    if step_interval >= timedelta(minutes=60):
        for r in ranges:
            k = storage_key.format(step_interval.total_seconds(), r)
            value = request.registry.cache_regions.redis_day_30.get(k)
            # last buckets are never loaded from cache
            is_last_result = r >= end_time - timedelta(hours=6) or r in last_ranges
            if value is not NO_VALUE and not is_last_result:
                log.info(
                    "cached_buckets:{}: "
                    "loading range {} from cache".format(cache_key, r)
                )
                buckets[r] = value
            else:
                log.info(
                    "cached_buckets:{}: "
                    "loading range {} from storage".format(cache_key, r)
                )
                range_size = step_interval
                if (
                    step_interval == timedelta(minutes=60)
                    and not is_last_result
                    and rerange
                ):
                    range_size = timedelta(days=1)
                    r = r.replace(hour=0, minute=0)
                    log.info(
                        "cached_buckets:{}: "
                        "loading collapsed "
                        "range {} {}".format(cache_key, r, r + range_size)
                    )
                bucket_data = fn(
                    request,
                    r,
                    r + range_size,
                    step_interval,
                    gap_gen,
                    bucket_count=len(ranges),
                    *args,
                    **kwargs
                )
                for b in bucket_data:
                    b_iv = iv_extractor(b)
                    buckets[b_iv] = b
                    k2 = storage_key.format(step_interval.total_seconds(), b_iv)
                    request.registry.cache_regions.redis_day_30.set(k2, b)
        log.info("cached_buckets:{}: saving cache".format(cache_key))
    else:
        # bucket count is 1 for short time ranges <= 24h from now
        bucket_data = fn(
            request,
            stats_since,
            end_time,
            step_interval,
            gap_gen,
            bucket_count=1,
            *args,
            **kwargs
        )
        for b in bucket_data:
            buckets[iv_extractor(b)] = b
    return buckets
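
# Illustrative usage (a sketch; `fetch_metrics` is a hypothetical callable with
# the signature documented above, and caching goes through
# request.registry.cache_regions.redis_day_30):
#
#     def fetch_metrics(request, since_when, until, step_interval, gap_gen,
#                       bucket_count=None, **kwargs):
#         ...  # query the datastore and return rows keyed by "start_interval"
#
#     buckets = get_cached_buckets(
#         request,
#         datetime.utcnow() - timedelta(days=7),
#         None,
#         fetch_metrics,
#         "metrics_per_app:5",
#     )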

def get_cached_split_data(
    request, stats_since, end_time, fn, cache_key, db_session=None, *args, **kwargs
):
    """ Takes "fn" that should return some data and tries to load the data
    dividing it into 2 buckets - a cached "since_from" bucket and a "today"
    bucket - then the data can be reduced into a single value

    Data is cached if stats_since and end_time give a delta bigger
    than 24 hours - then only 24h is computed on the fly
    """
    if not end_time:
        end_time = datetime.utcnow().replace(second=0, microsecond=0)
    delta = end_time - stats_since

    if delta >= timedelta(minutes=60):
        log.info(
            "cached_split_data:{}: adjusting start time "
            "for hourly or daily intervals".format(cache_key)
        )
        stats_since = stats_since.replace(hour=0, minute=0)

    storage_key = "buckets_split_data:" + cache_key + ":{}|{}"
    old_end_time = end_time.replace(hour=0, minute=0)

    final_storage_key = storage_key.format(delta.total_seconds(), old_end_time)
    older_data = None

    cdata = request.registry.cache_regions.redis_day_7.get(final_storage_key)

    if cdata:
        log.info("cached_split_data:{}: found old bucket data".format(cache_key))
        older_data = cdata

    if stats_since < end_time - h.time_deltas.get("24h")["delta"] and not cdata:
        log.info(
            "cached_split_data:{}: didn't find the "
            "start bucket in cache so load older data".format(cache_key)
        )
        recent_stats_since = old_end_time
        older_data = fn(
            request,
            stats_since,
            recent_stats_since,
            db_session=db_session,
            *args,
            **kwargs
        )
        request.registry.cache_regions.redis_day_7.set(final_storage_key, older_data)
    elif stats_since < end_time - h.time_deltas.get("24h")["delta"]:
        recent_stats_since = old_end_time
    else:
        recent_stats_since = stats_since

    log.info(
        "cached_split_data:{}: loading fresh "
        "data buckets from last 24h".format(cache_key)
    )
    todays_data = fn(
        request, recent_stats_since, end_time, db_session=db_session, *args, **kwargs
    )
    return older_data, todays_data
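
# Illustrative usage (a sketch; `count_reports` is a hypothetical callable that
# reduces a date range to a value and accepts db_session as a keyword argument):
#
#     older, todays = get_cached_split_data(
#         request,
#         datetime.utcnow() - timedelta(days=30),
#         None,
#         count_reports,
#         "reports_count:5",
#     )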

def in_batches(seq, size):
    """
    Splits a sequence (sliceable iterable) into batches of the specified size
    :param seq (sequence)
    :param size integer
    """
    return (seq[pos : pos + size] for pos in range(0, len(seq), size))
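
# Illustrative usage (not executed here):
#
#     list(in_batches(list(range(5)), 2))  # -> [[0, 1], [2, 3], [4]]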

def get_es_info(cache_regions, es_conn):
    @cache_regions.memory_min_10.cache_on_arguments()
    def get_es_info_cached():
        returned_info = {"raw_info": es_conn.info()}
        returned_info["version"] = returned_info["raw_info"]["version"]["number"].split(
            "."
        )
        return returned_info

    return get_es_info_cached()