Auto status change to "Under Review"
@@ -1,707 +1,705 @@
# -*- coding: utf-8 -*-

# Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import bisect
import collections
import math
from datetime import datetime, timedelta

import sqlalchemy as sa
import elasticsearch.exceptions
import elasticsearch.helpers

from celery.utils.log import get_task_logger
from zope.sqlalchemy import mark_changed
from pyramid.threadlocal import get_current_request, get_current_registry
from ziggurat_foundations.models.services.resource import ResourceService

from appenlight.celery import celery
from appenlight.models.report_group import ReportGroup
from appenlight.models import DBSession, Datastores
from appenlight.models.report import Report
from appenlight.models.log import Log
from appenlight.models.metric import Metric
from appenlight.models.event import Event

from appenlight.models.services.application import ApplicationService
from appenlight.models.services.event import EventService
from appenlight.models.services.log import LogService
from appenlight.models.services.report import ReportService
from appenlight.models.services.report_group import ReportGroupService
from appenlight.models.services.user import UserService
from appenlight.models.tag import Tag
from appenlight.lib import print_traceback
from appenlight.lib.utils import parse_proto, in_batches
from appenlight.lib.ext_json import json
from appenlight.lib.redis_keys import REDIS_KEYS
from appenlight.lib.enums import ReportType

log = get_task_logger(__name__)

sample_boundries = (
    list(range(100, 1000, 100))
    + list(range(1000, 10000, 1000))
    + list(range(10000, 100000, 5000))
)


def pick_sample(total_occurences, report_type=None):
    every = 1.0
    position = bisect.bisect_left(sample_boundries, total_occurences)
    if position > 0:
        if report_type == ReportType.not_found:
            divide = 10.0
        else:
            divide = 100.0
        every = sample_boundries[position - 1] / divide
    return total_occurences % every == 0


@celery.task(queue="default", default_retry_delay=1, max_retries=2)
def test_exception_task():
    log.error("test celery log", extra={"location": "celery"})
    log.warning("test celery log", extra={"location": "celery"})
    raise Exception("Celery exception test")


@celery.task(queue="default", default_retry_delay=1, max_retries=2)
def test_retry_exception_task():
    try:
        import time

        time.sleep(1.3)
        log.error("test retry celery log", extra={"location": "celery"})
        log.warning("test retry celery log", extra={"location": "celery"})
        raise Exception("Celery exception test")
    except Exception as exc:
        if celery.conf["CELERY_EAGER_PROPAGATES_EXCEPTIONS"]:
            raise
        test_retry_exception_task.retry(exc=exc)


@celery.task(queue="reports", default_retry_delay=600, max_retries=144)
def add_reports(resource_id, request_params, dataset, **kwargs):
    proto_version = parse_proto(request_params.get("protocol_version", ""))
    current_time = datetime.utcnow().replace(second=0, microsecond=0)
    try:
        # we will store solr docs here for single insert
        es_report_docs = {}
        es_report_group_docs = {}
        resource = ApplicationService.by_id(resource_id)

        tags = []
        es_slow_calls_docs = {}
        es_reports_stats_rows = {}
        for report_data in dataset:
            # build report details for later
            added_details = 0
            report = Report()
            report.set_data(report_data, resource, proto_version)
            report._skip_ft_index = True

            # find latest group in this months partition
            report_group = ReportGroupService.by_hash_and_resource(
                report.resource_id,
                report.grouping_hash,
                since_when=datetime.utcnow().date().replace(day=1),
            )
            occurences = report_data.get("occurences", 1)
            if not report_group:
                # total reports will be +1 moment later
                report_group = ReportGroup(
                    grouping_hash=report.grouping_hash,
                    occurences=0,
                    total_reports=0,
                    last_report=0,
                    priority=report.priority,
                    error=report.error,
                    first_timestamp=report.start_time,
                )
                report_group._skip_ft_index = True
                report_group.report_type = report.report_type
            report.report_group_time = report_group.first_timestamp
            add_sample = pick_sample(
                report_group.occurences, report_type=report_group.report_type
            )
            if add_sample:
                resource.report_groups.append(report_group)
                report_group.reports.append(report)
                added_details += 1
                DBSession.flush()
                if report.partition_id not in es_report_docs:
                    es_report_docs[report.partition_id] = []
                es_report_docs[report.partition_id].append(report.es_doc())
                tags.extend(list(report.tags.items()))
                slow_calls = report.add_slow_calls(report_data, report_group)
                DBSession.flush()
                for s_call in slow_calls:
                    if s_call.partition_id not in es_slow_calls_docs:
                        es_slow_calls_docs[s_call.partition_id] = []
                    es_slow_calls_docs[s_call.partition_id].append(s_call.es_doc())
                # try generating new stat rows if needed
            else:
                # required for postprocessing to not fail later
                report.report_group = report_group

            stat_row = ReportService.generate_stat_rows(report, resource, report_group)
            if stat_row.partition_id not in es_reports_stats_rows:
                es_reports_stats_rows[stat_row.partition_id] = []
            es_reports_stats_rows[stat_row.partition_id].append(stat_row.es_doc())

            # see if we should mark 10th occurence of report
            last_occurences_10 = int(math.floor(report_group.occurences / 10))
            curr_occurences_10 = int(
                math.floor((report_group.occurences + report.occurences) / 10)
            )
            last_occurences_100 = int(math.floor(report_group.occurences / 100))
            curr_occurences_100 = int(
                math.floor((report_group.occurences + report.occurences) / 100)
            )
            notify_occurences_10 = last_occurences_10 != curr_occurences_10
            notify_occurences_100 = last_occurences_100 != curr_occurences_100
            report_group.occurences = ReportGroup.occurences + occurences
            report_group.last_timestamp = report.start_time
            report_group.summed_duration = ReportGroup.summed_duration + report.duration
            summed_duration = ReportGroup.summed_duration + report.duration
            summed_occurences = ReportGroup.occurences + occurences
            report_group.average_duration = summed_duration / summed_occurences
            report_group.run_postprocessing(report)
            if added_details:
                report_group.total_reports = ReportGroup.total_reports + 1
                report_group.last_report = report.id
            report_group.set_notification_info(
                notify_10=notify_occurences_10, notify_100=notify_occurences_100
            )
            DBSession.flush()
            report_group.get_report().notify_channel(report_group)
            if report_group.partition_id not in es_report_group_docs:
                es_report_group_docs[report_group.partition_id] = []
            es_report_group_docs[report_group.partition_id].append(
                report_group.es_doc()
            )

            action = "REPORT"
            log_msg = "%s: %s %s, client: %s, proto: %s" % (
                action,
                report_data.get("http_status", "unknown"),
                str(resource),
                report_data.get("client"),
                proto_version,
            )
            log.info(log_msg)
        total_reports = len(dataset)
        redis_pipeline = Datastores.redis.pipeline(transaction=False)
        key = REDIS_KEYS["counters"]["reports_per_minute"].format(current_time)
        redis_pipeline.incr(key, total_reports)
        redis_pipeline.expire(key, 3600 * 24)
        key = REDIS_KEYS["counters"]["events_per_minute_per_user"].format(
            resource.owner_user_id, current_time
        )
        redis_pipeline.incr(key, total_reports)
        redis_pipeline.expire(key, 3600)
        key = REDIS_KEYS["counters"]["reports_per_hour_per_app"].format(
            resource_id, current_time.replace(minute=0)
        )
        redis_pipeline.incr(key, total_reports)
        redis_pipeline.expire(key, 3600 * 24 * 7)
        redis_pipeline.sadd(
            REDIS_KEYS["apps_that_got_new_data_per_hour"].format(
                current_time.replace(minute=0)
            ),
            resource_id,
        )
        redis_pipeline.execute()

        add_reports_es(es_report_group_docs, es_report_docs)
        add_reports_slow_calls_es(es_slow_calls_docs)
        add_reports_stats_rows_es(es_reports_stats_rows)
        return True
    except Exception as exc:
        print_traceback(log)
        if celery.conf["CELERY_EAGER_PROPAGATES_EXCEPTIONS"]:
            raise
        add_reports.retry(exc=exc)


@celery.task(queue="es", default_retry_delay=600, max_retries=144)
def add_reports_es(report_group_docs, report_docs):
    for k, v in report_group_docs.items():
        to_update = {"_index": k, "_type": "report"}
        [i.update(to_update) for i in v]
        elasticsearch.helpers.bulk(Datastores.es, v)
    for k, v in report_docs.items():
        to_update = {"_index": k, "_type": "report"}
        [i.update(to_update) for i in v]
        elasticsearch.helpers.bulk(Datastores.es, v)


@celery.task(queue="es", default_retry_delay=600, max_retries=144)
def add_reports_slow_calls_es(es_docs):
    for k, v in es_docs.items():
        to_update = {"_index": k, "_type": "log"}
        [i.update(to_update) for i in v]
        elasticsearch.helpers.bulk(Datastores.es, v)


@celery.task(queue="es", default_retry_delay=600, max_retries=144)
def add_reports_stats_rows_es(es_docs):
    for k, v in es_docs.items():
        to_update = {"_index": k, "_type": "report"}
        [i.update(to_update) for i in v]
        elasticsearch.helpers.bulk(Datastores.es, v)


@celery.task(queue="logs", default_retry_delay=600, max_retries=144)
def add_logs(resource_id, request_params, dataset, **kwargs):
    proto_version = request_params.get("protocol_version")
    current_time = datetime.utcnow().replace(second=0, microsecond=0)

    try:
        es_docs = collections.defaultdict(list)
        resource = ApplicationService.by_id_cached()(resource_id)
        resource = DBSession.merge(resource, load=False)
        ns_pairs = []
        for entry in dataset:
            # gather pk and ns so we can remove older versions of row later
            if entry["primary_key"] is not None:
                ns_pairs.append({"pk": entry["primary_key"], "ns": entry["namespace"]})
            log_entry = Log()
            log_entry.set_data(entry, resource=resource)
            log_entry._skip_ft_index = True
            resource.logs.append(log_entry)
            DBSession.flush()
            # insert non pk rows first
            if entry["primary_key"] is None:
                es_docs[log_entry.partition_id].append(log_entry.es_doc())

        # 2nd pass to delete all log entries from db for same pk/ns pair
        if ns_pairs:
            ids_to_delete = []
            es_docs = collections.defaultdict(list)
            es_docs_to_delete = collections.defaultdict(list)
            found_pkey_logs = LogService.query_by_primary_key_and_namespace(
                list_of_pairs=ns_pairs
            )
            log_dict = {}
            for log_entry in found_pkey_logs:
                log_key = (log_entry.primary_key, log_entry.namespace)
                if log_key not in log_dict:
                    log_dict[log_key] = []
                log_dict[log_key].append(log_entry)

            for ns, entry_list in log_dict.items():
                entry_list = sorted(entry_list, key=lambda x: x.timestamp)
                # newest row needs to be indexed in es
                log_entry = entry_list[-1]
                # delete everything from pg and ES, leave the last row in pg
                for e in entry_list[:-1]:
                    ids_to_delete.append(e.log_id)
                    es_docs_to_delete[e.partition_id].append(e.delete_hash)

                es_docs_to_delete[log_entry.partition_id].append(log_entry.delete_hash)

                es_docs[log_entry.partition_id].append(log_entry.es_doc())

            if ids_to_delete:
                query = DBSession.query(Log).filter(Log.log_id.in_(ids_to_delete))
                query.delete(synchronize_session=False)
            if es_docs_to_delete:
                # batch this to avoid problems with default ES bulk limits
                for es_index in es_docs_to_delete.keys():
                    for batch in in_batches(es_docs_to_delete[es_index], 20):
                        query = {"query": {"terms": {"delete_hash": batch}}}

                        try:
                            Datastores.es.delete_by_query(
                                index=es_index,
+                                doc_type="log",
+                                body=query,
+                                conflicts="proceed",
                            )
332 | except elasticsearch.exceptions.NotFoundError as exc: |
|
334 | except elasticsearch.exceptions.NotFoundError as exc: | |
333 | msg = "skipping index {}".format(es_index) |
|
335 | msg = "skipping index {}".format(es_index) | |
334 | log.info(msg) |
|
336 | log.info(msg) | |
335 |
|
337 | |||
336 | total_logs = len(dataset) |
|
338 | total_logs = len(dataset) | |
337 |
|
339 | |||
338 | log_msg = "LOG_NEW: %s, entries: %s, proto:%s" % ( |
|
340 | log_msg = "LOG_NEW: %s, entries: %s, proto:%s" % ( | |
339 | str(resource), |
|
341 | str(resource), | |
340 | total_logs, |
|
342 | total_logs, | |
341 | proto_version, |
|
343 | proto_version, | |
342 | ) |
|
344 | ) | |
343 | log.info(log_msg) |
|
345 | log.info(log_msg) | |
344 | # mark_changed(session) |
|
346 | # mark_changed(session) | |
345 | redis_pipeline = Datastores.redis.pipeline(transaction=False) |
|
347 | redis_pipeline = Datastores.redis.pipeline(transaction=False) | |
346 | key = REDIS_KEYS["counters"]["logs_per_minute"].format(current_time) |
|
348 | key = REDIS_KEYS["counters"]["logs_per_minute"].format(current_time) | |
347 | redis_pipeline.incr(key, total_logs) |
|
349 | redis_pipeline.incr(key, total_logs) | |
348 | redis_pipeline.expire(key, 3600 * 24) |
|
350 | redis_pipeline.expire(key, 3600 * 24) | |
349 | key = REDIS_KEYS["counters"]["events_per_minute_per_user"].format( |
|
351 | key = REDIS_KEYS["counters"]["events_per_minute_per_user"].format( | |
350 | resource.owner_user_id, current_time |
|
352 | resource.owner_user_id, current_time | |
351 | ) |
|
353 | ) | |
352 | redis_pipeline.incr(key, total_logs) |
|
354 | redis_pipeline.incr(key, total_logs) | |
353 | redis_pipeline.expire(key, 3600) |
|
355 | redis_pipeline.expire(key, 3600) | |
354 | key = REDIS_KEYS["counters"]["logs_per_hour_per_app"].format( |
|
356 | key = REDIS_KEYS["counters"]["logs_per_hour_per_app"].format( | |
355 | resource_id, current_time.replace(minute=0) |
|
357 | resource_id, current_time.replace(minute=0) | |
356 | ) |
|
358 | ) | |
357 | redis_pipeline.incr(key, total_logs) |
|
359 | redis_pipeline.incr(key, total_logs) | |
358 | redis_pipeline.expire(key, 3600 * 24 * 7) |
|
360 | redis_pipeline.expire(key, 3600 * 24 * 7) | |
359 | redis_pipeline.sadd( |
|
361 | redis_pipeline.sadd( | |
360 | REDIS_KEYS["apps_that_got_new_data_per_hour"].format( |
|
362 | REDIS_KEYS["apps_that_got_new_data_per_hour"].format( | |
361 | current_time.replace(minute=0) |
|
363 | current_time.replace(minute=0) | |
362 | ), |
|
364 | ), | |
363 | resource_id, |
|
365 | resource_id, | |
364 | ) |
|
366 | ) | |
365 | redis_pipeline.execute() |
|
367 | redis_pipeline.execute() | |
366 | add_logs_es(es_docs) |
|
368 | add_logs_es(es_docs) | |
367 | return True |
|
369 | return True | |
368 | except Exception as exc: |
|
370 | except Exception as exc: | |
369 | print_traceback(log) |
|
371 | print_traceback(log) | |
370 | if celery.conf["CELERY_EAGER_PROPAGATES_EXCEPTIONS"]: |
|
372 | if celery.conf["CELERY_EAGER_PROPAGATES_EXCEPTIONS"]: | |
371 | raise |
|
373 | raise | |
372 | add_logs.retry(exc=exc) |
|
374 | add_logs.retry(exc=exc) | |
373 |
|
375 | |||
374 |
|
376 | |||
375 | @celery.task(queue="es", default_retry_delay=600, max_retries=144) |
|
377 | @celery.task(queue="es", default_retry_delay=600, max_retries=144) | |
376 | def add_logs_es(es_docs): |
|
378 | def add_logs_es(es_docs): | |
377 | for k, v in es_docs.items(): |
|
379 | for k, v in es_docs.items(): | |
378 | to_update = {"_index": k, "_type": "log"} |
|
380 | to_update = {"_index": k, "_type": "log"} | |
379 | [i.update(to_update) for i in v] |
|
381 | [i.update(to_update) for i in v] | |
380 | elasticsearch.helpers.bulk(Datastores.es, v) |
|
382 | elasticsearch.helpers.bulk(Datastores.es, v) | |
381 |
|
383 | |||
382 |
|
384 | |||
383 | @celery.task(queue="metrics", default_retry_delay=600, max_retries=144) |
|
385 | @celery.task(queue="metrics", default_retry_delay=600, max_retries=144) | |
384 | def add_metrics(resource_id, request_params, dataset, proto_version): |
|
386 | def add_metrics(resource_id, request_params, dataset, proto_version): | |
385 | current_time = datetime.utcnow().replace(second=0, microsecond=0) |
|
387 | current_time = datetime.utcnow().replace(second=0, microsecond=0) | |
386 | try: |
|
388 | try: | |
387 | resource = ApplicationService.by_id_cached()(resource_id) |
|
389 | resource = ApplicationService.by_id_cached()(resource_id) | |
388 | resource = DBSession.merge(resource, load=False) |
|
390 | resource = DBSession.merge(resource, load=False) | |
389 | es_docs = [] |
|
391 | es_docs = [] | |
390 | rows = [] |
|
392 | rows = [] | |
391 | for metric in dataset: |
|
393 | for metric in dataset: | |
392 | tags = dict(metric["tags"]) |
|
394 | tags = dict(metric["tags"]) | |
393 | server_n = tags.get("server_name", metric["server_name"]).lower() |
|
395 | server_n = tags.get("server_name", metric["server_name"]).lower() | |
394 | tags["server_name"] = server_n or "unknown" |
|
396 | tags["server_name"] = server_n or "unknown" | |
395 | new_metric = Metric( |
|
397 | new_metric = Metric( | |
396 | timestamp=metric["timestamp"], |
|
398 | timestamp=metric["timestamp"], | |
397 | resource_id=resource.resource_id, |
|
399 | resource_id=resource.resource_id, | |
398 | namespace=metric["namespace"], |
|
400 | namespace=metric["namespace"], | |
399 | tags=tags, |
|
401 | tags=tags, | |
400 | ) |
|
402 | ) | |
401 | rows.append(new_metric) |
|
403 | rows.append(new_metric) | |
402 | es_docs.append(new_metric.es_doc()) |
|
404 | es_docs.append(new_metric.es_doc()) | |
403 | session = DBSession() |
|
405 | session = DBSession() | |
404 | session.bulk_save_objects(rows) |
|
406 | session.bulk_save_objects(rows) | |
405 | session.flush() |
|
407 | session.flush() | |
406 |
|
408 | |||
407 | action = "METRICS" |
|
409 | action = "METRICS" | |
408 | metrics_msg = "%s: %s, metrics: %s, proto:%s" % ( |
|
410 | metrics_msg = "%s: %s, metrics: %s, proto:%s" % ( | |
409 | action, |
|
411 | action, | |
410 | str(resource), |
|
412 | str(resource), | |
411 | len(dataset), |
|
413 | len(dataset), | |
412 | proto_version, |
|
414 | proto_version, | |
413 | ) |
|
415 | ) | |
414 | log.info(metrics_msg) |
|
416 | log.info(metrics_msg) | |
415 |
|
417 | |||
416 | mark_changed(session) |
|
418 | mark_changed(session) | |
417 | redis_pipeline = Datastores.redis.pipeline(transaction=False) |
|
419 | redis_pipeline = Datastores.redis.pipeline(transaction=False) | |
418 | key = REDIS_KEYS["counters"]["metrics_per_minute"].format(current_time) |
|
420 | key = REDIS_KEYS["counters"]["metrics_per_minute"].format(current_time) | |
419 | redis_pipeline.incr(key, len(rows)) |
|
421 | redis_pipeline.incr(key, len(rows)) | |
420 | redis_pipeline.expire(key, 3600 * 24) |
|
422 | redis_pipeline.expire(key, 3600 * 24) | |
421 | key = REDIS_KEYS["counters"]["events_per_minute_per_user"].format( |
|
423 | key = REDIS_KEYS["counters"]["events_per_minute_per_user"].format( | |
422 | resource.owner_user_id, current_time |
|
424 | resource.owner_user_id, current_time | |
423 | ) |
|
425 | ) | |
424 | redis_pipeline.incr(key, len(rows)) |
|
426 | redis_pipeline.incr(key, len(rows)) | |
425 | redis_pipeline.expire(key, 3600) |
|
427 | redis_pipeline.expire(key, 3600) | |
426 | key = REDIS_KEYS["counters"]["metrics_per_hour_per_app"].format( |
|
428 | key = REDIS_KEYS["counters"]["metrics_per_hour_per_app"].format( | |
427 | resource_id, current_time.replace(minute=0) |
|
429 | resource_id, current_time.replace(minute=0) | |
428 | ) |
|
430 | ) | |
429 | redis_pipeline.incr(key, len(rows)) |
|
431 | redis_pipeline.incr(key, len(rows)) | |
430 | redis_pipeline.expire(key, 3600 * 24 * 7) |
|
432 | redis_pipeline.expire(key, 3600 * 24 * 7) | |
431 | redis_pipeline.sadd( |
|
433 | redis_pipeline.sadd( | |
432 | REDIS_KEYS["apps_that_got_new_data_per_hour"].format( |
|
434 | REDIS_KEYS["apps_that_got_new_data_per_hour"].format( | |
433 | current_time.replace(minute=0) |
|
435 | current_time.replace(minute=0) | |
434 | ), |
|
436 | ), | |
435 | resource_id, |
|
437 | resource_id, | |
436 | ) |
|
438 | ) | |
437 | redis_pipeline.execute() |
|
439 | redis_pipeline.execute() | |
438 | add_metrics_es(es_docs) |
|
440 | add_metrics_es(es_docs) | |
439 | return True |
|
441 | return True | |
440 | except Exception as exc: |
|
442 | except Exception as exc: | |
441 | print_traceback(log) |
|
443 | print_traceback(log) | |
442 | if celery.conf["CELERY_EAGER_PROPAGATES_EXCEPTIONS"]: |
|
444 | if celery.conf["CELERY_EAGER_PROPAGATES_EXCEPTIONS"]: | |
443 | raise |
|
445 | raise | |
444 | add_metrics.retry(exc=exc) |
|
446 | add_metrics.retry(exc=exc) | |
445 |
|
447 | |||
446 |
|
448 | |||
447 | @celery.task(queue="es", default_retry_delay=600, max_retries=144) |
|
449 | @celery.task(queue="es", default_retry_delay=600, max_retries=144) | |
448 | def add_metrics_es(es_docs): |
|
450 | def add_metrics_es(es_docs): | |
449 | for doc in es_docs: |
|
451 | for doc in es_docs: | |
450 | partition = "rcae_m_%s" % doc["timestamp"].strftime("%Y_%m_%d") |
|
452 | partition = "rcae_m_%s" % doc["timestamp"].strftime("%Y_%m_%d") | |
451 | Datastores.es.index(partition, "log", doc) |
|
453 | Datastores.es.index(partition, "log", doc) | |
452 |
|
454 | |||
453 |
|
455 | |||
454 | @celery.task(queue="default", default_retry_delay=5, max_retries=2) |
|
456 | @celery.task(queue="default", default_retry_delay=5, max_retries=2) | |
455 | def check_user_report_notifications(resource_id): |
|
457 | def check_user_report_notifications(resource_id): | |
456 | since_when = datetime.utcnow() |
|
458 | since_when = datetime.utcnow() | |
457 | try: |
|
459 | try: | |
458 | request = get_current_request() |
|
460 | request = get_current_request() | |
459 | application = ApplicationService.by_id(resource_id) |
|
461 | application = ApplicationService.by_id(resource_id) | |
460 | if not application: |
|
462 | if not application: | |
461 | return |
|
463 | return | |
462 | error_key = REDIS_KEYS["reports_to_notify_per_type_per_app"].format( |
|
464 | error_key = REDIS_KEYS["reports_to_notify_per_type_per_app"].format( | |
463 | ReportType.error, resource_id |
|
465 | ReportType.error, resource_id | |
464 | ) |
|
466 | ) | |
465 | slow_key = REDIS_KEYS["reports_to_notify_per_type_per_app"].format( |
|
467 | slow_key = REDIS_KEYS["reports_to_notify_per_type_per_app"].format( | |
466 | ReportType.slow, resource_id |
|
468 | ReportType.slow, resource_id | |
467 | ) |
|
469 | ) | |
468 | error_group_ids = Datastores.redis.smembers(error_key) |
|
470 | error_group_ids = Datastores.redis.smembers(error_key) | |
469 | slow_group_ids = Datastores.redis.smembers(slow_key) |
|
471 | slow_group_ids = Datastores.redis.smembers(slow_key) | |
470 | Datastores.redis.delete(error_key) |
|
472 | Datastores.redis.delete(error_key) | |
471 | Datastores.redis.delete(slow_key) |
|
473 | Datastores.redis.delete(slow_key) | |
472 | err_gids = [int(g_id) for g_id in error_group_ids] |
|
474 | err_gids = [int(g_id) for g_id in error_group_ids] | |
473 | slow_gids = [int(g_id) for g_id in list(slow_group_ids)] |
|
475 | slow_gids = [int(g_id) for g_id in list(slow_group_ids)] | |
474 | group_ids = err_gids + slow_gids |
|
476 | group_ids = err_gids + slow_gids | |
475 | occurence_dict = {} |
|
477 | occurence_dict = {} | |
476 | for g_id in group_ids: |
|
478 | for g_id in group_ids: | |
477 | key = REDIS_KEYS["counters"]["report_group_occurences"].format(g_id) |
|
479 | key = REDIS_KEYS["counters"]["report_group_occurences"].format(g_id) | |
478 | val = Datastores.redis.get(key) |
|
480 | val = Datastores.redis.get(key) | |
479 | Datastores.redis.delete(key) |
|
481 | Datastores.redis.delete(key) | |
480 | if val: |
|
482 | if val: | |
481 | occurence_dict[g_id] = int(val) |
|
483 | occurence_dict[g_id] = int(val) | |
482 | else: |
|
484 | else: | |
483 | occurence_dict[g_id] = 1 |
|
485 | occurence_dict[g_id] = 1 | |
484 | report_groups = ReportGroupService.by_ids(group_ids) |
|
486 | report_groups = ReportGroupService.by_ids(group_ids) | |
485 | report_groups.options(sa.orm.joinedload(ReportGroup.last_report_ref)) |
|
487 | report_groups.options(sa.orm.joinedload(ReportGroup.last_report_ref)) | |
486 |
|
488 | |||
487 | ApplicationService.check_for_groups_alert( |
|
489 | ApplicationService.check_for_groups_alert( | |
488 | application, |
|
490 | application, | |
489 | "alert", |
|
491 | "alert", | |
490 | report_groups=report_groups, |
|
492 | report_groups=report_groups, | |
491 | occurence_dict=occurence_dict, |
|
493 | occurence_dict=occurence_dict, | |
492 | ) |
|
494 | ) | |
493 | users = set( |
|
495 | users = set( | |
494 | [p.user for p in ResourceService.users_for_perm(application, "view")] |
|
496 | [p.user for p in ResourceService.users_for_perm(application, "view")] | |
495 | ) |
|
497 | ) | |
496 | report_groups = report_groups.all() |
|
498 | report_groups = report_groups.all() | |
497 | for user in users: |
|
499 | for user in users: | |
498 | UserService.report_notify( |
|
500 | UserService.report_notify( | |
499 | user, |
|
501 | user, | |
500 | request, |
|
502 | request, | |
501 | application, |
|
503 | application, | |
502 | report_groups=report_groups, |
|
504 | report_groups=report_groups, | |
503 | occurence_dict=occurence_dict, |
|
505 | occurence_dict=occurence_dict, | |
504 | ) |
|
506 | ) | |
505 | for group in report_groups: |
|
507 | for group in report_groups: | |
506 | # marks report_groups as notified |
|
508 | # marks report_groups as notified | |
507 | if not group.notified: |
|
509 | if not group.notified: | |
508 | group.notified = True |
|
510 | group.notified = True | |
509 | except Exception as exc: |
|
511 | except Exception as exc: | |
510 | print_traceback(log) |
|
512 | print_traceback(log) | |
511 | raise |
|
513 | raise | |
512 |
|
514 | |||
513 |
|
515 | |||
514 | @celery.task(queue="default", default_retry_delay=5, max_retries=2) |
|
516 | @celery.task(queue="default", default_retry_delay=5, max_retries=2) | |
515 | def check_alerts(resource_id): |
|
517 | def check_alerts(resource_id): | |
516 | since_when = datetime.utcnow() |
|
518 | since_when = datetime.utcnow() | |
517 | try: |
|
519 | try: | |
518 | request = get_current_request() |
|
520 | request = get_current_request() | |
519 | application = ApplicationService.by_id(resource_id) |
|
521 | application = ApplicationService.by_id(resource_id) | |
520 | if not application: |
|
522 | if not application: | |
521 | return |
|
523 | return | |
522 | error_key = REDIS_KEYS["reports_to_notify_per_type_per_app_alerting"].format( |
|
524 | error_key = REDIS_KEYS["reports_to_notify_per_type_per_app_alerting"].format( | |
523 | ReportType.error, resource_id |
|
525 | ReportType.error, resource_id | |
524 | ) |
|
526 | ) | |
525 | slow_key = REDIS_KEYS["reports_to_notify_per_type_per_app_alerting"].format( |
|
527 | slow_key = REDIS_KEYS["reports_to_notify_per_type_per_app_alerting"].format( | |
526 | ReportType.slow, resource_id |
|
528 | ReportType.slow, resource_id | |
527 | ) |
|
529 | ) | |
528 | error_group_ids = Datastores.redis.smembers(error_key) |
|
530 | error_group_ids = Datastores.redis.smembers(error_key) | |
529 | slow_group_ids = Datastores.redis.smembers(slow_key) |
|
531 | slow_group_ids = Datastores.redis.smembers(slow_key) | |
530 | Datastores.redis.delete(error_key) |
|
532 | Datastores.redis.delete(error_key) | |
531 | Datastores.redis.delete(slow_key) |
|
533 | Datastores.redis.delete(slow_key) | |
532 | err_gids = [int(g_id) for g_id in error_group_ids] |
|
534 | err_gids = [int(g_id) for g_id in error_group_ids] | |
533 | slow_gids = [int(g_id) for g_id in list(slow_group_ids)] |
|
535 | slow_gids = [int(g_id) for g_id in list(slow_group_ids)] | |
534 | group_ids = err_gids + slow_gids |
|
536 | group_ids = err_gids + slow_gids | |
535 | occurence_dict = {} |
|
537 | occurence_dict = {} | |
536 | for g_id in group_ids: |
|
538 | for g_id in group_ids: | |
537 | key = REDIS_KEYS["counters"]["report_group_occurences_alerting"].format( |
|
539 | key = REDIS_KEYS["counters"]["report_group_occurences_alerting"].format( | |
538 | g_id |
|
540 | g_id | |
539 | ) |
|
541 | ) | |
540 | val = Datastores.redis.get(key) |
|
542 | val = Datastores.redis.get(key) | |
541 | Datastores.redis.delete(key) |
|
543 | Datastores.redis.delete(key) | |
542 | if val: |
|
544 | if val: | |
543 | occurence_dict[g_id] = int(val) |
|
545 | occurence_dict[g_id] = int(val) | |
544 | else: |
|
546 | else: | |
545 | occurence_dict[g_id] = 1 |
|
547 | occurence_dict[g_id] = 1 | |
546 | report_groups = ReportGroupService.by_ids(group_ids) |
|
548 | report_groups = ReportGroupService.by_ids(group_ids) | |
547 | report_groups.options(sa.orm.joinedload(ReportGroup.last_report_ref)) |
|
549 | report_groups.options(sa.orm.joinedload(ReportGroup.last_report_ref)) | |
548 |
|
550 | |||
549 | ApplicationService.check_for_groups_alert( |
|
551 | ApplicationService.check_for_groups_alert( | |
550 | application, |
|
552 | application, | |
551 | "alert", |
|
553 | "alert", | |
552 | report_groups=report_groups, |
|
554 | report_groups=report_groups, | |
553 | occurence_dict=occurence_dict, |
|
555 | occurence_dict=occurence_dict, | |
554 | since_when=since_when, |
|
556 | since_when=since_when, | |
555 | ) |
|
557 | ) | |
556 | except Exception as exc: |
|
558 | except Exception as exc: | |
557 | print_traceback(log) |
|
559 | print_traceback(log) | |
558 | raise |
|
560 | raise | |
559 |
|
561 | |||
560 |
|
562 | |||
561 | @celery.task(queue="default", default_retry_delay=1, max_retries=2) |
|
563 | @celery.task(queue="default", default_retry_delay=1, max_retries=2) | |
562 | def close_alerts(): |
|
564 | def close_alerts(): | |
563 | log.warning("Checking alerts") |
|
565 | log.warning("Checking alerts") | |
564 | since_when = datetime.utcnow() |
|
566 | since_when = datetime.utcnow() | |
565 | try: |
|
567 | try: | |
566 | event_types = [ |
|
568 | event_types = [ | |
567 | Event.types["error_report_alert"], |
|
569 | Event.types["error_report_alert"], | |
568 | Event.types["slow_report_alert"], |
|
570 | Event.types["slow_report_alert"], | |
569 | ] |
|
571 | ] | |
570 | statuses = [Event.statuses["active"]] |
|
572 | statuses = [Event.statuses["active"]] | |
571 | # get events older than 5 min |
|
573 | # get events older than 5 min | |
572 | events = EventService.by_type_and_status( |
|
574 | events = EventService.by_type_and_status( | |
573 | event_types, statuses, older_than=(since_when - timedelta(minutes=5)) |
|
575 | event_types, statuses, older_than=(since_when - timedelta(minutes=5)) | |
574 | ) |
|
576 | ) | |
575 | for event in events: |
|
577 | for event in events: | |
576 | # see if we can close them |
|
578 | # see if we can close them | |
577 | event.validate_or_close(since_when=(since_when - timedelta(minutes=1))) |
|
579 | event.validate_or_close(since_when=(since_when - timedelta(minutes=1))) | |
578 | except Exception as exc: |
|
580 | except Exception as exc: | |
579 | print_traceback(log) |
|
581 | print_traceback(log) | |
580 | raise |
|
582 | raise | |
581 |
|
583 | |||
582 |
|
584 | |||
583 | @celery.task(queue="default", default_retry_delay=600, max_retries=144) |
|
585 | @celery.task(queue="default", default_retry_delay=600, max_retries=144) | |
584 | def update_tag_counter(tag_name, tag_value, count): |
|
586 | def update_tag_counter(tag_name, tag_value, count): | |
585 | try: |
|
587 | try: | |
586 | query = ( |
|
588 | query = ( | |
587 | DBSession.query(Tag) |
|
589 | DBSession.query(Tag) | |
588 | .filter(Tag.name == tag_name) |
|
590 | .filter(Tag.name == tag_name) | |
589 | .filter( |
|
591 | .filter( | |
590 | sa.cast(Tag.value, sa.types.TEXT) |
|
592 | sa.cast(Tag.value, sa.types.TEXT) | |
591 | == sa.cast(json.dumps(tag_value), sa.types.TEXT) |
|
593 | == sa.cast(json.dumps(tag_value), sa.types.TEXT) | |
592 | ) |
|
594 | ) | |
593 | ) |
|
595 | ) | |
594 | query.update( |
|
596 | query.update( | |
595 | {"times_seen": Tag.times_seen + count, "last_timestamp": datetime.utcnow()}, |
|
597 | {"times_seen": Tag.times_seen + count, "last_timestamp": datetime.utcnow()}, | |
596 | synchronize_session=False, |
|
598 | synchronize_session=False, | |
597 | ) |
|
599 | ) | |
598 | session = DBSession() |
|
600 | session = DBSession() | |
599 | mark_changed(session) |
|
601 | mark_changed(session) | |
600 | return True |
|
602 | return True | |
601 | except Exception as exc: |
|
603 | except Exception as exc: | |
602 | print_traceback(log) |
|
604 | print_traceback(log) | |
603 | if celery.conf["CELERY_EAGER_PROPAGATES_EXCEPTIONS"]: |
|
605 | if celery.conf["CELERY_EAGER_PROPAGATES_EXCEPTIONS"]: | |
604 | raise |
|
606 | raise | |
605 | update_tag_counter.retry(exc=exc) |
|
607 | update_tag_counter.retry(exc=exc) | |
606 |
|
608 | |||
607 |
|
609 | |||
608 | @celery.task(queue="default") |
|
610 | @celery.task(queue="default") | |
609 | def update_tag_counters(): |
|
611 | def update_tag_counters(): | |
610 | """ |
|
612 | """ | |
611 | Sets task to update counters for application tags |
|
613 | Sets task to update counters for application tags | |
612 | """ |
|
614 | """ | |
613 | tags = Datastores.redis.lrange(REDIS_KEYS["seen_tag_list"], 0, -1) |
|
615 | tags = Datastores.redis.lrange(REDIS_KEYS["seen_tag_list"], 0, -1) | |
614 | Datastores.redis.delete(REDIS_KEYS["seen_tag_list"]) |
|
616 | Datastores.redis.delete(REDIS_KEYS["seen_tag_list"]) | |
615 | c = collections.Counter(tags) |
|
617 | c = collections.Counter(tags) | |
616 | for t_json, count in c.items(): |
|
618 | for t_json, count in c.items(): | |
617 | tag_info = json.loads(t_json) |
|
619 | tag_info = json.loads(t_json) | |
618 | update_tag_counter.delay(tag_info[0], tag_info[1], count) |
|
620 | update_tag_counter.delay(tag_info[0], tag_info[1], count) | |
619 |
|
621 | |||
620 |
|
622 | |||
621 | @celery.task(queue="default") |
|
623 | @celery.task(queue="default") | |
622 | def daily_digest(): |
|
624 | def daily_digest(): | |
623 | """ |
|
625 | """ | |
624 | Sends daily digest with top 50 error reports |
|
626 | Sends daily digest with top 50 error reports | |
625 | """ |
|
627 | """ | |
626 | request = get_current_request() |
|
628 | request = get_current_request() | |
627 | apps = Datastores.redis.smembers(REDIS_KEYS["apps_that_had_reports"]) |
|
629 | apps = Datastores.redis.smembers(REDIS_KEYS["apps_that_had_reports"]) | |
628 | Datastores.redis.delete(REDIS_KEYS["apps_that_had_reports"]) |
|
630 | Datastores.redis.delete(REDIS_KEYS["apps_that_had_reports"]) | |
629 | since_when = datetime.utcnow() - timedelta(hours=8) |
|
631 | since_when = datetime.utcnow() - timedelta(hours=8) | |
630 | log.warning("Generating daily digests") |
|
632 | log.warning("Generating daily digests") | |
631 | for resource_id in apps: |
|
633 | for resource_id in apps: | |
632 | resource_id = resource_id.decode("utf8") |
|
634 | resource_id = resource_id.decode("utf8") | |
633 | end_date = datetime.utcnow().replace(microsecond=0, second=0) |
|
635 | end_date = datetime.utcnow().replace(microsecond=0, second=0) | |
634 | filter_settings = { |
|
636 | filter_settings = { | |
635 | "resource": [resource_id], |
|
637 | "resource": [resource_id], | |
636 | "tags": [{"name": "type", "value": ["error"], "op": None}], |
|
638 | "tags": [{"name": "type", "value": ["error"], "op": None}], | |
637 | "type": "error", |
|
639 | "type": "error", | |
638 | "start_date": since_when, |
|
640 | "start_date": since_when, | |
639 | "end_date": end_date, |
|
641 | "end_date": end_date, | |
640 | } |
|
642 | } | |
641 |
|
643 | |||
642 | reports = ReportGroupService.get_trending( |
|
644 | reports = ReportGroupService.get_trending( | |
643 | request, filter_settings=filter_settings, limit=50 |
|
645 | request, filter_settings=filter_settings, limit=50 | |
644 | ) |
|
646 | ) | |
645 |
|
647 | |||
646 | application = ApplicationService.by_id(resource_id) |
|
648 | application = ApplicationService.by_id(resource_id) | |
647 | if application: |
|
649 | if application: | |
648 | users = set( |
|
650 | users = set( | |
649 | [p.user for p in ResourceService.users_for_perm(application, "view")] |
|
651 | [p.user for p in ResourceService.users_for_perm(application, "view")] | |
650 | ) |
|
652 | ) | |
651 | for user in users: |
|
653 | for user in users: | |
652 | user.send_digest( |
|
654 | user.send_digest( | |
653 | request, application, reports=reports, since_when=since_when |
|
655 | request, application, reports=reports, since_when=since_when | |
654 | ) |
|
656 | ) | |
655 |
|
657 | |||
656 |
|
658 | |||
657 | @celery.task(queue="default") |
|
659 | @celery.task(queue="default") | |
658 | def notifications_reports(): |
|
660 | def notifications_reports(): | |
659 | """ |
|
661 | """ | |
660 | Loop that checks redis for info and then issues new tasks to celery to |
|
662 | Loop that checks redis for info and then issues new tasks to celery to | |
661 | issue notifications |
|
663 | issue notifications | |
662 | """ |
|
664 | """ | |
663 | apps = Datastores.redis.smembers(REDIS_KEYS["apps_that_had_reports"]) |
|
665 | apps = Datastores.redis.smembers(REDIS_KEYS["apps_that_had_reports"]) | |
664 | Datastores.redis.delete(REDIS_KEYS["apps_that_had_reports"]) |
|
666 | Datastores.redis.delete(REDIS_KEYS["apps_that_had_reports"]) | |
665 | for app in apps: |
|
667 | for app in apps: | |
666 | log.warning("Notify for app: %s" % app) |
|
668 | log.warning("Notify for app: %s" % app) | |
667 | check_user_report_notifications.delay(app.decode("utf8")) |
|
669 | check_user_report_notifications.delay(app.decode("utf8")) | |
668 |
|
670 | |||
669 |
|
671 | |||
670 | @celery.task(queue="default") |
|
672 | @celery.task(queue="default") | |
671 | def alerting_reports(): |
|
673 | def alerting_reports(): | |
672 | """ |
|
674 | """ | |
673 | Loop that checks redis for info and then issues new tasks to celery to |
|
675 | Loop that checks redis for info and then issues new tasks to celery to | |
674 | perform the following: |
|
676 | perform the following: | |
675 | - which applications should have new alerts opened |
|
677 | - which applications should have new alerts opened | |
676 | """ |
|
678 | """ | |
677 |
|
679 | |||
678 | apps = Datastores.redis.smembers(REDIS_KEYS["apps_that_had_reports_alerting"]) |
|
680 | apps = Datastores.redis.smembers(REDIS_KEYS["apps_that_had_reports_alerting"]) | |
679 | Datastores.redis.delete(REDIS_KEYS["apps_that_had_reports_alerting"]) |
|
681 | Datastores.redis.delete(REDIS_KEYS["apps_that_had_reports_alerting"]) | |
680 | for app in apps: |
|
682 | for app in apps: | |
681 | log.warning("Notify for app: %s" % app) |
|
683 | log.warning("Notify for app: %s" % app) | |
682 | check_alerts.delay(app.decode("utf8")) |
|
684 | check_alerts.delay(app.decode("utf8")) | |
683 |
|
685 | |||
684 |
|
686 | |||
685 | @celery.task( |
|
687 | @celery.task( | |
686 | queue="default", soft_time_limit=3600 * 4, hard_time_limit=3600 * 4, max_retries=144 |
|
688 | queue="default", soft_time_limit=3600 * 4, hard_time_limit=3600 * 4, max_retries=144 | |
687 | ) |
|
689 | ) | |
688 | def logs_cleanup(resource_id, filter_settings): |
|
690 | def logs_cleanup(resource_id, filter_settings): | |
689 | request = get_current_request() |
|
691 | request = get_current_request() | |
690 | request.tm.begin() |
|
692 | request.tm.begin() | |
691 | es_query = { |
|
693 | es_query = {"query": {"bool": {"filter": [{"term": {"resource_id": resource_id}}]}}} | |
692 | "query": { |
|
|||
693 | "bool": {"filter": [{"term": {"resource_id": resource_id}}]} |
|
|||
694 | } |
|
|||
695 | } |
|
|||
696 |
|
694 | |||
697 | query = DBSession.query(Log).filter(Log.resource_id == resource_id) |
|
695 | query = DBSession.query(Log).filter(Log.resource_id == resource_id) | |
698 | if filter_settings["namespace"]: |
|
696 | if filter_settings["namespace"]: | |
699 | query = query.filter(Log.namespace == filter_settings["namespace"][0]) |
|
697 | query = query.filter(Log.namespace == filter_settings["namespace"][0]) | |
700 | es_query["query"]["bool"]["filter"].append( |
|
698 | es_query["query"]["bool"]["filter"].append( | |
701 | {"term": {"namespace": filter_settings["namespace"][0]}} |
|
699 | {"term": {"namespace": filter_settings["namespace"][0]}} | |
702 | ) |
|
700 | ) | |
703 | query.delete(synchronize_session=False) |
|
701 | query.delete(synchronize_session=False) | |
704 | request.tm.commit() |
|
702 | request.tm.commit() | |
705 | Datastores.es.delete_by_query( |
|
703 | Datastores.es.delete_by_query( | |
706 | index="rcae_l_*", |
|
704 | index="rcae_l_*", doc_type="log", body=es_query, conflicts="proceed" | |
707 | ) |
|
705 | ) |
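For orientation, a minimal sketch (not part of the diff) of the Elasticsearch body that logs_cleanup builds when a namespace filter is supplied; the resource id and namespace values below are made up:

es_query = {"query": {"bool": {"filter": [{"term": {"resource_id": 42}}]}}}
es_query["query"]["bool"]["filter"].append({"term": {"namespace": "myapp.requests"}})
# body handed to delete_by_query:
# {"query": {"bool": {"filter": [{"term": {"resource_id": 42}},
#                                {"term": {"namespace": "myapp.requests"}}]}}}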
@@ -1,558 +1,560 b''
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 |
|
2 | |||
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors |
|
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors | |
4 | # |
|
4 | # | |
5 | # Licensed under the Apache License, Version 2.0 (the "License"); |
|
5 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
6 | # you may not use this file except in compliance with the License. |
|
6 | # you may not use this file except in compliance with the License. | |
7 | # You may obtain a copy of the License at |
|
7 | # You may obtain a copy of the License at | |
8 | # |
|
8 | # | |
9 | # http://www.apache.org/licenses/LICENSE-2.0 |
|
9 | # http://www.apache.org/licenses/LICENSE-2.0 | |
10 | # |
|
10 | # | |
11 | # Unless required by applicable law or agreed to in writing, software |
|
11 | # Unless required by applicable law or agreed to in writing, software | |
12 | # distributed under the License is distributed on an "AS IS" BASIS, |
|
12 | # distributed under the License is distributed on an "AS IS" BASIS, | |
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
14 | # See the License for the specific language governing permissions and |
|
14 | # See the License for the specific language governing permissions and | |
15 | # limitations under the License. |
|
15 | # limitations under the License. | |
16 |
|
16 | |||
17 | """ |
|
17 | """ | |
18 | Utility functions. |
|
18 | Utility functions. | |
19 | """ |
|
19 | """ | |
20 | import logging |
|
20 | import logging | |
21 | import requests |
|
21 | import requests | |
22 | import hashlib |
|
22 | import hashlib | |
23 | import json |
|
23 | import json | |
24 | import copy |
|
24 | import copy | |
25 | import uuid |
|
25 | import uuid | |
26 | import appenlight.lib.helpers as h |
|
26 | import appenlight.lib.helpers as h | |
27 | from collections import namedtuple |
|
27 | from collections import namedtuple | |
28 | from datetime import timedelta, datetime, date |
|
28 | from datetime import timedelta, datetime, date | |
29 | from dogpile.cache.api import NO_VALUE |
|
29 | from dogpile.cache.api import NO_VALUE | |
30 | from appenlight.models import Datastores |
|
30 | from appenlight.models import Datastores | |
31 | from appenlight.validators import LogSearchSchema, TagListSchema, accepted_search_params |
|
31 | from appenlight.validators import LogSearchSchema, TagListSchema, accepted_search_params | |
32 | from itsdangerous import TimestampSigner |
|
32 | from itsdangerous import TimestampSigner | |
33 | from ziggurat_foundations.permissions import ALL_PERMISSIONS |
|
33 | from ziggurat_foundations.permissions import ALL_PERMISSIONS | |
34 | from ziggurat_foundations.models.services.user import UserService |
|
34 | from ziggurat_foundations.models.services.user import UserService | |
35 | from dateutil.relativedelta import relativedelta |
|
35 | from dateutil.relativedelta import relativedelta | |
36 | from dateutil.rrule import rrule, MONTHLY, DAILY |
|
36 | from dateutil.rrule import rrule, MONTHLY, DAILY | |
37 |
|
37 | |||
38 | log = logging.getLogger(__name__) |
|
38 | log = logging.getLogger(__name__) | |
39 |
|
39 | |||
40 |
|
40 | |||
41 | Stat = namedtuple("Stat", "start_interval value") |
|
41 | Stat = namedtuple("Stat", "start_interval value") | |
42 |
|
42 | |||
43 |
|
43 | |||
44 | def default_extractor(item): |
|
44 | def default_extractor(item): | |
45 | """ |
|
45 | """ | |
46 | :param item - item to extract date from |
|
46 | :param item - item to extract date from | |
47 | """ |
|
47 | """ | |
48 | if hasattr(item, "start_interval"): |
|
48 | if hasattr(item, "start_interval"): | |
49 | return item.start_interval |
|
49 | return item.start_interval | |
50 | return item["start_interval"] |
|
50 | return item["start_interval"] | |
51 |
|
51 | |||
52 |
|
52 | |||
53 | # fast gap generator |
|
53 | # fast gap generator | |
54 | def gap_gen_default(start, step, itemiterator, end_time=None, iv_extractor=None): |
|
54 | def gap_gen_default(start, step, itemiterator, end_time=None, iv_extractor=None): | |
55 | """ generates a list of time/value items based on step and itemiterator |
|
55 | """ generates a list of time/value items based on step and itemiterator | |
56 | if there are entries missing from iterator time/None will be returned |
|
56 | if there are entries missing from iterator time/None will be returned | |
57 | instead |
|
57 | instead | |
58 | :param start - datetime - what time should we start generating our values |
|
58 | :param start - datetime - what time should we start generating our values | |
59 | :param step - timedelta - stepsize |
|
59 | :param step - timedelta - stepsize | |
60 | :param itemiterator - iterable - we will check this iterable for values |
|
60 | :param itemiterator - iterable - we will check this iterable for values | |
61 | corresponding to generated steps |
|
61 | corresponding to generated steps | |
62 | :param end_time - datetime - when last step is >= end_time stop iterating |
|
62 | :param end_time - datetime - when last step is >= end_time stop iterating | |
63 | :param iv_extractor - extracts current step from iterable items |
|
63 | :param iv_extractor - extracts current step from iterable items | |
64 | """ |
|
64 | """ | |
65 |
|
65 | |||
66 | if not iv_extractor: |
|
66 | if not iv_extractor: | |
67 | iv_extractor = default_extractor |
|
67 | iv_extractor = default_extractor | |
68 |
|
68 | |||
69 | next_step = start |
|
69 | next_step = start | |
70 | minutes = step.total_seconds() / 60.0 |
|
70 | minutes = step.total_seconds() / 60.0 | |
71 | while next_step.minute % minutes != 0: |
|
71 | while next_step.minute % minutes != 0: | |
72 | next_step = next_step.replace(minute=next_step.minute - 1) |
|
72 | next_step = next_step.replace(minute=next_step.minute - 1) | |
73 | for item in itemiterator: |
|
73 | for item in itemiterator: | |
74 | item_start_interval = iv_extractor(item) |
|
74 | item_start_interval = iv_extractor(item) | |
75 | # do we have a match for current time step in our data? |
|
75 | # do we have a match for current time step in our data? | |
76 | # if not, yield a Stat with a None value for the missing step |
|
76 | # if not, yield a Stat with a None value for the missing step | |
77 | while next_step < item_start_interval: |
|
77 | while next_step < item_start_interval: | |
78 | yield Stat(next_step, None) |
|
78 | yield Stat(next_step, None) | |
79 | next_step = next_step + step |
|
79 | next_step = next_step + step | |
80 | if next_step == item_start_interval: |
|
80 | if next_step == item_start_interval: | |
81 | yield Stat(item_start_interval, item) |
|
81 | yield Stat(item_start_interval, item) | |
82 | next_step = next_step + step |
|
82 | next_step = next_step + step | |
83 | if end_time: |
|
83 | if end_time: | |
84 | while next_step < end_time: |
|
84 | while next_step < end_time: | |
85 | yield Stat(next_step, None) |
|
85 | yield Stat(next_step, None) | |
86 | next_step = next_step + step |
|
86 | next_step = next_step + step | |
87 |
|
87 | |||
88 |
|
88 | |||
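A minimal usage sketch of gap_gen_default (not part of the diff); the timestamps and values are made up:

from datetime import datetime, timedelta

rows = [
    {"start_interval": datetime(2017, 1, 1, 10, 0), "value": 5},
    {"start_interval": datetime(2017, 1, 1, 10, 10), "value": 7},
]
series = list(gap_gen_default(datetime(2017, 1, 1, 10, 0), timedelta(minutes=5), rows))
# yields Stat(10:00, row), Stat(10:05, None), Stat(10:10, row) - the missing 10:05 step is filled with None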
89 | class DateTimeEncoder(json.JSONEncoder): |
|
89 | class DateTimeEncoder(json.JSONEncoder): | |
90 | """ Simple datetime to ISO encoder for json serialization""" |
|
90 | """ Simple datetime to ISO encoder for json serialization""" | |
91 |
|
91 | |||
92 | def default(self, obj): |
|
92 | def default(self, obj): | |
93 | if isinstance(obj, date): |
|
93 | if isinstance(obj, date): | |
94 | return obj.isoformat() |
|
94 | return obj.isoformat() | |
95 | if isinstance(obj, datetime): |
|
95 | if isinstance(obj, datetime): | |
96 | return obj.isoformat() |
|
96 | return obj.isoformat() | |
97 | return json.JSONEncoder.default(self, obj) |
|
97 | return json.JSONEncoder.default(self, obj) | |
98 |
|
98 | |||
99 |
|
99 | |||
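A quick illustration (not from the diff) of how the encoder is meant to be used with json.dumps:

import json
from datetime import datetime

json.dumps({"ts": datetime(2017, 6, 1, 12, 30)}, cls=DateTimeEncoder)
# '{"ts": "2017-06-01T12:30:00"}'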
100 | def channelstream_request( |
|
100 | def channelstream_request( | |
101 | secret, endpoint, payload, throw_exceptions=False, servers=None |
|
101 | secret, endpoint, payload, throw_exceptions=False, servers=None | |
102 | ): |
|
102 | ): | |
103 | responses = [] |
|
103 | responses = [] | |
104 | if not servers: |
|
104 | if not servers: | |
105 | servers = [] |
|
105 | servers = [] | |
106 |
|
106 | |||
107 | signer = TimestampSigner(secret) |
|
107 | signer = TimestampSigner(secret) | |
108 | sig_for_server = signer.sign(endpoint) |
|
108 | sig_for_server = signer.sign(endpoint) | |
109 | for secret, server in [(s["secret"], s["server"]) for s in servers]: |
|
109 | for secret, server in [(s["secret"], s["server"]) for s in servers]: | |
110 | response = {} |
|
110 | response = {} | |
111 | secret_headers = { |
|
111 | secret_headers = { | |
112 | "x-channelstream-secret": sig_for_server, |
|
112 | "x-channelstream-secret": sig_for_server, | |
113 | "x-channelstream-endpoint": endpoint, |
|
113 | "x-channelstream-endpoint": endpoint, | |
114 | "Content-Type": "application/json", |
|
114 | "Content-Type": "application/json", | |
115 | } |
|
115 | } | |
116 | url = "%s%s" % (server, endpoint) |
|
116 | url = "%s%s" % (server, endpoint) | |
117 | try: |
|
117 | try: | |
118 | response = requests.post( |
|
118 | response = requests.post( | |
119 | url, |
|
119 | url, | |
120 | data=json.dumps(payload, cls=DateTimeEncoder), |
|
120 | data=json.dumps(payload, cls=DateTimeEncoder), | |
121 | headers=secret_headers, |
|
121 | headers=secret_headers, | |
122 | verify=False, |
|
122 | verify=False, | |
123 | timeout=2, |
|
123 | timeout=2, | |
124 | ).json() |
|
124 | ).json() | |
125 | except requests.exceptions.RequestException as e: |
|
125 | except requests.exceptions.RequestException as e: | |
126 | if throw_exceptions: |
|
126 | if throw_exceptions: | |
127 | raise |
|
127 | raise | |
128 | responses.append(response) |
|
128 | responses.append(response) | |
129 | return responses |
|
129 | return responses | |
130 |
|
130 | |||
131 |
|
131 | |||
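A hedged usage sketch (not part of the diff); the secret, server address and payload shape are hypothetical and depend on how channelstream is configured:

servers = [{"secret": "server-secret", "server": "http://127.0.0.1:8000"}]
responses = channelstream_request(
    "server-secret",
    "/message",
    {"type": "message", "channel": "alerts", "message": {"text": "new report"}},
    servers=servers,
)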
132 | def add_cors_headers(response): |
|
132 | def add_cors_headers(response): | |
133 | # allow CORS |
|
133 | # allow CORS | |
134 | response.headers.add("Access-Control-Allow-Origin", "*") |
|
134 | response.headers.add("Access-Control-Allow-Origin", "*") | |
135 | response.headers.add("XDomainRequestAllowed", "1") |
|
135 | response.headers.add("XDomainRequestAllowed", "1") | |
136 | response.headers.add("Access-Control-Allow-Methods", "GET, POST, OPTIONS") |
|
136 | response.headers.add("Access-Control-Allow-Methods", "GET, POST, OPTIONS") | |
137 | # response.headers.add('Access-Control-Allow-Credentials', 'true') |
|
137 | # response.headers.add('Access-Control-Allow-Credentials', 'true') | |
138 | response.headers.add( |
|
138 | response.headers.add( | |
139 | "Access-Control-Allow-Headers", |
|
139 | "Access-Control-Allow-Headers", | |
140 | "Content-Type, Depth, User-Agent, X-File-Size, X-Requested-With, If-Modified-Since, X-File-Name, Cache-Control, Pragma, Origin, Connection, Referer, Cookie", |
|
140 | "Content-Type, Depth, User-Agent, X-File-Size, X-Requested-With, If-Modified-Since, X-File-Name, Cache-Control, Pragma, Origin, Connection, Referer, Cookie", | |
141 | ) |
|
141 | ) | |
142 | response.headers.add("Access-Control-Max-Age", "86400") |
|
142 | response.headers.add("Access-Control-Max-Age", "86400") | |
143 |
|
143 | |||
144 |
|
144 | |||
145 | from sqlalchemy.sql import compiler |
|
145 | from sqlalchemy.sql import compiler | |
146 | from psycopg2.extensions import adapt as sqlescape |
|
146 | from psycopg2.extensions import adapt as sqlescape | |
147 |
|
147 | |||
148 |
|
148 | |||
149 | # or use the appropriate escape function from your db driver |
|
149 | # or use the appropriate escape function from your db driver | |
150 |
|
150 | |||
151 |
|
151 | |||
152 | def compile_query(query): |
|
152 | def compile_query(query): | |
153 | dialect = query.session.bind.dialect |
|
153 | dialect = query.session.bind.dialect | |
154 | statement = query.statement |
|
154 | statement = query.statement | |
155 | comp = compiler.SQLCompiler(dialect, statement) |
|
155 | comp = compiler.SQLCompiler(dialect, statement) | |
156 | comp.compile() |
|
156 | comp.compile() | |
157 | enc = dialect.encoding |
|
157 | enc = dialect.encoding | |
158 | params = {} |
|
158 | params = {} | |
159 | for k, v in comp.params.items(): |
|
159 | for k, v in comp.params.items(): | |
160 | if isinstance(v, str): |
|
160 | if isinstance(v, str): | |
161 | v = v.encode(enc) |
|
161 | v = v.encode(enc) | |
162 | params[k] = sqlescape(v) |
|
162 | params[k] = sqlescape(v) | |
163 | return (comp.string.encode(enc) % params).decode(enc) |
|
163 | return (comp.string.encode(enc) % params).decode(enc) | |
164 |
|
164 | |||
165 |
|
165 | |||
166 | def convert_es_type(input_data): |
|
166 | def convert_es_type(input_data): | |
167 | """ |
|
167 | """ | |
168 | This might need to convert some text or other types to corresponding ES types |
|
168 | This might need to convert some text or other types to corresponding ES types | |
169 | """ |
|
169 | """ | |
170 | return str(input_data) |
|
170 | return str(input_data) | |
171 |
|
171 | |||
172 |
|
172 | |||
173 | ProtoVersion = namedtuple("ProtoVersion", ["major", "minor", "patch"]) |
|
173 | ProtoVersion = namedtuple("ProtoVersion", ["major", "minor", "patch"]) | |
174 |
|
174 | |||
175 |
|
175 | |||
176 | def parse_proto(input_data): |
|
176 | def parse_proto(input_data): | |
177 | try: |
|
177 | try: | |
178 | parts = [int(x) for x in input_data.split(".")] |
|
178 | parts = [int(x) for x in input_data.split(".")] | |
179 | while len(parts) < 3: |
|
179 | while len(parts) < 3: | |
180 | parts.append(0) |
|
180 | parts.append(0) | |
181 | return ProtoVersion(*parts) |
|
181 | return ProtoVersion(*parts) | |
182 | except Exception as e: |
|
182 | except Exception as e: | |
183 | log.info("Unknown protocol version: %s" % e) |
|
183 | log.info("Unknown protocol version: %s" % e) | |
184 | return ProtoVersion(99, 99, 99) |
|
184 | return ProtoVersion(99, 99, 99) | |
185 |
|
185 | |||
186 |
|
186 | |||
187 | def es_index_name_limiter( |
|
187 | def es_index_name_limiter( | |
188 | start_date=None, end_date=None, months_in_past=6, ixtypes=None |
|
188 | start_date=None, end_date=None, months_in_past=6, ixtypes=None | |
189 | ): |
|
189 | ): | |
190 | """ |
|
190 | """ | |
191 | This function limits the search to 6 months by default so we don't have to |
|
191 | This function limits the search to 6 months by default so we don't have to | |
192 | query, for example, 300 Elasticsearch indices covering 20 years of historical data |
|
192 | query, for example, 300 Elasticsearch indices covering 20 years of historical data | |
193 | """ |
|
193 | """ | |
194 |
|
194 | |||
195 | # should be cached later |
|
195 | # should be cached later | |
196 | def get_possible_names(): |
|
196 | def get_possible_names(): | |
197 | return list(Datastores.es.indices.get_alias("*")) |
|
197 | return list(Datastores.es.indices.get_alias("*")) | |
198 |
|
198 | |||
199 | possible_names = get_possible_names() |
|
199 | possible_names = get_possible_names() | |
200 | es_index_types = [] |
|
200 | es_index_types = [] | |
201 | if not ixtypes: |
|
201 | if not ixtypes: | |
202 | ixtypes = ["reports", "metrics", "logs"] |
|
202 | ixtypes = ["reports", "metrics", "logs"] | |
203 | for t in ixtypes: |
|
203 | for t in ixtypes: | |
204 | if t == "reports": |
|
204 | if t == "reports": | |
205 | es_index_types.append("rcae_r_%s") |
|
205 | es_index_types.append("rcae_r_%s") | |
206 | elif t == "logs": |
|
206 | elif t == "logs": | |
207 | es_index_types.append("rcae_l_%s") |
|
207 | es_index_types.append("rcae_l_%s") | |
208 | elif t == "metrics": |
|
208 | elif t == "metrics": | |
209 | es_index_types.append("rcae_m_%s") |
|
209 | es_index_types.append("rcae_m_%s") | |
210 | elif t == "uptime": |
|
210 | elif t == "uptime": | |
211 | es_index_types.append("rcae_uptime_ce_%s") |
|
211 | es_index_types.append("rcae_uptime_ce_%s") | |
212 | elif t == "slow_calls": |
|
212 | elif t == "slow_calls": | |
213 | es_index_types.append("rcae_sc_%s") |
|
213 | es_index_types.append("rcae_sc_%s") | |
214 |
|
214 | |||
215 | if start_date: |
|
215 | if start_date: | |
216 | start_date = copy.copy(start_date) |
|
216 | start_date = copy.copy(start_date) | |
217 | else: |
|
217 | else: | |
218 | if not end_date: |
|
218 | if not end_date: | |
219 | end_date = datetime.utcnow() |
|
219 | end_date = datetime.utcnow() | |
220 | start_date = end_date + relativedelta(months=months_in_past * -1) |
|
220 | start_date = end_date + relativedelta(months=months_in_past * -1) | |
221 |
|
221 | |||
222 | if not end_date: |
|
222 | if not end_date: | |
223 | end_date = start_date + relativedelta(months=months_in_past) |
|
223 | end_date = start_date + relativedelta(months=months_in_past) | |
224 |
|
224 | |||
225 | index_dates = list( |
|
225 | index_dates = list( | |
226 | rrule( |
|
226 | rrule( | |
227 | MONTHLY, |
|
227 | MONTHLY, | |
228 | dtstart=start_date.date().replace(day=1), |
|
228 | dtstart=start_date.date().replace(day=1), | |
229 | until=end_date.date(), |
|
229 | until=end_date.date(), | |
230 | count=36, |
|
230 | count=36, | |
231 | ) |
|
231 | ) | |
232 | ) |
|
232 | ) | |
233 | index_names = [] |
|
233 | index_names = [] | |
234 | for ix_type in es_index_types: |
|
234 | for ix_type in es_index_types: | |
235 | to_extend = [ |
|
235 | to_extend = [ | |
236 | ix_type % d.strftime("%Y_%m") |
|
236 | ix_type % d.strftime("%Y_%m") | |
237 | for d in index_dates |
|
237 | for d in index_dates | |
238 | if ix_type % d.strftime("%Y_%m") in possible_names |
|
238 | if ix_type % d.strftime("%Y_%m") in possible_names | |
239 | ] |
|
239 | ] | |
240 | index_names.extend(to_extend) |
|
240 | index_names.extend(to_extend) | |
241 | for day in list( |
|
241 | for day in list( | |
242 | rrule(DAILY, dtstart=start_date.date(), until=end_date.date(), count=366) |
|
242 | rrule(DAILY, dtstart=start_date.date(), until=end_date.date(), count=366) | |
243 | ): |
|
243 | ): | |
244 | ix_name = ix_type % day.strftime("%Y_%m_%d") |
|
244 | ix_name = ix_type % day.strftime("%Y_%m_%d") | |
245 | if ix_name in possible_names: |
|
245 | if ix_name in possible_names: | |
246 | index_names.append(ix_name) |
|
246 | index_names.append(ix_name) | |
247 | return index_names |
|
247 | return index_names | |
248 |
|
248 | |||
249 |
|
249 | |||
250 | def build_filter_settings_from_query_dict( |
|
250 | def build_filter_settings_from_query_dict( | |
251 | request, params=None, override_app_ids=None, resource_permissions=None |
|
251 | request, params=None, override_app_ids=None, resource_permissions=None | |
252 | ): |
|
252 | ): | |
253 | """ |
|
253 | """ | |
254 | Builds list of normalized search terms for ES from query params |
|
254 | Builds list of normalized search terms for ES from query params | |
255 | ensuring application list is restricted to only applications user |
|
255 | ensuring application list is restricted to only applications user | |
256 | has access to |
|
256 | has access to | |
257 |
|
257 | |||
258 | :param params (dictionary) |
|
258 | :param params (dictionary) | |
259 | :param override_app_ids - list of application IDs to use instead of |
|
259 | :param override_app_ids - list of application IDs to use instead of | |
260 | applications user normally has access to |
|
260 | applications user normally has access to | |
261 | """ |
|
261 | """ | |
262 | params = copy.deepcopy(params) |
|
262 | params = copy.deepcopy(params) | |
263 | applications = [] |
|
263 | applications = [] | |
264 | if not resource_permissions: |
|
264 | if not resource_permissions: | |
265 | resource_permissions = ["view"] |
|
265 | resource_permissions = ["view"] | |
266 |
|
266 | |||
267 | if request.user: |
|
267 | if request.user: | |
268 | applications = UserService.resources_with_perms( |
|
268 | applications = UserService.resources_with_perms( | |
269 | request.user, resource_permissions, resource_types=["application"] |
|
269 | request.user, resource_permissions, resource_types=["application"] | |
270 | ) |
|
270 | ) | |
271 |
|
271 | |||
272 | # CRITICAL - this ensures our resultset is limited to only the ones |
|
272 | # CRITICAL - this ensures our resultset is limited to only the ones | |
273 | # user has view permissions |
|
273 | # user has view permissions | |
274 | all_possible_app_ids = set([app.resource_id for app in applications]) |
|
274 | all_possible_app_ids = set([app.resource_id for app in applications]) | |
275 |
|
275 | |||
276 | # if an override is provided we force permission for the app to be present |
|
276 | # if an override is provided we force permission for the app to be present | |
277 | # this allows users to see dashboards and applications they would |
|
277 | # this allows users to see dashboards and applications they would | |
278 | # normally not be able to |
|
278 | # normally not be able to | |
279 |
|
279 | |||
280 | if override_app_ids: |
|
280 | if override_app_ids: | |
281 | all_possible_app_ids = set(override_app_ids) |
|
281 | all_possible_app_ids = set(override_app_ids) | |
282 |
|
282 | |||
283 | schema = LogSearchSchema().bind(resources=all_possible_app_ids) |
|
283 | schema = LogSearchSchema().bind(resources=all_possible_app_ids) | |
284 | tag_schema = TagListSchema() |
|
284 | tag_schema = TagListSchema() | |
285 | filter_settings = schema.deserialize(params) |
|
285 | filter_settings = schema.deserialize(params) | |
286 | tag_list = [] |
|
286 | tag_list = [] | |
287 | for k, v in list(filter_settings.items()): |
|
287 | for k, v in list(filter_settings.items()): | |
288 | if k in accepted_search_params: |
|
288 | if k in accepted_search_params: | |
289 | continue |
|
289 | continue | |
290 | tag_list.append({"name": k, "value": v, "op": "eq"}) |
|
290 | tag_list.append({"name": k, "value": v, "op": "eq"}) | |
291 | # remove the key from filter_settings |
|
291 | # remove the key from filter_settings | |
292 | filter_settings.pop(k, None) |
|
292 | filter_settings.pop(k, None) | |
293 | tags = tag_schema.deserialize(tag_list) |
|
293 | tags = tag_schema.deserialize(tag_list) | |
294 | filter_settings["tags"] = tags |
|
294 | filter_settings["tags"] = tags | |
295 | return filter_settings |
|
295 | return filter_settings | |
296 |
|
296 | |||
297 |
|
297 | |||
298 | def gen_uuid(): |
|
298 | def gen_uuid(): | |
299 | return str(uuid.uuid4()) |
|
299 | return str(uuid.uuid4()) | |
300 |
|
300 | |||
301 |
|
301 | |||
302 | def gen_uuid4_sha_hex(): |
|
302 | def gen_uuid4_sha_hex(): | |
303 | return hashlib.sha1(uuid.uuid4().bytes).hexdigest() |
|
303 | return hashlib.sha1(uuid.uuid4().bytes).hexdigest() | |
304 |
|
304 | |||
305 |
|
305 | |||
306 | def permission_tuple_to_dict(data): |
|
306 | def permission_tuple_to_dict(data): | |
307 | out = { |
|
307 | out = { | |
308 | "user_name": None, |
|
308 | "user_name": None, | |
309 | "perm_name": data.perm_name, |
|
309 | "perm_name": data.perm_name, | |
310 | "owner": data.owner, |
|
310 | "owner": data.owner, | |
311 | "type": data.type, |
|
311 | "type": data.type, | |
312 | "resource_name": None, |
|
312 | "resource_name": None, | |
313 | "resource_type": None, |
|
313 | "resource_type": None, | |
314 | "resource_id": None, |
|
314 | "resource_id": None, | |
315 | "group_name": None, |
|
315 | "group_name": None, | |
316 | "group_id": None, |
|
316 | "group_id": None, | |
317 | } |
|
317 | } | |
318 | if data.user: |
|
318 | if data.user: | |
319 | out["user_name"] = data.user.user_name |
|
319 | out["user_name"] = data.user.user_name | |
320 | if data.perm_name == ALL_PERMISSIONS: |
|
320 | if data.perm_name == ALL_PERMISSIONS: | |
321 | out["perm_name"] = "__all_permissions__" |
|
321 | out["perm_name"] = "__all_permissions__" | |
322 | if data.resource: |
|
322 | if data.resource: | |
323 | out["resource_name"] = data.resource.resource_name |
|
323 | out["resource_name"] = data.resource.resource_name | |
324 | out["resource_type"] = data.resource.resource_type |
|
324 | out["resource_type"] = data.resource.resource_type | |
325 | out["resource_id"] = data.resource.resource_id |
|
325 | out["resource_id"] = data.resource.resource_id | |
326 | if data.group: |
|
326 | if data.group: | |
327 | out["group_name"] = data.group.group_name |
|
327 | out["group_name"] = data.group.group_name | |
328 | out["group_id"] = data.group.id |
|
328 | out["group_id"] = data.group.id | |
329 | return out |
|
329 | return out | |
330 |
|
330 | |||
331 |
|
331 | |||
332 | def get_cached_buckets( |
|
332 | def get_cached_buckets( | |
333 | request, |
|
333 | request, | |
334 | stats_since, |
|
334 | stats_since, | |
335 | end_time, |
|
335 | end_time, | |
336 | fn, |
|
336 | fn, | |
337 | cache_key, |
|
337 | cache_key, | |
338 | gap_gen=None, |
|
338 | gap_gen=None, | |
339 | db_session=None, |
|
339 | db_session=None, | |
340 | step_interval=None, |
|
340 | step_interval=None, | |
341 | iv_extractor=None, |
|
341 | iv_extractor=None, | |
342 | rerange=False, |
|
342 | rerange=False, | |
343 | *args, |
|
343 | *args, | |
344 | **kwargs |
|
344 | **kwargs | |
345 | ): |
|
345 | ): | |
346 | """ Takes "fn" that should return some data and tries to load the data |
|
346 | """ Takes "fn" that should return some data and tries to load the data | |
347 | dividing it into daily buckets - if the stats_since and end time give a |
|
347 | dividing it into daily buckets - if the stats_since and end time give a | |
348 | delta bigger than 24 hours, then only "today's" data is computed on the fly |
|
348 | delta bigger than 24 hours, then only "today's" data is computed on the fly | |
349 |
|
349 | |||
350 | :param request: (request) request object |
|
350 | :param request: (request) request object | |
351 | :param stats_since: (datetime) start date of buckets range |
|
351 | :param stats_since: (datetime) start date of buckets range | |
352 | :param end_time: (datetime) end date of buckets range - utcnow() if None |
|
352 | :param end_time: (datetime) end date of buckets range - utcnow() if None | |
353 | :param fn: (callable) callable to use to populate buckets should have |
|
353 | :param fn: (callable) callable to use to populate buckets should have | |
354 | following signature: |
|
354 | following signature: | |
355 | def get_data(request, since_when, until, *args, **kwargs): |
|
355 | def get_data(request, since_when, until, *args, **kwargs): | |
356 |
|
356 | |||
357 | :param cache_key: (string) cache key that will be used to build bucket |
|
357 | :param cache_key: (string) cache key that will be used to build bucket | |
358 | caches |
|
358 | caches | |
359 | :param gap_gen: (callable) gap generator - should return step intervals |
|
359 | :param gap_gen: (callable) gap generator - should return step intervals | |
360 | to use with our `fn` callable |
|
360 | to use with our `fn` callable | |
361 | :param db_session: (Session) sqlalchemy session |
|
361 | :param db_session: (Session) sqlalchemy session | |
362 | :param step_interval: (timedelta) optional step interval if we want to |
|
362 | :param step_interval: (timedelta) optional step interval if we want to | |
363 | override the default determined from total start/end time delta |
|
363 | override the default determined from total start/end time delta | |
364 | :param iv_extractor: (callable) used to get step intervals from data |
|
364 | :param iv_extractor: (callable) used to get step intervals from data | |
365 | returned by `fn` callable |
|
365 | returned by `fn` callable | |
366 | :param rerange: (bool) handy if we want to change ranges from hours to |
|
366 | :param rerange: (bool) handy if we want to change ranges from hours to | |
367 | days when cached data is missing - will shorten execution time if `fn` |
|
367 | days when cached data is missing - will shorten execution time if `fn` | |
368 | callable supports that and we are working with multiple rows - like metrics |
|
368 | callable supports that and we are working with multiple rows - like metrics | |
369 | :param args: |
|
369 | :param args: | |
370 | :param kwargs: |
|
370 | :param kwargs: | |
371 |
|
371 | |||
372 | :return: iterable |
|
372 | :return: iterable | |
373 | """ |
|
373 | """ | |
374 | if not end_time: |
|
374 | if not end_time: | |
375 | end_time = datetime.utcnow().replace(second=0, microsecond=0) |
|
375 | end_time = datetime.utcnow().replace(second=0, microsecond=0) | |
376 | delta = end_time - stats_since |
|
376 | delta = end_time - stats_since | |
377 | # if smaller than 3 days we want to group by 5min else by 1h, |
|
377 | # if smaller than 3 days we want to group by 5min else by 1h, | |
378 | # for 60 min group by min |
|
378 | # for 60 min group by min | |
379 | if not gap_gen: |
|
379 | if not gap_gen: | |
380 | gap_gen = gap_gen_default |
|
380 | gap_gen = gap_gen_default | |
381 | if not iv_extractor: |
|
381 | if not iv_extractor: | |
382 | iv_extractor = default_extractor |
|
382 | iv_extractor = default_extractor | |
383 |
|
383 | |||
384 | # do not use custom interval if total time range with new iv would exceed |
|
384 | # do not use custom interval if total time range with new iv would exceed | |
385 | # end time |
|
385 | # end time | |
386 | if not step_interval or stats_since + step_interval >= end_time: |
|
386 | if not step_interval or stats_since + step_interval >= end_time: | |
387 | if delta < h.time_deltas.get("12h")["delta"]: |
|
387 | if delta < h.time_deltas.get("12h")["delta"]: | |
388 | step_interval = timedelta(seconds=60) |
|
388 | step_interval = timedelta(seconds=60) | |
389 | elif delta < h.time_deltas.get("3d")["delta"]: |
|
389 | elif delta < h.time_deltas.get("3d")["delta"]: | |
390 | step_interval = timedelta(seconds=60 * 5) |
|
390 | step_interval = timedelta(seconds=60 * 5) | |
391 | elif delta > h.time_deltas.get("2w")["delta"]: |
|
391 | elif delta > h.time_deltas.get("2w")["delta"]: | |
392 | step_interval = timedelta(days=1) |
|
392 | step_interval = timedelta(days=1) | |
393 | else: |
|
393 | else: | |
394 | step_interval = timedelta(minutes=60) |
|
394 | step_interval = timedelta(minutes=60) | |
395 |
|
395 | |||
396 | if step_interval >= timedelta(minutes=60): |
|
396 | if step_interval >= timedelta(minutes=60): | |
397 | log.info( |
|
397 | log.info( | |
398 | "cached_buckets:{}: adjusting start time " |
|
398 | "cached_buckets:{}: adjusting start time " | |
399 | "for hourly or daily intervals".format(cache_key) |
|
399 | "for hourly or daily intervals".format(cache_key) | |
400 | ) |
|
400 | ) | |
401 | stats_since = stats_since.replace(hour=0, minute=0) |
|
401 | stats_since = stats_since.replace(hour=0, minute=0) | |
402 |
|
402 | |||
403 | ranges = [ |
|
403 | ranges = [ | |
404 | i.start_interval |
|
404 | i.start_interval | |
405 | for i in list(gap_gen(stats_since, step_interval, [], end_time=end_time)) |
|
405 | for i in list(gap_gen(stats_since, step_interval, [], end_time=end_time)) | |
406 | ] |
|
406 | ] | |
407 | buckets = {} |
|
407 | buckets = {} | |
408 | storage_key = "buckets:" + cache_key + "{}|{}" |
|
408 | storage_key = "buckets:" + cache_key + "{}|{}" | |
409 | # this means we basically cache per hour in 3-14 day intervals, but I think |
|
409 | # this means we basically cache per hour in 3-14 day intervals, but I think | |
410 | # it's fine at this point - it will be faster than db access anyway |
|
410 | # it's fine at this point - it will be faster than db access anyway | |
411 |
|
411 | |||
412 | if len(ranges) >= 1: |
|
412 | if len(ranges) >= 1: | |
413 | last_ranges = [ranges[-1]] |
|
413 | last_ranges = [ranges[-1]] | |
414 | else: |
|
414 | else: | |
415 | last_ranges = [] |
|
415 | last_ranges = [] | |
416 | if step_interval >= timedelta(minutes=60): |
|
416 | if step_interval >= timedelta(minutes=60): | |
417 | for r in ranges: |
|
417 | for r in ranges: | |
418 | k = storage_key.format(step_interval.total_seconds(), r) |
|
418 | k = storage_key.format(step_interval.total_seconds(), r) | |
419 | value = request.registry.cache_regions.redis_day_30.get(k) |
|
419 | value = request.registry.cache_regions.redis_day_30.get(k) | |
420 | # last buckets are never loaded from cache |
|
420 | # last buckets are never loaded from cache | |
421 | is_last_result = r >= end_time - timedelta(hours=6) or r in last_ranges |
|
421 | is_last_result = r >= end_time - timedelta(hours=6) or r in last_ranges | |
422 | if value is not NO_VALUE and not is_last_result: |
|
422 | if value is not NO_VALUE and not is_last_result: | |
423 | log.info( |
|
423 | log.info( | |
424 | "cached_buckets:{}: " |
|
424 | "cached_buckets:{}: " | |
425 | "loading range {} from cache".format(cache_key, r) |
|
425 | "loading range {} from cache".format(cache_key, r) | |
426 | ) |
|
426 | ) | |
427 | buckets[r] = value |
|
427 | buckets[r] = value | |
428 | else: |
|
428 | else: | |
429 | log.info( |
|
429 | log.info( | |
430 | "cached_buckets:{}: " |
|
430 | "cached_buckets:{}: " | |
431 | "loading range {} from storage".format(cache_key, r) |
|
431 | "loading range {} from storage".format(cache_key, r) | |
432 | ) |
|
432 | ) | |
433 | range_size = step_interval |
|
433 | range_size = step_interval | |
434 | if ( |
|
434 | if ( | |
435 | step_interval == timedelta(minutes=60) |
|
435 | step_interval == timedelta(minutes=60) | |
436 | and not is_last_result |
|
436 | and not is_last_result | |
437 | and rerange |
|
437 | and rerange | |
438 | ): |
|
438 | ): | |
439 | range_size = timedelta(days=1) |
|
439 | range_size = timedelta(days=1) | |
440 | r = r.replace(hour=0, minute=0) |
|
440 | r = r.replace(hour=0, minute=0) | |
441 | log.info( |
|
441 | log.info( | |
442 | "cached_buckets:{}: " |
|
442 | "cached_buckets:{}: " | |
443 | "loading collapsed " |
|
443 | "loading collapsed " | |
444 | "range {} {}".format(cache_key, r, r + range_size) |
|
444 | "range {} {}".format(cache_key, r, r + range_size) | |
445 | ) |
|
445 | ) | |
446 | bucket_data = fn( |
|
446 | bucket_data = fn( | |
447 | request, |
|
447 | request, | |
448 | r, |
|
448 | r, | |
449 | r + range_size, |
|
449 | r + range_size, | |
450 | step_interval, |
|
450 | step_interval, | |
451 | gap_gen, |
|
451 | gap_gen, | |
452 | bucket_count=len(ranges), |
|
452 | bucket_count=len(ranges), | |
453 | *args, |
|
453 | *args, | |
454 | **kwargs |
|
454 | **kwargs | |
455 | ) |
|
455 | ) | |
456 | for b in bucket_data: |
|
456 | for b in bucket_data: | |
457 | b_iv = iv_extractor(b) |
|
457 | b_iv = iv_extractor(b) | |
458 | buckets[b_iv] = b |
|
458 | buckets[b_iv] = b | |
459 | k2 = storage_key.format(step_interval.total_seconds(), b_iv) |
|
459 | k2 = storage_key.format(step_interval.total_seconds(), b_iv) | |
460 | request.registry.cache_regions.redis_day_30.set(k2, b) |
|
460 | request.registry.cache_regions.redis_day_30.set(k2, b) | |
461 | log.info("cached_buckets:{}: saving cache".format(cache_key)) |
|
461 | log.info("cached_buckets:{}: saving cache".format(cache_key)) | |
462 | else: |
|
462 | else: | |
463 | # bucket count is 1 for short time ranges <= 24h from now |
|
463 | # bucket count is 1 for short time ranges <= 24h from now | |
464 | bucket_data = fn( |
|
464 | bucket_data = fn( | |
465 | request, |
|
465 | request, | |
466 | stats_since, |
|
466 | stats_since, | |
467 | end_time, |
|
467 | end_time, | |
468 | step_interval, |
|
468 | step_interval, | |
469 | gap_gen, |
|
469 | gap_gen, | |
470 | bucket_count=1, |
|
470 | bucket_count=1, | |
471 | *args, |
|
471 | *args, | |
472 | **kwargs |
|
472 | **kwargs | |
473 | ) |
|
473 | ) | |
474 | for b in bucket_data: |
|
474 | for b in bucket_data: | |
475 | buckets[iv_extractor(b)] = b |
|
475 | buckets[iv_extractor(b)] = b | |
476 | return buckets |
|
476 | return buckets | |
477 |
|
477 | |||
478 |
|
478 | |||
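A minimal sketch (not part of the diff) of a loader callable compatible with get_cached_buckets; the function name, cache key and returned value are hypothetical, and a Pyramid request with the redis cache regions configured is assumed:

def count_reports(request, since_when, until, step_interval, gap_gen, bucket_count=1, **kwargs):
    # hypothetical loader - a real one would query the database or ES for the range
    return [{"start_interval": since_when, "value": 123}]

buckets = get_cached_buckets(
    request,
    stats_since=datetime.utcnow() - timedelta(hours=6),
    end_time=None,
    fn=count_reports,
    cache_key="report_counts:some_app",
)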
479 | def get_cached_split_data( |
|
479 | def get_cached_split_data( | |
480 | request, stats_since, end_time, fn, cache_key, db_session=None, *args, **kwargs |
|
480 | request, stats_since, end_time, fn, cache_key, db_session=None, *args, **kwargs | |
481 | ): |
|
481 | ): | |
482 | """ Takes "fn" that should return some data and tries to load the data |
|
482 | """ Takes "fn" that should return some data and tries to load the data | |
483 | dividing it into 2 buckets - cached "since_from" bucket and "today" |
|
483 | dividing it into 2 buckets - cached "since_from" bucket and "today" | |
484 | bucket - then the data can be reduced into single value |
|
484 | bucket - then the data can be reduced into single value | |
485 |
|
485 | |||
486 | Data is cached if the stats_since and end time give a delta bigger |
|
486 | Data is cached if the stats_since and end time give a delta bigger | |
487 | than 24 hours - then only 24h is computed on the fly |
|
487 | than 24 hours - then only 24h is computed on the fly | |
488 | """ |
|
488 | """ | |
489 | if not end_time: |
|
489 | if not end_time: | |
490 | end_time = datetime.utcnow().replace(second=0, microsecond=0) |
|
490 | end_time = datetime.utcnow().replace(second=0, microsecond=0) | |
491 | delta = end_time - stats_since |
|
491 | delta = end_time - stats_since | |
492 |
|
492 | |||
493 | if delta >= timedelta(minutes=60): |
|
493 | if delta >= timedelta(minutes=60): | |
494 | log.info( |
|
494 | log.info( | |
495 | "cached_split_data:{}: adjusting start time " |
|
495 | "cached_split_data:{}: adjusting start time " | |
496 | "for hourly or daily intervals".format(cache_key) |
|
496 | "for hourly or daily intervals".format(cache_key) | |
497 | ) |
|
497 | ) | |
498 | stats_since = stats_since.replace(hour=0, minute=0) |
|
498 | stats_since = stats_since.replace(hour=0, minute=0) | |
499 |
|
499 | |||
500 | storage_key = "buckets_split_data:" + cache_key + ":{}|{}" |
|
500 | storage_key = "buckets_split_data:" + cache_key + ":{}|{}" | |
501 | old_end_time = end_time.replace(hour=0, minute=0) |
|
501 | old_end_time = end_time.replace(hour=0, minute=0) | |
502 |
|
502 | |||
503 | final_storage_key = storage_key.format(delta.total_seconds(), old_end_time) |
|
503 | final_storage_key = storage_key.format(delta.total_seconds(), old_end_time) | |
504 | older_data = None |
|
504 | older_data = None | |
505 |
|
505 | |||
506 | cdata = request.registry.cache_regions.redis_day_7.get(final_storage_key) |
|
506 | cdata = request.registry.cache_regions.redis_day_7.get(final_storage_key) | |
507 |
|
507 | |||
508 | if cdata: |
|
508 | if cdata: | |
509 | log.info("cached_split_data:{}: found old " "bucket data".format(cache_key)) |
|
509 | log.info("cached_split_data:{}: found old " "bucket data".format(cache_key)) | |
510 | older_data = cdata |
|
510 | older_data = cdata | |
511 |
|
511 | |||
512 | if stats_since < end_time - h.time_deltas.get("24h")["delta"] and not cdata: |
|
512 | if stats_since < end_time - h.time_deltas.get("24h")["delta"] and not cdata: | |
513 | log.info( |
|
513 | log.info( | |
514 | "cached_split_data:{}: didn't find the " |
|
514 | "cached_split_data:{}: didn't find the " | |
515 | "start bucket in cache so load older data".format(cache_key) |
|
515 | "start bucket in cache so load older data".format(cache_key) | |
516 | ) |
|
516 | ) | |
517 | recent_stats_since = old_end_time |
|
517 | recent_stats_since = old_end_time | |
518 | older_data = fn( |
|
518 | older_data = fn( | |
519 | request, |
|
519 | request, | |
520 | stats_since, |
|
520 | stats_since, | |
521 | recent_stats_since, |
|
521 | recent_stats_since, | |
522 | db_session=db_session, |
|
522 | db_session=db_session, | |
523 | *args, |
|
523 | *args, | |
524 | **kwargs |
|
524 | **kwargs | |
525 | ) |
|
525 | ) | |
526 | request.registry.cache_regions.redis_day_7.set(final_storage_key, older_data) |
|
526 | request.registry.cache_regions.redis_day_7.set(final_storage_key, older_data) | |
527 | elif stats_since < end_time - h.time_deltas.get("24h")["delta"]: |
|
527 | elif stats_since < end_time - h.time_deltas.get("24h")["delta"]: | |
528 | recent_stats_since = old_end_time |
|
528 | recent_stats_since = old_end_time | |
529 | else: |
|
529 | else: | |
530 | recent_stats_since = stats_since |
|
530 | recent_stats_since = stats_since | |
531 |
|
531 | |||
532 | log.info( |
|
532 | log.info( | |
533 | "cached_split_data:{}: loading fresh " |
|
533 | "cached_split_data:{}: loading fresh " | |
534 | "data bucksts from last 24h ".format(cache_key) |
|
534 | "data bucksts from last 24h ".format(cache_key) | |
535 | ) |
|
535 | ) | |
536 | todays_data = fn( |
|
536 | todays_data = fn( | |
537 | request, recent_stats_since, end_time, db_session=db_session, *args, **kwargs |
|
537 | request, recent_stats_since, end_time, db_session=db_session, *args, **kwargs | |
538 | ) |
|
538 | ) | |
539 | return older_data, todays_data |
|
539 | return older_data, todays_data | |
540 |
|
540 | |||
541 |
|
541 | |||
542 | def in_batches(seq, size): |
|
542 | def in_batches(seq, size): | |
543 | """ |
|
543 | """ | |
544 | Splits an iterable into batches of specified size |
|
544 | Splits an iterable into batches of specified size | |
545 | :param seq (iterable) |
|
545 | :param seq (iterable) | |
546 | :param size integer |
|
546 | :param size integer | |
547 | """ |
|
547 | """ | |
548 | return (seq[pos : pos + size] for pos in range(0, len(seq), size)) |
|
548 | return (seq[pos : pos + size] for pos in range(0, len(seq), size)) | |
549 |
|
549 | |||
550 |
|
550 | |||
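For illustration (not from the diff):

list(in_batches(list(range(7)), 3))
# [[0, 1, 2], [3, 4, 5], [6]]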
551 | def get_es_info(cache_regions, es_conn): |
|
551 | def get_es_info(cache_regions, es_conn): | |
552 | @cache_regions.memory_min_10.cache_on_arguments() |
|
552 | @cache_regions.memory_min_10.cache_on_arguments() | |
553 | def get_es_info_cached(): |
|
553 | def get_es_info_cached(): | |
554 | returned_info = {"raw_info": es_conn.info()} |
|
554 | returned_info = {"raw_info": es_conn.info()} | |
555 | returned_info["version"] = returned_info["raw_info"]["version"]["number"].split( |
|
555 | returned_info["version"] = returned_info["raw_info"]["version"]["number"].split( | |
|
556 | "." | |||
|
557 | ) | |||
556 | return returned_info |
|
558 | return returned_info | |
557 |
|
559 | |||
558 | return get_es_info_cached() |
|
560 | return get_es_info_cached() |
@@ -1,534 +1,534 b''
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 |
|
2 | |||
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors |
|
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors | |
4 | # |
|
4 | # | |
5 | # Licensed under the Apache License, Version 2.0 (the "License"); |
|
5 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
6 | # you may not use this file except in compliance with the License. |
|
6 | # you may not use this file except in compliance with the License. | |
7 | # You may obtain a copy of the License at |
|
7 | # You may obtain a copy of the License at | |
8 | # |
|
8 | # | |
9 | # http://www.apache.org/licenses/LICENSE-2.0 |
|
9 | # http://www.apache.org/licenses/LICENSE-2.0 | |
10 | # |
|
10 | # | |
11 | # Unless required by applicable law or agreed to in writing, software |
|
11 | # Unless required by applicable law or agreed to in writing, software | |
12 | # distributed under the License is distributed on an "AS IS" BASIS, |
|
12 | # distributed under the License is distributed on an "AS IS" BASIS, | |
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
14 | # See the License for the specific language governing permissions and |
|
14 | # See the License for the specific language governing permissions and | |
15 | # limitations under the License. |
|
15 | # limitations under the License. | |
16 |
|
16 | |||
17 | from datetime import datetime, timedelta |
|
17 | from datetime import datetime, timedelta | |
18 | import math |
|
18 | import math | |
19 | import uuid |
|
19 | import uuid | |
20 | import hashlib |
|
20 | import hashlib | |
21 | import copy |
|
21 | import copy | |
22 | import urllib.parse |
|
22 | import urllib.parse | |
23 | import logging |
|
23 | import logging | |
24 | import sqlalchemy as sa |
|
24 | import sqlalchemy as sa | |
25 |
|
25 | |||
26 | from appenlight.models import Base, Datastores |
|
26 | from appenlight.models import Base, Datastores | |
27 | from appenlight.lib.utils.date_utils import convert_date |
|
27 | from appenlight.lib.utils.date_utils import convert_date | |
28 | from appenlight.lib.utils import convert_es_type |
|
28 | from appenlight.lib.utils import convert_es_type | |
29 | from appenlight.models.slow_call import SlowCall |
|
29 | from appenlight.models.slow_call import SlowCall | |
30 | from appenlight.lib.utils import channelstream_request |
|
30 | from appenlight.lib.utils import channelstream_request | |
31 | from appenlight.lib.enums import ReportType, Language |
|
31 | from appenlight.lib.enums import ReportType, Language | |
32 | from pyramid.threadlocal import get_current_registry, get_current_request |
|
32 | from pyramid.threadlocal import get_current_registry, get_current_request | |
33 | from sqlalchemy.dialects.postgresql import JSON |
|
33 | from sqlalchemy.dialects.postgresql import JSON | |
34 | from ziggurat_foundations.models.base import BaseModel |
|
34 | from ziggurat_foundations.models.base import BaseModel | |
35 |
|
35 | |||
36 | log = logging.getLogger(__name__) |
|
36 | log = logging.getLogger(__name__) | |
37 |
|
37 | |||
38 | REPORT_TYPE_MATRIX = { |
|
38 | REPORT_TYPE_MATRIX = { | |
39 | "http_status": {"type": "int", "ops": ("eq", "ne", "ge", "le")}, |
|
39 | "http_status": {"type": "int", "ops": ("eq", "ne", "ge", "le")}, | |
40 | "group:priority": {"type": "int", "ops": ("eq", "ne", "ge", "le")}, |
|
40 | "group:priority": {"type": "int", "ops": ("eq", "ne", "ge", "le")}, | |
41 | "duration": {"type": "float", "ops": ("ge", "le")}, |
|
41 | "duration": {"type": "float", "ops": ("ge", "le")}, | |
42 | "url_domain": { |
|
42 | "url_domain": { | |
43 | "type": "unicode", |
|
43 | "type": "unicode", | |
44 | "ops": ("eq", "ne", "startswith", "endswith", "contains"), |
|
44 | "ops": ("eq", "ne", "startswith", "endswith", "contains"), | |
45 | }, |
|
45 | }, | |
46 | "url_path": { |
|
46 | "url_path": { | |
47 | "type": "unicode", |
|
47 | "type": "unicode", | |
48 | "ops": ("eq", "ne", "startswith", "endswith", "contains"), |
|
48 | "ops": ("eq", "ne", "startswith", "endswith", "contains"), | |
49 | }, |
|
49 | }, | |
50 | "error": { |
|
50 | "error": { | |
51 | "type": "unicode", |
|
51 | "type": "unicode", | |
52 | "ops": ("eq", "ne", "startswith", "endswith", "contains"), |
|
52 | "ops": ("eq", "ne", "startswith", "endswith", "contains"), | |
53 | }, |
|
53 | }, | |
54 | "tags:server_name": { |
|
54 | "tags:server_name": { | |
55 | "type": "unicode", |
|
55 | "type": "unicode", | |
56 | "ops": ("eq", "ne", "startswith", "endswith", "contains"), |
|
56 | "ops": ("eq", "ne", "startswith", "endswith", "contains"), | |
57 | }, |
|
57 | }, | |
58 | "traceback": {"type": "unicode", "ops": ("contains",)}, |
|
58 | "traceback": {"type": "unicode", "ops": ("contains",)}, | |
59 | "group:occurences": {"type": "int", "ops": ("eq", "ne", "ge", "le")}, |
|
59 | "group:occurences": {"type": "int", "ops": ("eq", "ne", "ge", "le")}, | |
60 | } |
|
60 | } | |
61 |
|
61 | |||
62 |
|
62 | |||
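For orientation (not from the diff), the matrix is a plain lookup of allowed operators and value types per filterable field:

REPORT_TYPE_MATRIX["http_status"]["ops"]                 # ("eq", "ne", "ge", "le")
"startswith" in REPORT_TYPE_MATRIX["url_path"]["ops"]    # True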
63 | class Report(Base, BaseModel): |
|
63 | class Report(Base, BaseModel): | |
64 | __tablename__ = "reports" |
|
64 | __tablename__ = "reports" | |
65 | __table_args__ = {"implicit_returning": False} |
|
65 | __table_args__ = {"implicit_returning": False} | |
66 |
|
66 | |||
67 | id = sa.Column(sa.Integer, nullable=False, primary_key=True) |
|
67 | id = sa.Column(sa.Integer, nullable=False, primary_key=True) | |
68 | group_id = sa.Column( |
|
68 | group_id = sa.Column( | |
69 | sa.BigInteger, |
|
69 | sa.BigInteger, | |
70 | sa.ForeignKey("reports_groups.id", ondelete="cascade", onupdate="cascade"), |
|
70 | sa.ForeignKey("reports_groups.id", ondelete="cascade", onupdate="cascade"), | |
71 | ) |
|
71 | ) | |
72 | resource_id = sa.Column(sa.Integer(), nullable=False, index=True) |
|
72 | resource_id = sa.Column(sa.Integer(), nullable=False, index=True) | |
73 | report_type = sa.Column(sa.Integer(), nullable=False, index=True) |
|
73 | report_type = sa.Column(sa.Integer(), nullable=False, index=True) | |
74 | error = sa.Column(sa.UnicodeText(), index=True) |
|
74 | error = sa.Column(sa.UnicodeText(), index=True) | |
75 | extra = sa.Column(JSON(), default={}) |
|
75 | extra = sa.Column(JSON(), default={}) | |
76 | request = sa.Column(JSON(), nullable=False, default={}) |
|
76 | request = sa.Column(JSON(), nullable=False, default={}) | |
77 | ip = sa.Column(sa.String(39), index=True, default="") |
|
77 | ip = sa.Column(sa.String(39), index=True, default="") | |
78 | username = sa.Column(sa.Unicode(255), default="") |
|
78 | username = sa.Column(sa.Unicode(255), default="") | |
79 | user_agent = sa.Column(sa.Unicode(255), default="") |
|
79 | user_agent = sa.Column(sa.Unicode(255), default="") | |
80 | url = sa.Column(sa.UnicodeText(), index=True) |
|
80 | url = sa.Column(sa.UnicodeText(), index=True) | |
81 | request_id = sa.Column(sa.Text()) |
|
81 | request_id = sa.Column(sa.Text()) | |
82 | request_stats = sa.Column(JSON(), nullable=False, default={}) |
|
82 | request_stats = sa.Column(JSON(), nullable=False, default={}) | |
83 | traceback = sa.Column(JSON(), nullable=False, default=None) |
|
83 | traceback = sa.Column(JSON(), nullable=False, default=None) | |
84 | traceback_hash = sa.Column(sa.Text()) |
|
84 | traceback_hash = sa.Column(sa.Text()) | |
85 | start_time = sa.Column( |
|
85 | start_time = sa.Column( | |
86 | sa.DateTime(), default=datetime.utcnow, server_default=sa.func.now() |
|
86 | sa.DateTime(), default=datetime.utcnow, server_default=sa.func.now() | |
87 | ) |
|
87 | ) | |
88 | end_time = sa.Column(sa.DateTime()) |
|
88 | end_time = sa.Column(sa.DateTime()) | |
89 | duration = sa.Column(sa.Float, default=0) |
|
89 | duration = sa.Column(sa.Float, default=0) | |
90 | http_status = sa.Column(sa.Integer, index=True) |
|
90 | http_status = sa.Column(sa.Integer, index=True) | |
91 | url_domain = sa.Column(sa.Unicode(100), index=True) |
|
91 | url_domain = sa.Column(sa.Unicode(100), index=True) | |
92 | url_path = sa.Column(sa.Unicode(255), index=True) |
|
92 | url_path = sa.Column(sa.Unicode(255), index=True) | |
93 | tags = sa.Column(JSON(), nullable=False, default={}) |
|
93 | tags = sa.Column(JSON(), nullable=False, default={}) | |
94 | language = sa.Column(sa.Integer(), default=0) |
|
94 | language = sa.Column(sa.Integer(), default=0) | |
95 | # this is used to determine partition for the report |
|
95 | # this is used to determine partition for the report | |
96 | report_group_time = sa.Column( |
|
96 | report_group_time = sa.Column( | |
97 | sa.DateTime(), default=datetime.utcnow, server_default=sa.func.now() |
|
97 | sa.DateTime(), default=datetime.utcnow, server_default=sa.func.now() | |
98 | ) |
|
98 | ) | |
99 |
|
99 | |||
100 | logs = sa.orm.relationship( |
|
100 | logs = sa.orm.relationship( | |
101 | "Log", |
|
101 | "Log", | |
102 | lazy="dynamic", |
|
102 | lazy="dynamic", | |
103 | passive_deletes=True, |
|
103 | passive_deletes=True, | |
104 | passive_updates=True, |
|
104 | passive_updates=True, | |
105 | primaryjoin="and_(Report.request_id==Log.request_id, " |
|
105 | primaryjoin="and_(Report.request_id==Log.request_id, " | |
106 | "Log.request_id != None, Log.request_id != '')", |
|
106 | "Log.request_id != None, Log.request_id != '')", | |
107 | foreign_keys="[Log.request_id]", |
|
107 | foreign_keys="[Log.request_id]", | |
108 | ) |
|
108 | ) | |
109 |
|
109 | |||
110 | slow_calls = sa.orm.relationship( |
|
110 | slow_calls = sa.orm.relationship( | |
111 | "SlowCall", |
|
111 | "SlowCall", | |
112 | backref="detail", |
|
112 | backref="detail", | |
113 | cascade="all, delete-orphan", |
|
113 | cascade="all, delete-orphan", | |
114 | passive_deletes=True, |
|
114 | passive_deletes=True, | |
115 | passive_updates=True, |
|
115 | passive_updates=True, | |
116 | order_by="SlowCall.timestamp", |
|
116 | order_by="SlowCall.timestamp", | |
117 | ) |
|
117 | ) | |
118 |
|
118 | |||
119 | def set_data(self, data, resource, protocol_version=None): |
|
119 | def set_data(self, data, resource, protocol_version=None): | |
120 | self.http_status = data["http_status"] |
|
120 | self.http_status = data["http_status"] | |
121 | self.priority = data["priority"] |
|
121 | self.priority = data["priority"] | |
122 | self.error = data["error"] |
|
122 | self.error = data["error"] | |
123 | report_language = data.get("language", "").lower() |
|
123 | report_language = data.get("language", "").lower() | |
124 | self.language = getattr(Language, report_language, Language.unknown) |
|
124 | self.language = getattr(Language, report_language, Language.unknown) | |
125 | # we need temp holder here to decide later |
|
125 | # we need temp holder here to decide later | |
126 | # if we want to commit the tags when the report is marked for creation |
|
126 | # if we want to commit the tags when the report is marked for creation | |
127 | self.tags = {"server_name": data["server"], "view_name": data["view_name"]} |
|
127 | self.tags = {"server_name": data["server"], "view_name": data["view_name"]} | |
128 | if data.get("tags"): |
|
128 | if data.get("tags"): | |
129 | for tag_tuple in data["tags"]: |
|
129 | for tag_tuple in data["tags"]: | |
130 | self.tags[tag_tuple[0]] = tag_tuple[1] |
|
130 | self.tags[tag_tuple[0]] = tag_tuple[1] | |
131 | self.traceback = data["traceback"] |
|
131 | self.traceback = data["traceback"] | |
132 | stripped_traceback = self.stripped_traceback() |
|
132 | stripped_traceback = self.stripped_traceback() | |
133 | tb_repr = repr(stripped_traceback).encode("utf8") |
|
133 | tb_repr = repr(stripped_traceback).encode("utf8") | |
134 | self.traceback_hash = hashlib.sha1(tb_repr).hexdigest() |
|
134 | self.traceback_hash = hashlib.sha1(tb_repr).hexdigest() | |
135 | url_info = urllib.parse.urlsplit(data.get("url", ""), allow_fragments=False) |
|
135 | url_info = urllib.parse.urlsplit(data.get("url", ""), allow_fragments=False) | |
136 | self.url_domain = url_info.netloc[:128] |
|
136 | self.url_domain = url_info.netloc[:128] | |
137 | self.url_path = url_info.path[:2048] |
|
137 | self.url_path = url_info.path[:2048] | |
138 | self.occurences = data["occurences"] |
|
138 | self.occurences = data["occurences"] | |
139 | if self.error: |
|
139 | if self.error: | |
140 | self.report_type = ReportType.error |
|
140 | self.report_type = ReportType.error | |
141 | else: |
|
141 | else: | |
142 | self.report_type = ReportType.slow |
|
142 | self.report_type = ReportType.slow | |
143 |
|
143 | |||
144 | # but if its status is 404, it's a 404-type report |
|
144 | # but if its status is 404, it's a 404-type report | |
145 | if self.http_status in [404, "404"] or self.error == "404 Not Found": |
|
145 | if self.http_status in [404, "404"] or self.error == "404 Not Found": | |
146 | self.report_type = ReportType.not_found |
|
146 | self.report_type = ReportType.not_found | |
147 | self.error = "" |
|
147 | self.error = "" | |
148 |
|
148 | |||
149 | self.generate_grouping_hash( |
|
149 | self.generate_grouping_hash( | |
150 | data.get("appenlight.group_string", data.get("group_string")), |
|
150 | data.get("appenlight.group_string", data.get("group_string")), | |
151 | resource.default_grouping, |
|
151 | resource.default_grouping, | |
152 | protocol_version, |
|
152 | protocol_version, | |
153 | ) |
|
153 | ) | |
154 |
|
154 | |||
155 | # details |
|
155 | # details | |
156 | if data["http_status"] in [404, "404"]: |
|
156 | if data["http_status"] in [404, "404"]: | |
157 | data = { |
|
157 | data = { | |
158 | "username": data["username"], |
|
158 | "username": data["username"], | |
159 | "ip": data["ip"], |
|
159 | "ip": data["ip"], | |
160 | "url": data["url"], |
|
160 | "url": data["url"], | |
161 | "user_agent": data["user_agent"], |
|
161 | "user_agent": data["user_agent"], | |
162 | } |
|
162 | } | |
163 | if data.get("HTTP_REFERER") or data.get("http_referer"): |
|
163 | if data.get("HTTP_REFERER") or data.get("http_referer"): | |
164 | data["HTTP_REFERER"] = data.get("HTTP_REFERER", "") or data.get( |
|
164 | data["HTTP_REFERER"] = data.get("HTTP_REFERER", "") or data.get( | |
165 | "http_referer", "" |
|
165 | "http_referer", "" | |
166 | ) |
|
166 | ) | |
167 |
|
167 | |||
168 | self.resource_id = resource.resource_id |
|
168 | self.resource_id = resource.resource_id | |
169 | self.username = data["username"] |
|
169 | self.username = data["username"] | |
170 | self.user_agent = data["user_agent"] |
|
170 | self.user_agent = data["user_agent"] | |
171 | self.ip = data["ip"] |
|
171 | self.ip = data["ip"] | |
172 | self.extra = {} |
|
172 | self.extra = {} | |
173 | if data.get("extra"): |
|
173 | if data.get("extra"): | |
174 | for extra_tuple in data["extra"]: |
|
174 | for extra_tuple in data["extra"]: | |
175 | self.extra[extra_tuple[0]] = extra_tuple[1] |
|
175 | self.extra[extra_tuple[0]] = extra_tuple[1] | |
176 |
|
176 | |||
177 | self.url = data["url"] |
|
177 | self.url = data["url"] | |
178 | self.request_id = data.get("request_id", "").replace("-", "") or str( |
|
178 | self.request_id = data.get("request_id", "").replace("-", "") or str( | |
179 | uuid.uuid4() |
|
179 | uuid.uuid4() | |
180 | ) |
|
180 | ) | |
181 | request_data = data.get("request", {}) |
|
181 | request_data = data.get("request", {}) | |
182 |
|
182 | |||
183 | self.request = request_data |
|
183 | self.request = request_data | |
184 | self.request_stats = data.get("request_stats") or {} |
|
184 | self.request_stats = data.get("request_stats") or {} | |
185 | traceback = data.get("traceback") |
|
185 | traceback = data.get("traceback") | |
186 | if not traceback: |
|
186 | if not traceback: | |
187 | traceback = data.get("frameinfo") |
|
187 | traceback = data.get("frameinfo") | |
188 | self.traceback = traceback |
|
188 | self.traceback = traceback | |
189 | start_date = convert_date(data.get("start_time")) |
|
189 | start_date = convert_date(data.get("start_time")) | |
190 | if not self.start_time or self.start_time < start_date: |
|
190 | if not self.start_time or self.start_time < start_date: | |
191 | self.start_time = start_date |
|
191 | self.start_time = start_date | |
192 |
|
192 | |||
193 | self.end_time = convert_date(data.get("end_time"), False) |
|
193 | self.end_time = convert_date(data.get("end_time"), False) | |
194 | self.duration = 0 |
|
194 | self.duration = 0 | |
195 |
|
195 | |||
196 | if self.start_time and self.end_time: |
|
196 | if self.start_time and self.end_time: | |
197 | d = self.end_time - self.start_time |
|
197 | d = self.end_time - self.start_time | |
198 | self.duration = d.total_seconds() |
|
198 | self.duration = d.total_seconds() | |
199 |
|
199 | |||
200 | # update tags with other vars |
|
200 | # update tags with other vars | |
201 | if self.username: |
|
201 | if self.username: | |
202 | self.tags["user_name"] = self.username |
|
202 | self.tags["user_name"] = self.username | |
203 | self.tags["report_language"] = Language.key_from_value(self.language) |
|
203 | self.tags["report_language"] = Language.key_from_value(self.language) | |
204 |
|
204 | |||
205 | def add_slow_calls(self, data, report_group): |
|
205 | def add_slow_calls(self, data, report_group): | |
206 | slow_calls = [] |
|
206 | slow_calls = [] | |
207 | for call in data.get("slow_calls", []): |
|
207 | for call in data.get("slow_calls", []): | |
208 | sc_inst = SlowCall() |
|
208 | sc_inst = SlowCall() | |
209 | sc_inst.set_data( |
|
209 | sc_inst.set_data( | |
210 | call, resource_id=self.resource_id, report_group=report_group |
|
210 | call, resource_id=self.resource_id, report_group=report_group | |
211 | ) |
|
211 | ) | |
212 | slow_calls.append(sc_inst) |
|
212 | slow_calls.append(sc_inst) | |
213 | self.slow_calls.extend(slow_calls) |
|
213 | self.slow_calls.extend(slow_calls) | |
214 | return slow_calls |
|
214 | return slow_calls | |
215 |
|
215 | |||
216 | def get_dict(self, request, details=False, exclude_keys=None, include_keys=None): |
|
216 | def get_dict(self, request, details=False, exclude_keys=None, include_keys=None): | |
217 | from appenlight.models.services.report_group import ReportGroupService |
|
217 | from appenlight.models.services.report_group import ReportGroupService | |
218 |
|
218 | |||
219 | instance_dict = super(Report, self).get_dict() |
|
219 | instance_dict = super(Report, self).get_dict() | |
220 | instance_dict["req_stats"] = self.req_stats() |
|
220 | instance_dict["req_stats"] = self.req_stats() | |
221 | instance_dict["group"] = {} |
|
221 | instance_dict["group"] = {} | |
222 | instance_dict["group"]["id"] = self.report_group.id |
|
222 | instance_dict["group"]["id"] = self.report_group.id | |
223 | instance_dict["group"]["total_reports"] = self.report_group.total_reports |
|
223 | instance_dict["group"]["total_reports"] = self.report_group.total_reports | |
224 | instance_dict["group"]["last_report"] = self.report_group.last_report |
|
224 | instance_dict["group"]["last_report"] = self.report_group.last_report | |
225 | instance_dict["group"]["priority"] = self.report_group.priority |
|
225 | instance_dict["group"]["priority"] = self.report_group.priority | |
226 | instance_dict["group"]["occurences"] = self.report_group.occurences |
|
226 | instance_dict["group"]["occurences"] = self.report_group.occurences | |
227 | instance_dict["group"]["last_timestamp"] = self.report_group.last_timestamp |
|
227 | instance_dict["group"]["last_timestamp"] = self.report_group.last_timestamp | |
228 | instance_dict["group"]["first_timestamp"] = self.report_group.first_timestamp |
|
228 | instance_dict["group"]["first_timestamp"] = self.report_group.first_timestamp | |
229 | instance_dict["group"]["public"] = self.report_group.public |
|
229 | instance_dict["group"]["public"] = self.report_group.public | |
230 | instance_dict["group"]["fixed"] = self.report_group.fixed |
|
230 | instance_dict["group"]["fixed"] = self.report_group.fixed | |
231 | instance_dict["group"]["read"] = self.report_group.read |
|
231 | instance_dict["group"]["read"] = self.report_group.read | |
232 | instance_dict["group"]["average_duration"] = self.report_group.average_duration |
|
232 | instance_dict["group"]["average_duration"] = self.report_group.average_duration | |
233 |
|
233 | |||
234 | instance_dict["resource_name"] = self.report_group.application.resource_name |
|
234 | instance_dict["resource_name"] = self.report_group.application.resource_name | |
235 | instance_dict["report_type"] = self.report_type |
|
235 | instance_dict["report_type"] = self.report_type | |
236 |
|
236 | |||
237 | if instance_dict["http_status"] == 404 and not instance_dict["error"]: |
|
237 | if instance_dict["http_status"] == 404 and not instance_dict["error"]: | |
238 | instance_dict["error"] = "404 Not Found" |
|
238 | instance_dict["error"] = "404 Not Found" | |
239 |
|
239 | |||
240 | if details: |
|
240 | if details: | |
241 | instance_dict[ |
|
241 | instance_dict[ | |
242 | "affected_users_count" |
|
242 | "affected_users_count" | |
243 | ] = ReportGroupService.affected_users_count(self.report_group) |
|
243 | ] = ReportGroupService.affected_users_count(self.report_group) | |
244 | instance_dict["top_affected_users"] = [ |
|
244 | instance_dict["top_affected_users"] = [ | |
245 | {"username": u.username, "count": u.count} |
|
245 | {"username": u.username, "count": u.count} | |
246 | for u in ReportGroupService.top_affected_users(self.report_group) |
|
246 | for u in ReportGroupService.top_affected_users(self.report_group) | |
247 | ] |
|
247 | ] | |
248 | instance_dict["application"] = {"integrations": []} |
|
248 | instance_dict["application"] = {"integrations": []} | |
249 | for integration in self.report_group.application.integrations: |
|
249 | for integration in self.report_group.application.integrations: | |
250 | if integration.front_visible: |
|
250 | if integration.front_visible: | |
251 | instance_dict["application"]["integrations"].append( |
|
251 | instance_dict["application"]["integrations"].append( | |
252 | { |
|
252 | { | |
253 | "name": integration.integration_name, |
|
253 | "name": integration.integration_name, | |
254 | "action": integration.integration_action, |
|
254 | "action": integration.integration_action, | |
255 | } |
|
255 | } | |
256 | ) |
|
256 | ) | |
257 | instance_dict["comments"] = [ |
|
257 | instance_dict["comments"] = [ | |
258 | c.get_dict() for c in self.report_group.comments |
|
258 | c.get_dict() for c in self.report_group.comments | |
259 | ] |
|
259 | ] | |
260 |
|
260 | |||
261 | instance_dict["group"]["next_report"] = None |
|
261 | instance_dict["group"]["next_report"] = None | |
262 | instance_dict["group"]["previous_report"] = None |
|
262 | instance_dict["group"]["previous_report"] = None | |
263 | next_in_group = self.get_next_in_group(request) |
|
263 | next_in_group = self.get_next_in_group(request) | |
264 | previous_in_group = self.get_previous_in_group(request) |
|
264 | previous_in_group = self.get_previous_in_group(request) | |
265 | if next_in_group: |
|
265 | if next_in_group: | |
266 | instance_dict["group"]["next_report"] = next_in_group |
|
266 | instance_dict["group"]["next_report"] = next_in_group | |
267 | if previous_in_group: |
|
267 | if previous_in_group: | |
268 | instance_dict["group"]["previous_report"] = previous_in_group |
|
268 | instance_dict["group"]["previous_report"] = previous_in_group | |
269 |
|
269 | |||
270 | # slow call ordering |
|
270 | # slow call ordering | |
271 | def find_parent(row, data): |
|
271 | def find_parent(row, data): | |
272 | for r in reversed(data): |
|
272 | for r in reversed(data): | |
273 | try: |
|
273 | try: | |
274 | if ( |
|
274 | if ( | |
275 | row["timestamp"] > r["timestamp"] |
|
275 | row["timestamp"] > r["timestamp"] | |
276 | and row["end_time"] < r["end_time"] |
|
276 | and row["end_time"] < r["end_time"] | |
277 | ): |
|
277 | ): | |
278 | return r |
|
278 | return r | |
279 | except TypeError as e: |
|
279 | except TypeError as e: | |
280 | log.warning("reports_view.find_parent: %s" % e) |
|
280 | log.warning("reports_view.find_parent: %s" % e) | |
281 | return None |
|
281 | return None | |
282 |
|
282 | |||
283 | new_calls = [] |
|
283 | new_calls = [] | |
284 | calls = [c.get_dict() for c in self.slow_calls] |
|
284 | calls = [c.get_dict() for c in self.slow_calls] | |
285 | while calls: |
|
285 | while calls: | |
286 | # start from end |
|
286 | # start from end | |
287 | for x in range(len(calls) - 1, -1, -1): |
|
287 | for x in range(len(calls) - 1, -1, -1): | |
288 | parent = find_parent(calls[x], calls) |
|
288 | parent = find_parent(calls[x], calls) | |
289 | if parent: |
|
289 | if parent: | |
290 | parent["children"].append(calls[x]) |
|
290 | parent["children"].append(calls[x]) | |
291 | else: |
|
291 | else: | |
292 | # no parent at all? append to new calls anyway |

292 | # no parent at all? append to new calls anyway | |
293 | new_calls.append(calls[x]) |
|
293 | new_calls.append(calls[x]) | |
294 | # print 'append', calls[x] |
|
294 | # print 'append', calls[x] | |
295 | del calls[x] |
|
295 | del calls[x] | |
296 | break |
|
296 | break | |
297 | instance_dict["slow_calls"] = new_calls |
|
297 | instance_dict["slow_calls"] = new_calls | |
298 |
|
298 | |||
299 | instance_dict["front_url"] = self.get_public_url(request) |
|
299 | instance_dict["front_url"] = self.get_public_url(request) | |
300 |
|
300 | |||
301 | exclude_keys_list = exclude_keys or [] |
|
301 | exclude_keys_list = exclude_keys or [] | |
302 | include_keys_list = include_keys or [] |
|
302 | include_keys_list = include_keys or [] | |
303 | for k in list(instance_dict.keys()): |
|
303 | for k in list(instance_dict.keys()): | |
304 | if k == "group": |
|
304 | if k == "group": | |
305 | continue |
|
305 | continue | |
306 | if k in exclude_keys_list or (k not in include_keys_list and include_keys): |
|
306 | if k in exclude_keys_list or (k not in include_keys_list and include_keys): | |
307 | del instance_dict[k] |
|
307 | del instance_dict[k] | |
308 | return instance_dict |
|
308 | return instance_dict | |
309 |
|
309 | |||
310 | def get_previous_in_group(self, request): |
|
310 | def get_previous_in_group(self, request): | |
311 | query = { |
|
311 | query = { | |
312 | "size": 1, |
|
312 | "size": 1, | |
313 | "query": { |
|
313 | "query": { | |
314 | "bool": { |
|
314 | "bool": { | |
315 | "filter": [ |
|
315 | "filter": [ | |
316 | {"term": {"group_id": self.group_id}}, |
|
316 | {"term": {"group_id": self.group_id}}, | |
317 | {"range": {"report_id": {"lt": self.id}}}, |
|
317 | {"range": {"report_id": {"lt": self.id}}}, | |
318 | ] |
|
318 | ] | |
319 | } |
|
319 | } | |
320 | }, |
|
320 | }, | |
321 | "sort": [{"_doc": {"order": "desc"}}], |
|
321 | "sort": [{"_doc": {"order": "desc"}}], | |
322 | } |
|
322 | } | |
323 | result = request.es_conn.search( |
|
323 | result = request.es_conn.search( | |
324 | body=query, index=self.partition_id, doc_type="report" |
|
324 | body=query, index=self.partition_id, doc_type="report" | |
325 | ) |
|
325 | ) | |
326 | if result["hits"]["total"]: |
|
326 | if result["hits"]["total"]: | |
327 | return result["hits"]["hits"][0]["_source"]["report_id"] |
|
327 | return result["hits"]["hits"][0]["_source"]["report_id"] | |
328 |
|
328 | |||
329 | def get_next_in_group(self, request): |
|
329 | def get_next_in_group(self, request): | |
330 | query = { |
|
330 | query = { | |
331 | "size": 1, |
|
331 | "size": 1, | |
332 | "query": { |
|
332 | "query": { | |
333 | "bool": { |
|
333 | "bool": { | |
334 | "filter": [ |
|
334 | "filter": [ | |
335 | {"term": {"group_id": self.group_id}}, |
|
335 | {"term": {"group_id": self.group_id}}, | |
336 | {"range": {"report_id": {"gt": self.id}}}, |
|
336 | {"range": {"report_id": {"gt": self.id}}}, | |
337 | ] |
|
337 | ] | |
338 | } |
|
338 | } | |
339 | }, |
|
339 | }, | |
340 | "sort": [{"_doc": {"order": "asc"}}], |
|
340 | "sort": [{"_doc": {"order": "asc"}}], | |
341 | } |
|
341 | } | |
342 | result = request.es_conn.search( |
|
342 | result = request.es_conn.search( | |
343 | body=query, index=self.partition_id, doc_type="report" |
|
343 | body=query, index=self.partition_id, doc_type="report" | |
344 | ) |
|
344 | ) | |
345 | if result["hits"]["total"]: |
|
345 | if result["hits"]["total"]: | |
346 | return result["hits"]["hits"][0]["_source"]["report_id"] |
|
346 | return result["hits"]["hits"][0]["_source"]["report_id"] | |
347 |
|
347 | |||
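A note on the two sibling-lookup helpers above: get_previous_in_group() and get_next_in_group() page within a report group by filtering on group_id plus a report_id range and taking a single hit sorted by _doc. A minimal sketch of the body the next-report query sends (the ids and index name are made-up placeholders):

```python
# Sketch of the query get_next_in_group() builds; get_previous_in_group()
# only flips the range to "lt" and the sort order to "desc".
next_query = {
    "size": 1,
    "query": {
        "bool": {
            "filter": [
                {"term": {"group_id": 1234}},            # stay inside the same group
                {"range": {"report_id": {"gt": 5678}}},  # reports after the current one
            ]
        }
    },
    "sort": [{"_doc": {"order": "asc"}}],
}
# request.es_conn.search(body=next_query, index="rcae_r_2017_05", doc_type="report")
```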
348 | def get_public_url(self, request=None, report_group=None, _app_url=None): |
|
348 | def get_public_url(self, request=None, report_group=None, _app_url=None): | |
349 | """ |
|
349 | """ | |
350 | Returns url that user can use to visit specific report |
|
350 | Returns url that user can use to visit specific report | |
351 | """ |
|
351 | """ | |
352 | if not request: |
|
352 | if not request: | |
353 | request = get_current_request() |
|
353 | request = get_current_request() | |
354 | url = request.route_url("/", _app_url=_app_url) |
|
354 | url = request.route_url("/", _app_url=_app_url) | |
355 | if report_group: |
|
355 | if report_group: | |
356 | return (url + "ui/report/%s/%s") % (report_group.id, self.id) |
|
356 | return (url + "ui/report/%s/%s") % (report_group.id, self.id) | |
357 | return (url + "ui/report/%s/%s") % (self.group_id, self.id) |
|
357 | return (url + "ui/report/%s/%s") % (self.group_id, self.id) | |
358 |
|
358 | |||
359 | def req_stats(self): |
|
359 | def req_stats(self): | |
360 | stats = self.request_stats.copy() |
|
360 | stats = self.request_stats.copy() | |
361 | stats["percentages"] = {} |
|
361 | stats["percentages"] = {} | |
362 | stats["percentages"]["main"] = 100.0 |
|
362 | stats["percentages"]["main"] = 100.0 | |
363 | main = stats.get("main", 0.0) |
|
363 | main = stats.get("main", 0.0) | |
364 | if not main: |
|
364 | if not main: | |
365 | return None |
|
365 | return None | |
366 | for name, call_time in stats.items(): |
|
366 | for name, call_time in stats.items(): | |
367 | if "calls" not in name and "main" not in name and "percentages" not in name: |
|
367 | if "calls" not in name and "main" not in name and "percentages" not in name: | |
368 | stats["main"] -= call_time |
|
368 | stats["main"] -= call_time | |
369 | stats["percentages"][name] = math.floor((call_time / main * 100.0)) |
|
369 | stats["percentages"][name] = math.floor((call_time / main * 100.0)) | |
370 | stats["percentages"]["main"] -= stats["percentages"][name] |
|
370 | stats["percentages"]["main"] -= stats["percentages"][name] | |
371 | if stats["percentages"]["main"] < 0.0: |
|
371 | if stats["percentages"]["main"] < 0.0: | |
372 | stats["percentages"]["main"] = 0.0 |
|
372 | stats["percentages"]["main"] = 0.0 | |
373 | stats["main"] = 0.0 |
|
373 | stats["main"] = 0.0 | |
374 | return stats |
|
374 | return stats | |
375 |
|
375 | |||
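To make the req_stats() arithmetic above easier to follow, here is a small standalone sketch of the same percentage split, run on a made-up request_stats payload:

```python
import math

# Hypothetical payload: total request time plus per-subsystem timings.
stats = {"main": 1.0, "sql": 0.4, "tmpl": 0.1, "sql_calls": 3}

percentages = {"main": 100.0}
main = stats["main"]
for name, call_time in list(stats.items()):
    if "calls" not in name and "main" not in name and "percentages" not in name:
        stats["main"] -= call_time                       # time not attributed elsewhere
        percentages[name] = math.floor(call_time / main * 100.0)
        percentages["main"] -= percentages[name]

# percentages -> {"main": 50.0, "sql": 40, "tmpl": 10}
```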
376 | def generate_grouping_hash( |
|
376 | def generate_grouping_hash( | |
377 | self, hash_string=None, default_grouping=None, protocol_version=None |
|
377 | self, hash_string=None, default_grouping=None, protocol_version=None | |
378 | ): |
|
378 | ): | |
379 | """ |
|
379 | """ | |
380 | Generates SHA1 hash that will be used to group reports together |
|
380 | Generates SHA1 hash that will be used to group reports together | |
381 | """ |
|
381 | """ | |
382 | if not hash_string: |
|
382 | if not hash_string: | |
383 | location = self.tags.get("view_name") or self.url_path |
|
383 | location = self.tags.get("view_name") or self.url_path | |
384 | server_name = self.tags.get("server_name") or "" |
|
384 | server_name = self.tags.get("server_name") or "" | |
385 | if default_grouping == "url_traceback": |
|
385 | if default_grouping == "url_traceback": | |
386 | hash_string = "%s_%s_%s" % (self.traceback_hash, location, self.error) |
|
386 | hash_string = "%s_%s_%s" % (self.traceback_hash, location, self.error) | |
387 | if self.language == Language.javascript: |
|
387 | if self.language == Language.javascript: | |
388 | hash_string = "%s_%s" % (self.traceback_hash, self.error) |
|
388 | hash_string = "%s_%s" % (self.traceback_hash, self.error) | |
389 |
|
389 | |||
390 | elif default_grouping == "traceback_server": |
|
390 | elif default_grouping == "traceback_server": | |
391 | hash_string = "%s_%s" % (self.traceback_hash, server_name) |
|
391 | hash_string = "%s_%s" % (self.traceback_hash, server_name) | |
392 | if self.language == Language.javascript: |
|
392 | if self.language == Language.javascript: | |
393 | hash_string = "%s_%s" % (self.traceback_hash, server_name) |
|
393 | hash_string = "%s_%s" % (self.traceback_hash, server_name) | |
394 | else: |
|
394 | else: | |
395 | hash_string = "%s_%s" % (self.error, location) |
|
395 | hash_string = "%s_%s" % (self.error, location) | |
396 | month = datetime.utcnow().date().replace(day=1) |
|
396 | month = datetime.utcnow().date().replace(day=1) | |
397 | hash_string = "{}_{}".format(month, hash_string) |
|
397 | hash_string = "{}_{}".format(month, hash_string) | |
398 | binary_string = hash_string.encode("utf8") |
|
398 | binary_string = hash_string.encode("utf8") | |
399 | self.grouping_hash = hashlib.sha1(binary_string).hexdigest() |
|
399 | self.grouping_hash = hashlib.sha1(binary_string).hexdigest() | |
400 | return self.grouping_hash |
|
400 | return self.grouping_hash | |
401 |
|
401 | |||
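For context on generate_grouping_hash() above: the hash input always gets the first day of the current month prepended, so grouping rolls over monthly even when the same error keeps recurring. A self-contained sketch with made-up inputs, mirroring the "url_traceback" strategy:

```python
import hashlib
from datetime import datetime

# Hypothetical inputs for the "url_traceback" default grouping.
traceback_hash = "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"
location = "myapp.views:error_view"      # view_name tag or url_path
error = "ZeroDivisionError: division by zero"

hash_string = "%s_%s_%s" % (traceback_hash, location, error)
month = datetime.utcnow().date().replace(day=1)
hash_string = "{}_{}".format(month, hash_string)
grouping_hash = hashlib.sha1(hash_string.encode("utf8")).hexdigest()
print(grouping_hash)  # 40-character hex digest stored as grouping_hash
```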
402 | def stripped_traceback(self): |
|
402 | def stripped_traceback(self): | |
403 | """ |
|
403 | """ | |
404 | Traceback without local vars |
|
404 | Traceback without local vars | |
405 | """ |
|
405 | """ | |
406 | stripped_traceback = copy.deepcopy(self.traceback) |
|
406 | stripped_traceback = copy.deepcopy(self.traceback) | |
407 |
|
407 | |||
408 | if isinstance(stripped_traceback, list): |
|
408 | if isinstance(stripped_traceback, list): | |
409 | for row in stripped_traceback: |
|
409 | for row in stripped_traceback: | |
410 | row.pop("vars", None) |
|
410 | row.pop("vars", None) | |
411 | return stripped_traceback |
|
411 | return stripped_traceback | |
412 |
|
412 | |||
413 | def notify_channel(self, report_group): |
|
413 | def notify_channel(self, report_group): | |
414 | """ |
|
414 | """ | |
415 | Sends notification to websocket channel |
|
415 | Sends notification to websocket channel | |
416 | """ |
|
416 | """ | |
417 | settings = get_current_registry().settings |
|
417 | settings = get_current_registry().settings | |
418 | log.info("notify channelstream") |
|
418 | log.info("notify channelstream") | |
419 | if self.report_type != ReportType.error: |
|
419 | if self.report_type != ReportType.error: | |
420 | return |
|
420 | return | |
421 | payload = { |
|
421 | payload = { | |
422 | "type": "message", |
|
422 | "type": "message", | |
423 | "user": "__system__", |
|
423 | "user": "__system__", | |
424 | "channel": "app_%s" % self.resource_id, |
|
424 | "channel": "app_%s" % self.resource_id, | |
425 | "message": { |
|
425 | "message": { | |
426 | "topic": "front_dashboard.new_topic", |
|
426 | "topic": "front_dashboard.new_topic", | |
427 | "report": { |
|
427 | "report": { | |
428 | "group": { |
|
428 | "group": { | |
429 | "priority": report_group.priority, |
|
429 | "priority": report_group.priority, | |
430 | "first_timestamp": report_group.first_timestamp, |
|
430 | "first_timestamp": report_group.first_timestamp, | |
431 | "last_timestamp": report_group.last_timestamp, |
|
431 | "last_timestamp": report_group.last_timestamp, | |
432 | "average_duration": report_group.average_duration, |
|
432 | "average_duration": report_group.average_duration, | |
433 | "occurences": report_group.occurences, |
|
433 | "occurences": report_group.occurences, | |
434 | }, |
|
434 | }, | |
435 | "report_id": self.id, |
|
435 | "report_id": self.id, | |
436 | "group_id": self.group_id, |
|
436 | "group_id": self.group_id, | |
437 | "resource_id": self.resource_id, |
|
437 | "resource_id": self.resource_id, | |
438 | "http_status": self.http_status, |
|
438 | "http_status": self.http_status, | |
439 | "url_domain": self.url_domain, |
|
439 | "url_domain": self.url_domain, | |
440 | "url_path": self.url_path, |
|
440 | "url_path": self.url_path, | |
441 | "error": self.error or "", |
|
441 | "error": self.error or "", | |
442 | "server": self.tags.get("server_name"), |
|
442 | "server": self.tags.get("server_name"), | |
443 | "view_name": self.tags.get("view_name"), |
|
443 | "view_name": self.tags.get("view_name"), | |
444 | "front_url": self.get_public_url(), |
|
444 | "front_url": self.get_public_url(), | |
445 | }, |
|
445 | }, | |
446 | }, |
|
446 | }, | |
447 | } |
|
447 | } | |
448 | channelstream_request( |
|
448 | channelstream_request( | |
449 | settings["cometd.secret"], |
|
449 | settings["cometd.secret"], | |
450 | "/message", |
|
450 | "/message", | |
451 | [payload], |
|
451 | [payload], | |
452 | servers=[settings["cometd_servers"]], |
|
452 | servers=[settings["cometd_servers"]], | |
453 | ) |
|
453 | ) | |
454 |
|
454 | |||
455 | def es_doc(self): |
|
455 | def es_doc(self): | |
456 | tags = {} |
|
456 | tags = {} | |
457 | tag_list = [] |
|
457 | tag_list = [] | |
458 | for name, value in self.tags.items(): |
|
458 | for name, value in self.tags.items(): | |
459 | name = name.replace(".", "_") |
|
459 | name = name.replace(".", "_") | |
460 | tag_list.append(name) |
|
460 | tag_list.append(name) | |
461 | tags[name] = { |
|
461 | tags[name] = { | |
462 | "values": convert_es_type(value), |
|
462 | "values": convert_es_type(value), | |
463 | "numeric_values": value |
|
463 | "numeric_values": value | |
464 | if (isinstance(value, (int, float)) and not isinstance(value, bool)) |
|
464 | if (isinstance(value, (int, float)) and not isinstance(value, bool)) | |
465 | else None, |
|
465 | else None, | |
466 | } |
|
466 | } | |
467 |
|
467 | |||
468 | if "user_name" not in self.tags and self.username: |
|
468 | if "user_name" not in self.tags and self.username: | |
469 | tags["user_name"] = {"value": [self.username], "numeric_value": None} |
|
469 | tags["user_name"] = {"value": [self.username], "numeric_value": None} | |
470 | return { |
|
470 | return { | |
471 | "_id": str(self.id), |
|
471 | "_id": str(self.id), | |
472 | "report_id": str(self.id), |
|
472 | "report_id": str(self.id), | |
473 | "resource_id": self.resource_id, |
|
473 | "resource_id": self.resource_id, | |
474 | "http_status": self.http_status or "", |
|
474 | "http_status": self.http_status or "", | |
475 | "start_time": self.start_time, |
|
475 | "start_time": self.start_time, | |
476 | "end_time": self.end_time, |
|
476 | "end_time": self.end_time, | |
477 | "url_domain": self.url_domain if self.url_domain else "", |
|
477 | "url_domain": self.url_domain if self.url_domain else "", | |
478 | "url_path": self.url_path if self.url_path else "", |
|
478 | "url_path": self.url_path if self.url_path else "", | |
479 | "duration": self.duration, |
|
479 | "duration": self.duration, | |
480 | "error": self.error if self.error else "", |
|
480 | "error": self.error if self.error else "", | |
481 | "report_type": self.report_type, |
|
481 | "report_type": self.report_type, | |
482 | "request_id": self.request_id, |
|
482 | "request_id": self.request_id, | |
483 | "ip": self.ip, |
|
483 | "ip": self.ip, | |
484 | "group_id": str(self.group_id), |
|
484 | "group_id": str(self.group_id), | |
485 | "type": "report", |
|
485 | "type": "report", | |
486 | "join_field": { |
487 | "name": "report", |
488 | "parent": str(self.group_id) |
489 | }, |
486 | "join_field": {"name": "report", "parent": str(self.group_id)}, | |
490 | "tags": tags, |
487 | "tags": tags, | |
491 | "tag_list": tag_list, |
488 | "tag_list": tag_list, | |
492 | "_routing": str(self.group_id) |
489 | "_routing": str(self.group_id), | |
493 | } |
490 | } | |
494 |
|
491 | |||
495 | @property |
|
492 | @property | |
496 | def partition_id(self): |
|
493 | def partition_id(self): | |
497 | return "rcae_r_%s" % self.report_group_time.strftime("%Y_%m") |
|
494 | return "rcae_r_%s" % self.report_group_time.strftime("%Y_%m") | |
498 |
|
495 | |||
499 | def partition_range(self): |
|
496 | def partition_range(self): | |
500 | start_date = self.report_group_time.date().replace(day=1) |
|
497 | start_date = self.report_group_time.date().replace(day=1) | |
501 | end_date = start_date + timedelta(days=40) |
|
498 | end_date = start_date + timedelta(days=40) | |
502 | end_date = end_date.replace(day=1) |
|
499 | end_date = end_date.replace(day=1) | |
503 | return start_date, end_date |
|
500 | return start_date, end_date | |
504 |
|
501 | |||
505 |
|
502 | |||
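partition_id and partition_range() above map a report to a monthly Elasticsearch index; adding 40 days and then snapping back to day 1 is simply a safe way to reach the first day of the following month. A quick sketch of the same calculation:

```python
from datetime import datetime, timedelta

report_group_time = datetime(2017, 5, 17, 12, 30)   # hypothetical value

partition_id = "rcae_r_%s" % report_group_time.strftime("%Y_%m")
start_date = report_group_time.date().replace(day=1)
end_date = (start_date + timedelta(days=40)).replace(day=1)

print(partition_id)          # rcae_r_2017_05
print(start_date, end_date)  # 2017-05-01 2017-06-01
```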
506 | def after_insert(mapper, connection, target): |
|
503 | def after_insert(mapper, connection, target): | |
507 | if not hasattr(target, "_skip_ft_index"): |
|
504 | if not hasattr(target, "_skip_ft_index"): | |
508 | data = target.es_doc() |
|
505 | data = target.es_doc() | |
509 | data.pop("_id", None) |
|
506 | data.pop("_id", None) | |
510 | Datastores.es.index( |
|
507 | Datastores.es.index( | |
511 | target.partition_id, "report", data, parent=target.group_id, id=target.id |
|
508 | target.partition_id, "report", data, parent=target.group_id, id=target.id | |
512 | ) |
|
509 | ) | |
513 |
|
510 | |||
514 |
|
511 | |||
515 | def after_update(mapper, connection, target): |
|
512 | def after_update(mapper, connection, target): | |
516 | if not hasattr(target, "_skip_ft_index"): |
|
513 | if not hasattr(target, "_skip_ft_index"): | |
517 | data = target.es_doc() |
|
514 | data = target.es_doc() | |
518 | data.pop("_id", None) |
|
515 | data.pop("_id", None) | |
519 | Datastores.es.index( |
|
516 | Datastores.es.index( | |
520 | target.partition_id, "report", data, parent=target.group_id, id=target.id |
|
517 | target.partition_id, "report", data, parent=target.group_id, id=target.id | |
521 | ) |
|
518 | ) | |
522 |
|
519 | |||
523 |
|
520 | |||
524 | def after_delete(mapper, connection, target): |
|
521 | def after_delete(mapper, connection, target): | |
525 | if not hasattr(target, "_skip_ft_index"): |
|
522 | if not hasattr(target, "_skip_ft_index"): | |
526 | query = {"query": {"term": {"report_id": target.id}}} |
|
523 | query = {"query": {"term": {"report_id": target.id}}} | |
527 | Datastores.es.delete_by_query( |
|
524 | Datastores.es.delete_by_query( | |
528 | index=target.partition_id, doc_type="report", body=query, conflicts="proceed" |
525 | index=target.partition_id, | |
526 | doc_type="report", | |
527 | body=query, | |
528 | conflicts="proceed", | |
529 | ) |
|
529 | ) | |
530 |
|
530 | |||
531 |
|
531 | |||
532 | sa.event.listen(Report, "after_insert", after_insert) |
|
532 | sa.event.listen(Report, "after_insert", after_insert) | |
533 | sa.event.listen(Report, "after_update", after_update) |
|
533 | sa.event.listen(Report, "after_update", after_update) | |
534 | sa.event.listen(Report, "after_delete", after_delete) |
|
534 | sa.event.listen(Report, "after_delete", after_delete) |
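The join_field and _routing values written by es_doc() above, together with the listeners just registered, rely on Elasticsearch's parent/child join: each report document names report_group as its parent and is routed by the group id so parent and children share a shard. The index template itself is not part of this diff; a plausible sketch of the mapping it assumes:

```python
# Hypothetical mapping sketch -- the real rcae_r_* index template lives elsewhere.
join_mapping = {
    "mappings": {
        "report": {
            "properties": {
                "join_field": {
                    "type": "join",
                    "relations": {"report_group": "report"},  # parent -> child
                }
            }
        }
    }
}

# A child document then carries the relation name plus its parent id and must be
# indexed with routing equal to that parent id:
child_doc = {"join_field": {"name": "report", "parent": "1234"}, "_routing": "1234"}
```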
@@ -1,285 +1,283 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 |
|
2 | |||
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors |
|
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors | |
4 | # |
|
4 | # | |
5 | # Licensed under the Apache License, Version 2.0 (the "License"); |
|
5 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
6 | # you may not use this file except in compliance with the License. |
|
6 | # you may not use this file except in compliance with the License. | |
7 | # You may obtain a copy of the License at |
|
7 | # You may obtain a copy of the License at | |
8 | # |
|
8 | # | |
9 | # http://www.apache.org/licenses/LICENSE-2.0 |
|
9 | # http://www.apache.org/licenses/LICENSE-2.0 | |
10 | # |
|
10 | # | |
11 | # Unless required by applicable law or agreed to in writing, software |
|
11 | # Unless required by applicable law or agreed to in writing, software | |
12 | # distributed under the License is distributed on an "AS IS" BASIS, |
|
12 | # distributed under the License is distributed on an "AS IS" BASIS, | |
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
14 | # See the License for the specific language governing permissions and |
|
14 | # See the License for the specific language governing permissions and | |
15 | # limitations under the License. |
|
15 | # limitations under the License. | |
16 |
|
16 | |||
17 | import logging |
|
17 | import logging | |
18 | import sqlalchemy as sa |
|
18 | import sqlalchemy as sa | |
19 |
|
19 | |||
20 | from datetime import datetime, timedelta |
|
20 | from datetime import datetime, timedelta | |
21 |
|
21 | |||
22 | from pyramid.threadlocal import get_current_request |
|
22 | from pyramid.threadlocal import get_current_request | |
23 | from sqlalchemy.dialects.postgresql import JSON |
|
23 | from sqlalchemy.dialects.postgresql import JSON | |
24 | from ziggurat_foundations.models.base import BaseModel |
|
24 | from ziggurat_foundations.models.base import BaseModel | |
25 |
|
25 | |||
26 | from appenlight.models import Base, get_db_session, Datastores |
|
26 | from appenlight.models import Base, get_db_session, Datastores | |
27 | from appenlight.lib.enums import ReportType |
|
27 | from appenlight.lib.enums import ReportType | |
28 | from appenlight.lib.rule import Rule |
|
28 | from appenlight.lib.rule import Rule | |
29 | from appenlight.lib.redis_keys import REDIS_KEYS |
|
29 | from appenlight.lib.redis_keys import REDIS_KEYS | |
30 | from appenlight.models.report import REPORT_TYPE_MATRIX |
|
30 | from appenlight.models.report import REPORT_TYPE_MATRIX | |
31 |
|
31 | |||
32 | log = logging.getLogger(__name__) |
|
32 | log = logging.getLogger(__name__) | |
33 |
|
33 | |||
34 |
|
34 | |||
35 | class ReportGroup(Base, BaseModel): |
|
35 | class ReportGroup(Base, BaseModel): | |
36 | __tablename__ = "reports_groups" |
|
36 | __tablename__ = "reports_groups" | |
37 | __table_args__ = {"implicit_returning": False} |
|
37 | __table_args__ = {"implicit_returning": False} | |
38 |
|
38 | |||
39 | id = sa.Column(sa.BigInteger(), nullable=False, primary_key=True) |
|
39 | id = sa.Column(sa.BigInteger(), nullable=False, primary_key=True) | |
40 | resource_id = sa.Column( |
|
40 | resource_id = sa.Column( | |
41 | sa.Integer(), |
|
41 | sa.Integer(), | |
42 | sa.ForeignKey( |
|
42 | sa.ForeignKey( | |
43 | "applications.resource_id", onupdate="CASCADE", ondelete="CASCADE" |
|
43 | "applications.resource_id", onupdate="CASCADE", ondelete="CASCADE" | |
44 | ), |
|
44 | ), | |
45 | nullable=False, |
|
45 | nullable=False, | |
46 | index=True, |
|
46 | index=True, | |
47 | ) |
|
47 | ) | |
48 | priority = sa.Column( |
|
48 | priority = sa.Column( | |
49 | sa.Integer, nullable=False, index=True, default=5, server_default="5" |
|
49 | sa.Integer, nullable=False, index=True, default=5, server_default="5" | |
50 | ) |
|
50 | ) | |
51 | first_timestamp = sa.Column( |
|
51 | first_timestamp = sa.Column( | |
52 | sa.DateTime(), default=datetime.utcnow, server_default=sa.func.now() |
|
52 | sa.DateTime(), default=datetime.utcnow, server_default=sa.func.now() | |
53 | ) |
|
53 | ) | |
54 | last_timestamp = sa.Column( |
|
54 | last_timestamp = sa.Column( | |
55 | sa.DateTime(), default=datetime.utcnow, server_default=sa.func.now() |
|
55 | sa.DateTime(), default=datetime.utcnow, server_default=sa.func.now() | |
56 | ) |
|
56 | ) | |
57 | error = sa.Column(sa.UnicodeText(), index=True) |
|
57 | error = sa.Column(sa.UnicodeText(), index=True) | |
58 | grouping_hash = sa.Column(sa.String(40), default="") |
|
58 | grouping_hash = sa.Column(sa.String(40), default="") | |
59 | triggered_postprocesses_ids = sa.Column(JSON(), nullable=False, default=list) |
|
59 | triggered_postprocesses_ids = sa.Column(JSON(), nullable=False, default=list) | |
60 | report_type = sa.Column(sa.Integer, default=1) |
|
60 | report_type = sa.Column(sa.Integer, default=1) | |
61 | total_reports = sa.Column(sa.Integer, default=1) |
|
61 | total_reports = sa.Column(sa.Integer, default=1) | |
62 | last_report = sa.Column(sa.Integer) |
|
62 | last_report = sa.Column(sa.Integer) | |
63 | occurences = sa.Column(sa.Integer, default=1) |
|
63 | occurences = sa.Column(sa.Integer, default=1) | |
64 | average_duration = sa.Column(sa.Float, default=0) |
|
64 | average_duration = sa.Column(sa.Float, default=0) | |
65 | summed_duration = sa.Column(sa.Float, default=0) |
|
65 | summed_duration = sa.Column(sa.Float, default=0) | |
66 | read = sa.Column(sa.Boolean(), index=True, default=False) |
|
66 | read = sa.Column(sa.Boolean(), index=True, default=False) | |
67 | fixed = sa.Column(sa.Boolean(), index=True, default=False) |
|
67 | fixed = sa.Column(sa.Boolean(), index=True, default=False) | |
68 | notified = sa.Column(sa.Boolean(), index=True, default=False) |
|
68 | notified = sa.Column(sa.Boolean(), index=True, default=False) | |
69 | public = sa.Column(sa.Boolean(), index=True, default=False) |
|
69 | public = sa.Column(sa.Boolean(), index=True, default=False) | |
70 |
|
70 | |||
71 | reports = sa.orm.relationship( |
|
71 | reports = sa.orm.relationship( | |
72 | "Report", |
|
72 | "Report", | |
73 | lazy="dynamic", |
|
73 | lazy="dynamic", | |
74 | backref="report_group", |
|
74 | backref="report_group", | |
75 | cascade="all, delete-orphan", |
|
75 | cascade="all, delete-orphan", | |
76 | passive_deletes=True, |
|
76 | passive_deletes=True, | |
77 | passive_updates=True, |
|
77 | passive_updates=True, | |
78 | ) |
|
78 | ) | |
79 |
|
79 | |||
80 | comments = sa.orm.relationship( |
|
80 | comments = sa.orm.relationship( | |
81 | "ReportComment", |
|
81 | "ReportComment", | |
82 | lazy="dynamic", |
|
82 | lazy="dynamic", | |
83 | backref="report", |
|
83 | backref="report", | |
84 | cascade="all, delete-orphan", |
|
84 | cascade="all, delete-orphan", | |
85 | passive_deletes=True, |
|
85 | passive_deletes=True, | |
86 | passive_updates=True, |
|
86 | passive_updates=True, | |
87 | order_by="ReportComment.comment_id", |
|
87 | order_by="ReportComment.comment_id", | |
88 | ) |
|
88 | ) | |
89 |
|
89 | |||
90 | assigned_users = sa.orm.relationship( |
|
90 | assigned_users = sa.orm.relationship( | |
91 | "User", |
|
91 | "User", | |
92 | backref=sa.orm.backref( |
|
92 | backref=sa.orm.backref( | |
93 | "assigned_reports_relation", |
|
93 | "assigned_reports_relation", | |
94 | lazy="dynamic", |
|
94 | lazy="dynamic", | |
95 | order_by=sa.desc(sa.text("reports_groups.id")), |
|
95 | order_by=sa.desc(sa.text("reports_groups.id")), | |
96 | ), |
|
96 | ), | |
97 | passive_deletes=True, |
|
97 | passive_deletes=True, | |
98 | passive_updates=True, |
|
98 | passive_updates=True, | |
99 | secondary="reports_assignments", |
|
99 | secondary="reports_assignments", | |
100 | order_by="User.user_name", |
|
100 | order_by="User.user_name", | |
101 | ) |
|
101 | ) | |
102 |
|
102 | |||
103 | stats = sa.orm.relationship( |
|
103 | stats = sa.orm.relationship( | |
104 | "ReportStat", |
|
104 | "ReportStat", | |
105 | lazy="dynamic", |
|
105 | lazy="dynamic", | |
106 | backref="report", |
|
106 | backref="report", | |
107 | passive_deletes=True, |
|
107 | passive_deletes=True, | |
108 | passive_updates=True, |
|
108 | passive_updates=True, | |
109 | ) |
|
109 | ) | |
110 |
|
110 | |||
111 | last_report_ref = sa.orm.relationship( |
|
111 | last_report_ref = sa.orm.relationship( | |
112 | "Report", |
|
112 | "Report", | |
113 | uselist=False, |
|
113 | uselist=False, | |
114 | primaryjoin="ReportGroup.last_report " "== Report.id", |
|
114 | primaryjoin="ReportGroup.last_report " "== Report.id", | |
115 | foreign_keys="Report.id", |
|
115 | foreign_keys="Report.id", | |
116 | cascade="all, delete-orphan", |
|
116 | cascade="all, delete-orphan", | |
117 | passive_deletes=True, |
|
117 | passive_deletes=True, | |
118 | passive_updates=True, |
|
118 | passive_updates=True, | |
119 | ) |
|
119 | ) | |
120 |
|
120 | |||
121 | def __repr__(self): |
|
121 | def __repr__(self): | |
122 | return "<ReportGroup id:{}>".format(self.id) |
|
122 | return "<ReportGroup id:{}>".format(self.id) | |
123 |
|
123 | |||
124 | def get_report(self, report_id=None, public=False): |
|
124 | def get_report(self, report_id=None, public=False): | |
125 | """ |
|
125 | """ | |
126 | Gets report with specific id or latest report if id was not specified |
|
126 | Gets report with specific id or latest report if id was not specified | |
127 | """ |
|
127 | """ | |
128 | from .report import Report |
|
128 | from .report import Report | |
129 |
|
129 | |||
130 | if not report_id: |
|
130 | if not report_id: | |
131 | return self.last_report_ref |
|
131 | return self.last_report_ref | |
132 | else: |
|
132 | else: | |
133 | return self.reports.filter(Report.id == report_id).first() |
|
133 | return self.reports.filter(Report.id == report_id).first() | |
134 |
|
134 | |||
135 | def get_public_url(self, request, _app_url=None): |
|
135 | def get_public_url(self, request, _app_url=None): | |
136 | url = request.route_url("/", _app_url=_app_url) |
|
136 | url = request.route_url("/", _app_url=_app_url) | |
137 | return (url + "ui/report/%s") % self.id |
|
137 | return (url + "ui/report/%s") % self.id | |
138 |
|
138 | |||
139 | def run_postprocessing(self, report): |
|
139 | def run_postprocessing(self, report): | |
140 | """ |
|
140 | """ | |
141 | Alters report group priority based on postprocessing configuration |
|
141 | Alters report group priority based on postprocessing configuration | |
142 | """ |
|
142 | """ | |
143 | request = get_current_request() |
|
143 | request = get_current_request() | |
144 | get_db_session(None, self).flush() |
|
144 | get_db_session(None, self).flush() | |
145 | for action in self.application.postprocess_conf: |
|
145 | for action in self.application.postprocess_conf: | |
146 | get_db_session(None, self).flush() |
|
146 | get_db_session(None, self).flush() | |
147 | rule_obj = Rule(action.rule, REPORT_TYPE_MATRIX) |
|
147 | rule_obj = Rule(action.rule, REPORT_TYPE_MATRIX) | |
148 | report_dict = report.get_dict(request) |
|
148 | report_dict = report.get_dict(request) | |
149 | # if was not processed yet |
|
149 | # if was not processed yet | |
150 | if ( |
|
150 | if ( | |
151 | rule_obj.match(report_dict) |
|
151 | rule_obj.match(report_dict) | |
152 | and action.pkey not in self.triggered_postprocesses_ids |
|
152 | and action.pkey not in self.triggered_postprocesses_ids | |
153 | ): |
|
153 | ): | |
154 | action.postprocess(self) |
|
154 | action.postprocess(self) | |
155 | # this way sqla can track mutation of list |
|
155 | # this way sqla can track mutation of list | |
156 | self.triggered_postprocesses_ids = self.triggered_postprocesses_ids + [ |
|
156 | self.triggered_postprocesses_ids = self.triggered_postprocesses_ids + [ | |
157 | action.pkey |
|
157 | action.pkey | |
158 | ] |
|
158 | ] | |
159 |
|
159 | |||
160 | get_db_session(None, self).flush() |
|
160 | get_db_session(None, self).flush() | |
161 | # do not go out of bounds |
|
161 | # do not go out of bounds | |
162 | if self.priority < 1: |
|
162 | if self.priority < 1: | |
163 | self.priority = 1 |
|
163 | self.priority = 1 | |
164 | if self.priority > 10: |
|
164 | if self.priority > 10: | |
165 | self.priority = 10 |
|
165 | self.priority = 10 | |
166 |
|
166 | |||
167 | def get_dict(self, request): |
|
167 | def get_dict(self, request): | |
168 | instance_dict = super(ReportGroup, self).get_dict() |
|
168 | instance_dict = super(ReportGroup, self).get_dict() | |
169 | instance_dict["server_name"] = self.get_report().tags.get("server_name") |
|
169 | instance_dict["server_name"] = self.get_report().tags.get("server_name") | |
170 | instance_dict["view_name"] = self.get_report().tags.get("view_name") |
|
170 | instance_dict["view_name"] = self.get_report().tags.get("view_name") | |
171 | instance_dict["resource_name"] = self.application.resource_name |
|
171 | instance_dict["resource_name"] = self.application.resource_name | |
172 | instance_dict["report_type"] = self.get_report().report_type |
|
172 | instance_dict["report_type"] = self.get_report().report_type | |
173 | instance_dict["url_path"] = self.get_report().url_path |
|
173 | instance_dict["url_path"] = self.get_report().url_path | |
174 | instance_dict["front_url"] = self.get_report().get_public_url(request) |
|
174 | instance_dict["front_url"] = self.get_report().get_public_url(request) | |
175 | del instance_dict["triggered_postprocesses_ids"] |
|
175 | del instance_dict["triggered_postprocesses_ids"] | |
176 | return instance_dict |
|
176 | return instance_dict | |
177 |
|
177 | |||
178 | def es_doc(self): |
|
178 | def es_doc(self): | |
179 | return { |
|
179 | return { | |
180 | "_id": str(self.id), |
|
180 | "_id": str(self.id), | |
181 | "group_id": str(self.id), |
|
181 | "group_id": str(self.id), | |
182 | "resource_id": self.resource_id, |
|
182 | "resource_id": self.resource_id, | |
183 | "error": self.error, |
|
183 | "error": self.error, | |
184 | "fixed": self.fixed, |
|
184 | "fixed": self.fixed, | |
185 | "public": self.public, |
|
185 | "public": self.public, | |
186 | "read": self.read, |
|
186 | "read": self.read, | |
187 | "priority": self.priority, |
|
187 | "priority": self.priority, | |
188 | "occurences": self.occurences, |
|
188 | "occurences": self.occurences, | |
189 | "average_duration": self.average_duration, |
|
189 | "average_duration": self.average_duration, | |
190 | "summed_duration": self.summed_duration, |
|
190 | "summed_duration": self.summed_duration, | |
191 | "first_timestamp": self.first_timestamp, |
|
191 | "first_timestamp": self.first_timestamp, | |
192 | "last_timestamp": self.last_timestamp, |
|
192 | "last_timestamp": self.last_timestamp, | |
193 | "type": "report_group", |
|
193 | "type": "report_group", | |
194 | "join_field": { |
195 | "name": "report_group" |
196 | }, |
194 | "join_field": {"name": "report_group"}, | |
197 | } |
195 | } | |
198 |
|
196 | |||
199 | def set_notification_info(self, notify_10=False, notify_100=False): |
|
197 | def set_notification_info(self, notify_10=False, notify_100=False): | |
200 | """ |
|
198 | """ | |
201 | Update redis notification maps for notification job |
|
199 | Update redis notification maps for notification job | |
202 | """ |
|
200 | """ | |
203 | current_time = datetime.utcnow().replace(second=0, microsecond=0) |
|
201 | current_time = datetime.utcnow().replace(second=0, microsecond=0) | |
204 | # global app counter |
|
202 | # global app counter | |
205 | key = REDIS_KEYS["counters"]["reports_per_type"].format( |
|
203 | key = REDIS_KEYS["counters"]["reports_per_type"].format( | |
206 | self.report_type, current_time |
|
204 | self.report_type, current_time | |
207 | ) |
|
205 | ) | |
208 | redis_pipeline = Datastores.redis.pipeline() |
|
206 | redis_pipeline = Datastores.redis.pipeline() | |
209 | redis_pipeline.incr(key) |
|
207 | redis_pipeline.incr(key) | |
210 | redis_pipeline.expire(key, 3600 * 24) |
|
208 | redis_pipeline.expire(key, 3600 * 24) | |
211 | # detailed app notification for alerts and notifications |
|
209 | # detailed app notification for alerts and notifications | |
212 | redis_pipeline.sadd(REDIS_KEYS["apps_that_had_reports"], self.resource_id) |
|
210 | redis_pipeline.sadd(REDIS_KEYS["apps_that_had_reports"], self.resource_id) | |
213 | redis_pipeline.sadd( |
|
211 | redis_pipeline.sadd( | |
214 | REDIS_KEYS["apps_that_had_reports_alerting"], self.resource_id |
|
212 | REDIS_KEYS["apps_that_had_reports_alerting"], self.resource_id | |
215 | ) |
|
213 | ) | |
216 | # only notify for exceptions here |
|
214 | # only notify for exceptions here | |
217 | if self.report_type == ReportType.error: |
|
215 | if self.report_type == ReportType.error: | |
218 | redis_pipeline.sadd(REDIS_KEYS["apps_that_had_reports"], self.resource_id) |
|
216 | redis_pipeline.sadd(REDIS_KEYS["apps_that_had_reports"], self.resource_id) | |
219 | redis_pipeline.sadd( |
|
217 | redis_pipeline.sadd( | |
220 | REDIS_KEYS["apps_that_had_error_reports_alerting"], self.resource_id |
|
218 | REDIS_KEYS["apps_that_had_error_reports_alerting"], self.resource_id | |
221 | ) |
|
219 | ) | |
222 | key = REDIS_KEYS["counters"]["report_group_occurences"].format(self.id) |
|
220 | key = REDIS_KEYS["counters"]["report_group_occurences"].format(self.id) | |
223 | redis_pipeline.incr(key) |
|
221 | redis_pipeline.incr(key) | |
224 | redis_pipeline.expire(key, 3600 * 24) |
|
222 | redis_pipeline.expire(key, 3600 * 24) | |
225 | key = REDIS_KEYS["counters"]["report_group_occurences_alerting"].format(self.id) |
|
223 | key = REDIS_KEYS["counters"]["report_group_occurences_alerting"].format(self.id) | |
226 | redis_pipeline.incr(key) |
|
224 | redis_pipeline.incr(key) | |
227 | redis_pipeline.expire(key, 3600 * 24) |
|
225 | redis_pipeline.expire(key, 3600 * 24) | |
228 |
|
226 | |||
229 | if notify_10: |
|
227 | if notify_10: | |
230 | key = REDIS_KEYS["counters"]["report_group_occurences_10th"].format(self.id) |
|
228 | key = REDIS_KEYS["counters"]["report_group_occurences_10th"].format(self.id) | |
231 | redis_pipeline.setex(key, 3600 * 24, 1) |
|
229 | redis_pipeline.setex(key, 3600 * 24, 1) | |
232 | if notify_100: |
|
230 | if notify_100: | |
233 | key = REDIS_KEYS["counters"]["report_group_occurences_100th"].format( |
|
231 | key = REDIS_KEYS["counters"]["report_group_occurences_100th"].format( | |
234 | self.id |
|
232 | self.id | |
235 | ) |
|
233 | ) | |
236 | redis_pipeline.setex(key, 3600 * 24, 1) |
|
234 | redis_pipeline.setex(key, 3600 * 24, 1) | |
237 |
|
235 | |||
238 | key = REDIS_KEYS["reports_to_notify_per_type_per_app"].format( |
|
236 | key = REDIS_KEYS["reports_to_notify_per_type_per_app"].format( | |
239 | self.report_type, self.resource_id |
|
237 | self.report_type, self.resource_id | |
240 | ) |
|
238 | ) | |
241 | redis_pipeline.sadd(key, self.id) |
|
239 | redis_pipeline.sadd(key, self.id) | |
242 | redis_pipeline.expire(key, 3600 * 24) |
|
240 | redis_pipeline.expire(key, 3600 * 24) | |
243 | key = REDIS_KEYS["reports_to_notify_per_type_per_app_alerting"].format( |
|
241 | key = REDIS_KEYS["reports_to_notify_per_type_per_app_alerting"].format( | |
244 | self.report_type, self.resource_id |
|
242 | self.report_type, self.resource_id | |
245 | ) |
|
243 | ) | |
246 | redis_pipeline.sadd(key, self.id) |
|
244 | redis_pipeline.sadd(key, self.id) | |
247 | redis_pipeline.expire(key, 3600 * 24) |
|
245 | redis_pipeline.expire(key, 3600 * 24) | |
248 | redis_pipeline.execute() |
|
246 | redis_pipeline.execute() | |
249 |
|
247 | |||
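set_notification_info() above uses a simple Redis pattern: per-minute counter keys that expire after a day, plus sets collecting the apps and groups a later notification job should scan. A minimal redis-py sketch of that pattern (key names here are illustrative, the real ones come from REDIS_KEYS):

```python
from datetime import datetime
import redis  # assumes redis-py is installed

r = redis.StrictRedis()
current_time = datetime.utcnow().replace(second=0, microsecond=0)

pipe = r.pipeline()
counter_key = "counters:reports_per_type:1:%s" % current_time   # illustrative name
pipe.incr(counter_key)
pipe.expire(counter_key, 3600 * 24)          # counters only need to survive a day
pipe.sadd("apps_that_had_reports", 42)       # resource id for the notification job
notify_key = "reports_to_notify:1:42"        # per type / per app set of group ids
pipe.sadd(notify_key, 777)
pipe.expire(notify_key, 3600 * 24)
pipe.execute()
```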
250 | @property |
|
248 | @property | |
251 | def partition_id(self): |
|
249 | def partition_id(self): | |
252 | return "rcae_r_%s" % self.first_timestamp.strftime("%Y_%m") |
|
250 | return "rcae_r_%s" % self.first_timestamp.strftime("%Y_%m") | |
253 |
|
251 | |||
254 | def partition_range(self): |
|
252 | def partition_range(self): | |
255 | start_date = self.first_timestamp.date().replace(day=1) |
|
253 | start_date = self.first_timestamp.date().replace(day=1) | |
256 | end_date = start_date + timedelta(days=40) |
|
254 | end_date = start_date + timedelta(days=40) | |
257 | end_date = end_date.replace(day=1) |
|
255 | end_date = end_date.replace(day=1) | |
258 | return start_date, end_date |
|
256 | return start_date, end_date | |
259 |
|
257 | |||
260 |
|
258 | |||
261 | def after_insert(mapper, connection, target): |
|
259 | def after_insert(mapper, connection, target): | |
262 | if not hasattr(target, "_skip_ft_index"): |
|
260 | if not hasattr(target, "_skip_ft_index"): | |
263 | data = target.es_doc() |
|
261 | data = target.es_doc() | |
264 | data.pop("_id", None) |
|
262 | data.pop("_id", None) | |
265 | Datastores.es.index(target.partition_id, "report", data, id=target.id) |
|
263 | Datastores.es.index(target.partition_id, "report", data, id=target.id) | |
266 |
|
264 | |||
267 |
|
265 | |||
268 | def after_update(mapper, connection, target): |
|
266 | def after_update(mapper, connection, target): | |
269 | if not hasattr(target, "_skip_ft_index"): |
|
267 | if not hasattr(target, "_skip_ft_index"): | |
270 | data = target.es_doc() |
|
268 | data = target.es_doc() | |
271 | data.pop("_id", None) |
|
269 | data.pop("_id", None) | |
272 | Datastores.es.index(target.partition_id, "report", data, id=target.id) |
|
270 | Datastores.es.index(target.partition_id, "report", data, id=target.id) | |
273 |
|
271 | |||
274 |
|
272 | |||
275 | def after_delete(mapper, connection, target): |
|
273 | def after_delete(mapper, connection, target): | |
276 | query = {"query": {"term": {"group_id": target.id}}} |
|
274 | query = {"query": {"term": {"group_id": target.id}}} | |
277 | # delete by query |
|
275 | # delete by query | |
278 | Datastores.es.delete_by_query( |
|
276 | Datastores.es.delete_by_query( | |
279 | index=target.partition_id, doc_type="report", body=query, conflicts="proceed" |
|
277 | index=target.partition_id, doc_type="report", body=query, conflicts="proceed" | |
280 | ) |
|
278 | ) | |
281 |
|
279 | |||
282 |
|
280 | |||
283 | sa.event.listen(ReportGroup, "after_insert", after_insert) |
|
281 | sa.event.listen(ReportGroup, "after_insert", after_insert) | |
284 | sa.event.listen(ReportGroup, "after_update", after_update) |
|
282 | sa.event.listen(ReportGroup, "after_update", after_update) | |
285 | sa.event.listen(ReportGroup, "after_delete", after_delete) |
|
283 | sa.event.listen(ReportGroup, "after_delete", after_delete) |
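Regarding the after_delete hook above: removing a group issues a delete-by-query against the group's monthly partition so every child report document referencing it disappears too, and conflicts="proceed" keeps Elasticsearch from aborting on version conflicts. Roughly the equivalent call with the low-level client (the index name is a placeholder):

```python
from elasticsearch import Elasticsearch  # assumes the elasticsearch-py client

es = Elasticsearch()
query = {"query": {"term": {"group_id": 1234}}}
es.delete_by_query(
    index="rcae_r_2017_05",   # monthly partition the group was indexed into
    doc_type="report",
    body=query,
    conflicts="proceed",      # skip documents with version conflicts instead of failing
)
```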
@@ -1,222 +1,218 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 |
|
2 | |||
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors |
|
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors | |
4 | # |
|
4 | # | |
5 | # Licensed under the Apache License, Version 2.0 (the "License"); |
|
5 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
6 | # you may not use this file except in compliance with the License. |
|
6 | # you may not use this file except in compliance with the License. | |
7 | # You may obtain a copy of the License at |
|
7 | # You may obtain a copy of the License at | |
8 | # |
|
8 | # | |
9 | # http://www.apache.org/licenses/LICENSE-2.0 |
|
9 | # http://www.apache.org/licenses/LICENSE-2.0 | |
10 | # |
|
10 | # | |
11 | # Unless required by applicable law or agreed to in writing, software |
|
11 | # Unless required by applicable law or agreed to in writing, software | |
12 | # distributed under the License is distributed on an "AS IS" BASIS, |
|
12 | # distributed under the License is distributed on an "AS IS" BASIS, | |
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
14 | # See the License for the specific language governing permissions and |
|
14 | # See the License for the specific language governing permissions and | |
15 | # limitations under the License. |
|
15 | # limitations under the License. | |
16 |
|
16 | |||
17 | import paginate |
|
17 | import paginate | |
18 | import logging |
|
18 | import logging | |
19 | import sqlalchemy as sa |
|
19 | import sqlalchemy as sa | |
20 |
|
20 | |||
21 | from appenlight.models.log import Log |
|
21 | from appenlight.models.log import Log | |
22 | from appenlight.models import get_db_session, Datastores |
|
22 | from appenlight.models import get_db_session, Datastores | |
23 | from appenlight.models.services.base import BaseService |
|
23 | from appenlight.models.services.base import BaseService | |
24 | from appenlight.lib.utils import es_index_name_limiter |
|
24 | from appenlight.lib.utils import es_index_name_limiter | |
25 |
|
25 | |||
26 | log = logging.getLogger(__name__) |
|
26 | log = logging.getLogger(__name__) | |
27 |
|
27 | |||
28 |
|
28 | |||
29 | class LogService(BaseService): |
|
29 | class LogService(BaseService): | |
30 | @classmethod |
|
30 | @classmethod | |
31 | def get_logs(cls, resource_ids=None, filter_settings=None, db_session=None): |
|
31 | def get_logs(cls, resource_ids=None, filter_settings=None, db_session=None): | |
32 | # ensure we always have id's passed |
|
32 | # ensure we always have id's passed | |
33 | if not resource_ids: |
|
33 | if not resource_ids: | |
34 | # raise Exception('No App ID passed') |
|
34 | # raise Exception('No App ID passed') | |
35 | return [] |
|
35 | return [] | |
36 | db_session = get_db_session(db_session) |
|
36 | db_session = get_db_session(db_session) | |
37 | q = db_session.query(Log) |
|
37 | q = db_session.query(Log) | |
38 | q = q.filter(Log.resource_id.in_(resource_ids)) |
|
38 | q = q.filter(Log.resource_id.in_(resource_ids)) | |
39 | if filter_settings.get("start_date"): |
|
39 | if filter_settings.get("start_date"): | |
40 | q = q.filter(Log.timestamp >= filter_settings.get("start_date")) |
|
40 | q = q.filter(Log.timestamp >= filter_settings.get("start_date")) | |
41 | if filter_settings.get("end_date"): |
|
41 | if filter_settings.get("end_date"): | |
42 | q = q.filter(Log.timestamp <= filter_settings.get("end_date")) |
|
42 | q = q.filter(Log.timestamp <= filter_settings.get("end_date")) | |
43 | if filter_settings.get("log_level"): |
|
43 | if filter_settings.get("log_level"): | |
44 | q = q.filter(Log.log_level == filter_settings.get("log_level").upper()) |
|
44 | q = q.filter(Log.log_level == filter_settings.get("log_level").upper()) | |
45 | if filter_settings.get("request_id"): |
|
45 | if filter_settings.get("request_id"): | |
46 | request_id = filter_settings.get("request_id", "") |
|
46 | request_id = filter_settings.get("request_id", "") | |
47 | q = q.filter(Log.request_id == request_id.replace("-", "")) |
|
47 | q = q.filter(Log.request_id == request_id.replace("-", "")) | |
48 | if filter_settings.get("namespace"): |
|
48 | if filter_settings.get("namespace"): | |
49 | q = q.filter(Log.namespace == filter_settings.get("namespace")) |
|
49 | q = q.filter(Log.namespace == filter_settings.get("namespace")) | |
50 | q = q.order_by(sa.desc(Log.timestamp)) |
|
50 | q = q.order_by(sa.desc(Log.timestamp)) | |
51 | return q |
|
51 | return q | |
52 |
|
52 | |||
53 | @classmethod |
|
53 | @classmethod | |
54 | def es_query_builder(cls, app_ids, filter_settings): |
|
54 | def es_query_builder(cls, app_ids, filter_settings): | |
55 | if not filter_settings: |
|
55 | if not filter_settings: | |
56 | filter_settings = {} |
|
56 | filter_settings = {} | |
57 |
|
57 | |||
58 | query = { |
|
58 | query = { | |
59 | "query": { |
|
59 | "query": {"bool": {"filter": [{"terms": {"resource_id": list(app_ids)}}]}} | |
60 | "bool": { |
|
|||
61 | "filter": [{"terms": {"resource_id": list(app_ids)}}] |
|
|||
62 | } |
|
|||
63 | } |
|
|||
64 | } |
|
60 | } | |
65 |
|
61 | |||
66 | start_date = filter_settings.get("start_date") |
|
62 | start_date = filter_settings.get("start_date") | |
67 | end_date = filter_settings.get("end_date") |
|
63 | end_date = filter_settings.get("end_date") | |
68 | filter_part = query["query"]["bool"]["filter"] |
|
64 | filter_part = query["query"]["bool"]["filter"] | |
69 |
|
65 | |||
70 | for tag in filter_settings.get("tags", []): |
|
66 | for tag in filter_settings.get("tags", []): | |
71 | tag_values = [v.lower() for v in tag["value"]] |
|
67 | tag_values = [v.lower() for v in tag["value"]] | |
72 | key = "tags.%s.values" % tag["name"].replace(".", "_") |
|
68 | key = "tags.%s.values" % tag["name"].replace(".", "_") | |
73 | filter_part.append({"terms": {key: tag_values}}) |
|
69 | filter_part.append({"terms": {key: tag_values}}) | |
74 |
|
70 | |||
75 | date_range = {"range": {"timestamp": {}}} |
|
71 | date_range = {"range": {"timestamp": {}}} | |
76 | if start_date: |
|
72 | if start_date: | |
77 | date_range["range"]["timestamp"]["gte"] = start_date |
|
73 | date_range["range"]["timestamp"]["gte"] = start_date | |
78 | if end_date: |
|
74 | if end_date: | |
79 | date_range["range"]["timestamp"]["lte"] = end_date |
|
75 | date_range["range"]["timestamp"]["lte"] = end_date | |
80 | if start_date or end_date: |
|
76 | if start_date or end_date: | |
81 | filter_part.append(date_range) |
|
77 | filter_part.append(date_range) | |
82 |
|
78 | |||
83 | levels = filter_settings.get("level") |
|
79 | levels = filter_settings.get("level") | |
84 | if levels: |
|
80 | if levels: | |
85 | filter_part.append({"terms": {"log_level": levels}}) |
|
81 | filter_part.append({"terms": {"log_level": levels}}) | |
86 | namespaces = filter_settings.get("namespace") |
|
82 | namespaces = filter_settings.get("namespace") | |
87 | if namespaces: |
|
83 | if namespaces: | |
88 | filter_part.append({"terms": {"namespace": namespaces}}) |
|
84 | filter_part.append({"terms": {"namespace": namespaces}}) | |
89 |
|
85 | |||
90 | request_ids = filter_settings.get("request_id") |
|
86 | request_ids = filter_settings.get("request_id") | |
91 | if request_ids: |
|
87 | if request_ids: | |
92 | filter_part.append({"terms": {"request_id": request_ids}}) |
|
88 | filter_part.append({"terms": {"request_id": request_ids}}) | |
93 |
|
89 | |||
94 | messages = filter_settings.get("message") |
|
90 | messages = filter_settings.get("message") | |
95 | if messages: |
|
91 | if messages: | |
96 | query["query"]["bool"]["must"] = { |
|
92 | query["query"]["bool"]["must"] = { | |
97 | "match": {"message": {"query": " ".join(messages), "operator": "and"}} |
|
93 | "match": {"message": {"query": " ".join(messages), "operator": "and"}} | |
98 | } |
|
94 | } | |
99 | return query |
|
95 | return query | |
100 |
|
96 | |||
101 | @classmethod |
|
97 | @classmethod | |
102 | def get_time_series_aggregate(cls, app_ids=None, filter_settings=None): |
|
98 | def get_time_series_aggregate(cls, app_ids=None, filter_settings=None): | |
103 | if not app_ids: |
|
99 | if not app_ids: | |
104 | return {} |
|
100 | return {} | |
105 | es_query = cls.es_query_builder(app_ids, filter_settings) |
|
101 | es_query = cls.es_query_builder(app_ids, filter_settings) | |
106 | es_query["aggs"] = { |
|
102 | es_query["aggs"] = { | |
107 | "events_over_time": { |
|
103 | "events_over_time": { | |
108 | "date_histogram": { |
|
104 | "date_histogram": { | |
109 | "field": "timestamp", |
|
105 | "field": "timestamp", | |
110 | "interval": "1h", |
|
106 | "interval": "1h", | |
111 | "min_doc_count": 0, |
|
107 | "min_doc_count": 0, | |
112 | "extended_bounds": { |
|
108 | "extended_bounds": { | |
113 | "max": filter_settings.get("end_date"), |
|
109 | "max": filter_settings.get("end_date"), | |
114 | "min": filter_settings.get("start_date"), |
|
110 | "min": filter_settings.get("start_date"), | |
115 | }, |
|
111 | }, | |
116 | } |
|
112 | } | |
117 | } |
|
113 | } | |
118 | } |
|
114 | } | |
119 | log.debug(es_query) |
|
115 | log.debug(es_query) | |
120 | index_names = es_index_name_limiter( |
|
116 | index_names = es_index_name_limiter( | |
121 | filter_settings.get("start_date"), |
|
117 | filter_settings.get("start_date"), | |
122 | filter_settings.get("end_date"), |
|
118 | filter_settings.get("end_date"), | |
123 | ixtypes=["logs"], |
|
119 | ixtypes=["logs"], | |
124 | ) |
|
120 | ) | |
125 | if index_names: |
|
121 | if index_names: | |
126 | results = Datastores.es.search( |
|
122 | results = Datastores.es.search( | |
127 | body=es_query, index=index_names, doc_type="log", size=0 |
|
123 | body=es_query, index=index_names, doc_type="log", size=0 | |
128 | ) |
|
124 | ) | |
129 | else: |
|
125 | else: | |
130 | results = [] |
|
126 | results = [] | |
131 | return results |
|
127 | return results | |
132 |
|
128 | |||
133 | @classmethod |
|
129 | @classmethod | |
134 | def get_search_iterator( |
|
130 | def get_search_iterator( | |
135 |
|
|
131 | cls, | |
136 |
|
|
132 | app_ids=None, | |
137 |
|
|
133 | page=1, | |
138 |
|
|
134 | items_per_page=50, | |
139 |
|
|
135 | order_by=None, | |
140 |
|
|
136 | filter_settings=None, | |
141 |
|
|
137 | limit=None, | |
142 | ): |
|
138 | ): | |
143 | if not app_ids: |
|
139 | if not app_ids: | |
144 | return {}, 0 |
|
140 | return {}, 0 | |
145 |
|
141 | |||
146 | es_query = cls.es_query_builder(app_ids, filter_settings) |
|
142 | es_query = cls.es_query_builder(app_ids, filter_settings) | |
147 | sort_query = {"sort": [{"timestamp": {"order": "desc"}}]} |
|
143 | sort_query = {"sort": [{"timestamp": {"order": "desc"}}]} | |
148 | es_query.update(sort_query) |
|
144 | es_query.update(sort_query) | |
149 | log.debug(es_query) |
|
145 | log.debug(es_query) | |
150 | es_from = (page - 1) * items_per_page |
|
146 | es_from = (page - 1) * items_per_page | |
151 | index_names = es_index_name_limiter( |
|
147 | index_names = es_index_name_limiter( | |
152 | filter_settings.get("start_date"), |
|
148 | filter_settings.get("start_date"), | |
153 | filter_settings.get("end_date"), |
|
149 | filter_settings.get("end_date"), | |
154 | ixtypes=["logs"], |
|
150 | ixtypes=["logs"], | |
155 | ) |
|
151 | ) | |
156 | if not index_names: |
|
152 | if not index_names: | |
157 | return {}, 0 |
|
153 | return {}, 0 | |
158 |
|
154 | |||
159 | results = Datastores.es.search( |
|
155 | results = Datastores.es.search( | |
160 | body=es_query, |
|
156 | body=es_query, | |
161 | index=index_names, |
|
157 | index=index_names, | |
162 | doc_type="log", |
|
158 | doc_type="log", | |
163 | size=items_per_page, |
|
159 | size=items_per_page, | |
164 | from_=es_from, |
|
160 | from_=es_from, | |
165 | ) |
|
161 | ) | |
166 | if results["hits"]["total"] > 5000: |
|
162 | if results["hits"]["total"] > 5000: | |
167 | count = 5000 |
|
163 | count = 5000 | |
168 | else: |
|
164 | else: | |
169 | count = results["hits"]["total"] |
|
165 | count = results["hits"]["total"] | |
170 | return results["hits"], count |
|
166 | return results["hits"], count | |
171 |
|
167 | |||
172 | @classmethod |
|
168 | @classmethod | |
173 | def get_paginator_by_app_ids( |
|
169 | def get_paginator_by_app_ids( | |
174 |
|
|
170 | cls, | |
175 |
|
|
171 | app_ids=None, | |
176 |
|
|
172 | page=1, | |
177 |
|
|
173 | item_count=None, | |
178 |
|
|
174 | items_per_page=50, | |
179 |
|
|
175 | order_by=None, | |
180 |
|
|
176 | filter_settings=None, | |
181 |
|
|
177 | exclude_columns=None, | |
182 |
|
|
178 | db_session=None, | |
183 | ): |
|
179 | ): | |
184 | if not filter_settings: |
|
180 | if not filter_settings: | |
185 | filter_settings = {} |
|
181 | filter_settings = {} | |
186 | results, item_count = cls.get_search_iterator( |
|
182 | results, item_count = cls.get_search_iterator( | |
187 | app_ids, page, items_per_page, order_by, filter_settings |
|
183 | app_ids, page, items_per_page, order_by, filter_settings | |
188 | ) |
|
184 | ) | |
189 | paginator = paginate.Page( |
|
185 | paginator = paginate.Page( | |
190 | [], item_count=item_count, items_per_page=items_per_page, **filter_settings |
|
186 | [], item_count=item_count, items_per_page=items_per_page, **filter_settings | |
191 | ) |
|
187 | ) | |
192 | ordered_ids = tuple( |
|
188 | ordered_ids = tuple( | |
193 | item["_source"]["log_id"] for item in results.get("hits", []) |
|
189 | item["_source"]["log_id"] for item in results.get("hits", []) | |
194 | ) |
|
190 | ) | |
195 |
|
191 | |||
196 | sorted_instance_list = [] |
|
192 | sorted_instance_list = [] | |
197 | if ordered_ids: |
|
193 | if ordered_ids: | |
198 | db_session = get_db_session(db_session) |
|
194 | db_session = get_db_session(db_session) | |
199 | query = db_session.query(Log) |
|
195 | query = db_session.query(Log) | |
200 | query = query.filter(Log.log_id.in_(ordered_ids)) |
|
196 | query = query.filter(Log.log_id.in_(ordered_ids)) | |
201 | query = query.order_by(sa.desc("timestamp")) |
|
197 | query = query.order_by(sa.desc("timestamp")) | |
202 | sa_items = query.all() |
|
198 | sa_items = query.all() | |
203 | # resort by score |
|
199 | # resort by score | |
204 | for i_id in ordered_ids: |
|
200 | for i_id in ordered_ids: | |
205 | for item in sa_items: |
|
201 | for item in sa_items: | |
206 | if str(item.log_id) == str(i_id): |
|
202 | if str(item.log_id) == str(i_id): | |
207 | sorted_instance_list.append(item) |
|
203 | sorted_instance_list.append(item) | |
208 | paginator.sa_items = sorted_instance_list |
|
204 | paginator.sa_items = sorted_instance_list | |
209 | return paginator |
|
205 | return paginator | |
210 |
|
206 | |||
211 | @classmethod |
|
207 | @classmethod | |
212 | def query_by_primary_key_and_namespace(cls, list_of_pairs, db_session=None): |
|
208 | def query_by_primary_key_and_namespace(cls, list_of_pairs, db_session=None): | |
213 | db_session = get_db_session(db_session) |
|
209 | db_session = get_db_session(db_session) | |
214 | list_of_conditions = [] |
|
210 | list_of_conditions = [] | |
215 | query = db_session.query(Log) |
|
211 | query = db_session.query(Log) | |
216 | for pair in list_of_pairs: |
|
212 | for pair in list_of_pairs: | |
217 | list_of_conditions.append( |
|
213 | list_of_conditions.append( | |
218 | sa.and_(Log.primary_key == pair["pk"], Log.namespace == pair["ns"]) |
|
214 | sa.and_(Log.primary_key == pair["pk"], Log.namespace == pair["ns"]) | |
219 | ) |
|
215 | ) | |
220 | query = query.filter(sa.or_(*list_of_conditions)) |
|
216 | query = query.filter(sa.or_(*list_of_conditions)) | |
221 | query = query.order_by(sa.asc(Log.timestamp), sa.asc(Log.log_id)) |
|
217 | query = query.order_by(sa.asc(Log.timestamp), sa.asc(Log.log_id)) | |
222 | return query |
|
218 | return query |
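
Reviewer note on LogService.es_query_builder: the collapsed bool/filter construction is easier to eyeball with a standalone sketch. The snippet below does not import AppEnlight; app_ids and filter_settings are made-up placeholder values, and only a subset of the clauses (date range, log level) is reproduced to show the query shape the builder is expected to emit.

import json

# Hypothetical inputs for illustration; real values are built by the views
# from the incoming request (these names only mirror the service arguments).
app_ids = [1, 2]
filter_settings = {
    "start_date": "2017-01-01T00:00:00",
    "end_date": "2017-01-02T00:00:00",
    "level": ["error"],
}

# Same shape as the reformatted builder: a single bool query whose "filter"
# list is extended clause by clause depending on which settings are present.
query = {"query": {"bool": {"filter": [{"terms": {"resource_id": list(app_ids)}}]}}}
filter_part = query["query"]["bool"]["filter"]

date_range = {"range": {"timestamp": {}}}
if filter_settings.get("start_date"):
    date_range["range"]["timestamp"]["gte"] = filter_settings["start_date"]
if filter_settings.get("end_date"):
    date_range["range"]["timestamp"]["lte"] = filter_settings["end_date"]
if date_range["range"]["timestamp"]:
    filter_part.append(date_range)

if filter_settings.get("level"):
    filter_part.append({"terms": {"log_level": filter_settings["level"]}})

print(json.dumps(query, indent=2))
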
@@ -1,521 +1,521 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 |
|
2 | |||
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors |
|
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors | |
4 | # |
|
4 | # | |
5 | # Licensed under the Apache License, Version 2.0 (the "License"); |
|
5 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
6 | # you may not use this file except in compliance with the License. |
|
6 | # you may not use this file except in compliance with the License. | |
7 | # You may obtain a copy of the License at |
|
7 | # You may obtain a copy of the License at | |
8 | # |
|
8 | # | |
9 | # http://www.apache.org/licenses/LICENSE-2.0 |
|
9 | # http://www.apache.org/licenses/LICENSE-2.0 | |
10 | # |
|
10 | # | |
11 | # Unless required by applicable law or agreed to in writing, software |
|
11 | # Unless required by applicable law or agreed to in writing, software | |
12 | # distributed under the License is distributed on an "AS IS" BASIS, |
|
12 | # distributed under the License is distributed on an "AS IS" BASIS, | |
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
14 | # See the License for the specific language governing permissions and |
|
14 | # See the License for the specific language governing permissions and | |
15 | # limitations under the License. |
|
15 | # limitations under the License. | |
16 |
|
16 | |||
17 | import logging |
|
17 | import logging | |
18 | import paginate |
|
18 | import paginate | |
19 | import sqlalchemy as sa |
|
19 | import sqlalchemy as sa | |
20 | import appenlight.lib.helpers as h |
|
20 | import appenlight.lib.helpers as h | |
21 |
|
21 | |||
22 | from datetime import datetime |
|
22 | from datetime import datetime | |
23 |
|
23 | |||
24 | from appenlight.models import get_db_session, Datastores |
|
24 | from appenlight.models import get_db_session, Datastores | |
25 | from appenlight.models.report import Report |
|
25 | from appenlight.models.report import Report | |
26 | from appenlight.models.report_group import ReportGroup |
|
26 | from appenlight.models.report_group import ReportGroup | |
27 | from appenlight.models.report_comment import ReportComment |
|
27 | from appenlight.models.report_comment import ReportComment | |
28 | from appenlight.models.user import User |
|
28 | from appenlight.models.user import User | |
29 | from appenlight.models.services.base import BaseService |
|
29 | from appenlight.models.services.base import BaseService | |
30 | from appenlight.lib.enums import ReportType |
|
30 | from appenlight.lib.enums import ReportType | |
31 | from appenlight.lib.utils import es_index_name_limiter |
|
31 | from appenlight.lib.utils import es_index_name_limiter | |
32 |
|
32 | |||
33 | log = logging.getLogger(__name__) |
|
33 | log = logging.getLogger(__name__) | |
34 |
|
34 | |||
35 |
|
35 | |||
36 | class ReportGroupService(BaseService): |
|
36 | class ReportGroupService(BaseService): | |
37 | @classmethod |
|
37 | @classmethod | |
38 | def get_trending(cls, request, filter_settings, limit=15, db_session=None): |
|
38 | def get_trending(cls, request, filter_settings, limit=15, db_session=None): | |
39 | """ |
|
39 | """ | |
40 | Returns report groups trending for specific time interval |
|
40 | Returns report groups trending for specific time interval | |
41 | """ |
|
41 | """ | |
42 | db_session = get_db_session(db_session) |
|
42 | db_session = get_db_session(db_session) | |
43 |
|
43 | |||
44 | tags = [] |
|
44 | tags = [] | |
45 | if filter_settings.get("tags"): |
|
45 | if filter_settings.get("tags"): | |
46 | for tag in filter_settings["tags"]: |
|
46 | for tag in filter_settings["tags"]: | |
47 | tags.append( |
|
47 | tags.append( | |
48 | {"terms": {"tags.{}.values".format(tag["name"]): tag["value"]}} |
|
48 | {"terms": {"tags.{}.values".format(tag["name"]): tag["value"]}} | |
49 | ) |
|
49 | ) | |
50 |
|
50 | |||
51 | index_names = es_index_name_limiter( |
|
51 | index_names = es_index_name_limiter( | |
52 | start_date=filter_settings["start_date"], |
|
52 | start_date=filter_settings["start_date"], | |
53 | end_date=filter_settings["end_date"], |
|
53 | end_date=filter_settings["end_date"], | |
54 | ixtypes=["reports"], |
|
54 | ixtypes=["reports"], | |
55 | ) |
|
55 | ) | |
56 |
|
56 | |||
57 | if not index_names or not filter_settings["resource"]: |
|
57 | if not index_names or not filter_settings["resource"]: | |
58 | return [] |
|
58 | return [] | |
59 |
|
59 | |||
60 | es_query = { |
|
60 | es_query = { | |
61 | "aggs": { |
|
61 | "aggs": { | |
62 | "parent_agg": { |
|
62 | "parent_agg": { | |
63 | "aggs": { |
|
63 | "aggs": { | |
64 | "groups": { |
|
64 | "groups": { | |
65 | "aggs": { |
|
65 | "aggs": { | |
66 | "sub_agg": { |
|
66 | "sub_agg": { | |
67 | "value_count": {
|
67 | "value_count": { | |
|
68 | "field": "tags.group_id.values.keyword" | |||
|
69 | } | |||
68 | } |
|
70 | } | |
69 | }, |
|
71 | }, | |
70 | "filter": {"exists": {"field": "tags.group_id.values"}}, |
|
72 | "filter": {"exists": {"field": "tags.group_id.values"}}, | |
71 | } |
|
73 | } | |
72 | }, |
|
74 | }, | |
73 | "terms": {"field": "tags.group_id.values.keyword", "size": limit}, |
|
75 | "terms": {"field": "tags.group_id.values.keyword", "size": limit}, | |
74 | } |
|
76 | } | |
75 | }, |
|
77 | }, | |
76 | "query": { |
|
78 | "query": { | |
77 | "bool": { |
|
79 | "bool": { | |
78 | "filter": [ |
|
80 | "filter": [ | |
79 | { |
|
81 | {"terms": {"resource_id": [filter_settings["resource"][0]]}}, | |
80 | "terms": { |
|
|||
81 | "resource_id": [filter_settings["resource"][0]] |
|
|||
82 | } |
|
|||
83 | }, |
|
|||
84 | { |
|
82 | { | |
85 | "range": { |
|
83 | "range": { | |
86 | "timestamp": { |
|
84 | "timestamp": { | |
87 | "gte": filter_settings["start_date"], |
|
85 | "gte": filter_settings["start_date"], | |
88 | "lte": filter_settings["end_date"], |
|
86 | "lte": filter_settings["end_date"], | |
89 | } |
|
87 | } | |
90 | } |
|
88 | } | |
91 | }, |
|
89 | }, | |
92 | ] |
|
90 | ] | |
93 | } |
|
91 | } | |
94 | }, |
|
92 | }, | |
95 | } |
|
93 | } | |
96 | if tags: |
|
94 | if tags: | |
97 | es_query["query"]["bool"]["filter"].extend(tags) |
|
95 | es_query["query"]["bool"]["filter"].extend(tags) | |
98 |
|
96 | |||
99 | result = Datastores.es.search( |
|
97 | result = Datastores.es.search( | |
100 | body=es_query, index=index_names, doc_type="report", size=0 |
|
98 | body=es_query, index=index_names, doc_type="report", size=0 | |
101 | ) |
|
99 | ) | |
102 | series = [] |
|
100 | series = [] | |
103 | for bucket in result["aggregations"]["parent_agg"]["buckets"]: |
|
101 | for bucket in result["aggregations"]["parent_agg"]["buckets"]: | |
104 | series.append( |
|
102 | series.append( | |
105 | {"key": bucket["key"], "groups": bucket["groups"]["sub_agg"]["value"]} |
|
103 | {"key": bucket["key"], "groups": bucket["groups"]["sub_agg"]["value"]} | |
106 | ) |
|
104 | ) | |
107 |
|
105 | |||
108 | report_groups_d = {} |
|
106 | report_groups_d = {} | |
109 | for g in series: |
|
107 | for g in series: | |
110 | report_groups_d[int(g["key"])] = g["groups"] or 0 |
|
108 | report_groups_d[int(g["key"])] = g["groups"] or 0 | |
111 |
|
109 | |||
112 | query = db_session.query(ReportGroup) |
|
110 | query = db_session.query(ReportGroup) | |
113 | query = query.filter(ReportGroup.id.in_(list(report_groups_d.keys()))) |
|
111 | query = query.filter(ReportGroup.id.in_(list(report_groups_d.keys()))) | |
114 | query = query.options(sa.orm.joinedload(ReportGroup.last_report_ref)) |
|
112 | query = query.options(sa.orm.joinedload(ReportGroup.last_report_ref)) | |
115 | results = [(report_groups_d[group.id], group) for group in query] |
|
113 | results = [(report_groups_d[group.id], group) for group in query] | |
116 | return sorted(results, reverse=True, key=lambda x: x[0]) |
|
114 | return sorted(results, reverse=True, key=lambda x: x[0]) | |
117 |
|
115 | |||
118 | @classmethod |
|
116 | @classmethod | |
119 | def get_search_iterator( |
|
117 | def get_search_iterator( | |
120 | cls, |
|
118 | cls, | |
121 | app_ids=None, |
|
119 | app_ids=None, | |
122 | page=1, |
|
120 | page=1, | |
123 | items_per_page=50, |
|
121 | items_per_page=50, | |
124 | order_by=None, |
|
122 | order_by=None, | |
125 | filter_settings=None, |
|
123 | filter_settings=None, | |
126 | limit=None, |
|
124 | limit=None, | |
127 | ): |
|
125 | ): | |
128 | if not app_ids: |
|
126 | if not app_ids: | |
129 | return {} |
|
127 | return {} | |
130 | if not filter_settings: |
|
128 | if not filter_settings: | |
131 | filter_settings = {} |
|
129 | filter_settings = {} | |
132 |
|
130 | |||
133 | query = { |
|
131 | query = { | |
134 | "size": 0, |
|
132 | "size": 0, | |
135 | "query": { |
|
133 | "query": { | |
136 | "bool": { |
|
134 | "bool": { | |
137 | "must": [], |
|
135 | "must": [], | |
138 | "should": [], |
|
136 | "should": [], | |
139 | "filter": [{"terms": {"resource_id": list(app_ids)}}] |
|
137 | "filter": [{"terms": {"resource_id": list(app_ids)}}], | |
140 | } |
|
138 | } | |
141 | }, |
|
139 | }, | |
142 | "aggs": { |
|
140 | "aggs": { | |
143 | "top_groups": { |
|
141 | "top_groups": { | |
144 | "terms": { |
|
142 | "terms": { | |
145 | "size": 5000, |
|
143 | "size": 5000, | |
146 | "field": "join_field#report_group", |
|
144 | "field": "join_field#report_group", | |
147 | "order": {"newest": "desc"}, |
|
145 | "order": {"newest": "desc"}, | |
148 | }, |
|
146 | }, | |
149 | "aggs": { |
|
147 | "aggs": { | |
150 | "top_reports_hits": { |
|
148 | "top_reports_hits": { | |
151 | "top_hits": {"size": 1, "sort": {"start_time": "desc"}} |
|
149 | "top_hits": {"size": 1, "sort": {"start_time": "desc"}} | |
152 | }, |
|
150 | }, | |
153 | "newest": {"max": {"field": "start_time"}}, |
|
151 | "newest": {"max": {"field": "start_time"}}, | |
154 | }, |
|
152 | }, | |
155 | } |
|
153 | } | |
156 | }, |
|
154 | }, | |
157 | } |
|
155 | } | |
158 |
|
156 | |||
159 | start_date = filter_settings.get("start_date") |
|
157 | start_date = filter_settings.get("start_date") | |
160 | end_date = filter_settings.get("end_date") |
|
158 | end_date = filter_settings.get("end_date") | |
161 | filter_part = query["query"]["bool"]["filter"] |
|
159 | filter_part = query["query"]["bool"]["filter"] | |
162 | date_range = {"range": {"start_time": {}}} |
|
160 | date_range = {"range": {"start_time": {}}} | |
163 | if start_date: |
|
161 | if start_date: | |
164 | date_range["range"]["start_time"]["gte"] = start_date |
|
162 | date_range["range"]["start_time"]["gte"] = start_date | |
165 | if end_date: |
|
163 | if end_date: | |
166 | date_range["range"]["start_time"]["lte"] = end_date |
|
164 | date_range["range"]["start_time"]["lte"] = end_date | |
167 | if start_date or end_date: |
|
165 | if start_date or end_date: | |
168 | filter_part.append(date_range) |
|
166 | filter_part.append(date_range) | |
169 |
|
167 | |||
170 | priorities = filter_settings.get("priority") |
|
168 | priorities = filter_settings.get("priority") | |
171 |
|
169 | |||
172 | for tag in filter_settings.get("tags", []): |
|
170 | for tag in filter_settings.get("tags", []): | |
173 | tag_values = [v.lower() for v in tag["value"]] |
|
171 | tag_values = [v.lower() for v in tag["value"]] | |
174 | key = "tags.%s.values" % tag["name"].replace(".", "_") |
|
172 | key = "tags.%s.values" % tag["name"].replace(".", "_") | |
175 | filter_part.append({"terms": {key: tag_values}}) |
|
173 | filter_part.append({"terms": {key: tag_values}}) | |
176 |
|
174 | |||
177 | if priorities: |
|
175 | if priorities: | |
178 | filter_part.append( |
|
176 | filter_part.append( | |
179 | { |
|
177 | { | |
180 | "has_parent": { |
|
178 | "has_parent": { | |
181 | "parent_type": "report_group", |
|
179 | "parent_type": "report_group", | |
182 | "query": {"terms": {"priority": priorities}}, |
|
180 | "query": {"terms": {"priority": priorities}}, | |
183 | } |
|
181 | } | |
184 | } |
|
182 | } | |
185 | ) |
|
183 | ) | |
186 |
|
184 | |||
187 | min_occurences = filter_settings.get("min_occurences") |
|
185 | min_occurences = filter_settings.get("min_occurences") | |
188 | if min_occurences: |
|
186 | if min_occurences: | |
189 | filter_part.append( |
|
187 | filter_part.append( | |
190 | { |
|
188 | { | |
191 | "has_parent": { |
|
189 | "has_parent": { | |
192 | "parent_type": "report_group", |
|
190 | "parent_type": "report_group", | |
193 | "query": {"range": {"occurences": {"gte": min_occurences[0]}}}, |
|
191 | "query": {"range": {"occurences": {"gte": min_occurences[0]}}}, | |
194 | } |
|
192 | } | |
195 | } |
|
193 | } | |
196 | ) |
|
194 | ) | |
197 |
|
195 | |||
198 | min_duration = filter_settings.get("min_duration") |
|
196 | min_duration = filter_settings.get("min_duration") | |
199 | max_duration = filter_settings.get("max_duration") |
|
197 | max_duration = filter_settings.get("max_duration") | |
200 |
|
198 | |||
201 | request_ids = filter_settings.get("request_id") |
|
199 | request_ids = filter_settings.get("request_id") | |
202 | if request_ids: |
|
200 | if request_ids: | |
203 | filter_part.append({"terms": {"request_id": request_ids}}) |
|
201 | filter_part.append({"terms": {"request_id": request_ids}}) | |
204 |
|
202 | |||
205 | duration_range = {"range": {"average_duration": {}}} |
|
203 | duration_range = {"range": {"average_duration": {}}} | |
206 | if min_duration: |
|
204 | if min_duration: | |
207 | duration_range["range"]["average_duration"]["gte"] = min_duration[0] |
|
205 | duration_range["range"]["average_duration"]["gte"] = min_duration[0] | |
208 | if max_duration: |
|
206 | if max_duration: | |
209 | duration_range["range"]["average_duration"]["lte"] = max_duration[0] |
|
207 | duration_range["range"]["average_duration"]["lte"] = max_duration[0] | |
210 | if min_duration or max_duration: |
|
208 | if min_duration or max_duration: | |
211 | filter_part.append( |
|
209 | filter_part.append( | |
212 | {"has_parent": {"parent_type": "report_group", "query": duration_range}} |
|
210 | {"has_parent": {"parent_type": "report_group", "query": duration_range}} | |
213 | ) |
|
211 | ) | |
214 |
|
212 | |||
215 | http_status = filter_settings.get("http_status") |
|
213 | http_status = filter_settings.get("http_status") | |
216 | report_type = filter_settings.get("report_type", [ReportType.error]) |
|
214 | report_type = filter_settings.get("report_type", [ReportType.error]) | |
217 | # set error report type if http status is not found |
|
215 | # set error report type if http status is not found | |
218 | # and we are dealing with slow reports |
|
216 | # and we are dealing with slow reports | |
219 | if not http_status or ReportType.slow in report_type: |
|
217 | if not http_status or ReportType.slow in report_type: | |
220 | filter_part.append({"terms": {"report_type": report_type}}) |
|
218 | filter_part.append({"terms": {"report_type": report_type}}) | |
221 | if http_status: |
|
219 | if http_status: | |
222 | filter_part.append({"terms": {"http_status": http_status}}) |
|
220 | filter_part.append({"terms": {"http_status": http_status}}) | |
223 |
|
221 | |||
224 | messages = filter_settings.get("message") |
|
222 | messages = filter_settings.get("message") | |
225 | if messages: |
|
223 | if messages: | |
226 | condition = {"match": {"message": " ".join(messages)}} |
|
224 | condition = {"match": {"message": " ".join(messages)}} | |
227 | query["query"]["bool"]["must"].append(condition) |
|
225 | query["query"]["bool"]["must"].append(condition) | |
228 | errors = filter_settings.get("error") |
|
226 | errors = filter_settings.get("error") | |
229 | if errors: |
|
227 | if errors: | |
230 | condition = {"match": {"error": " ".join(errors)}} |
|
228 | condition = {"match": {"error": " ".join(errors)}} | |
231 | query["query"]["bool"]["must"].append(condition) |
|
229 | query["query"]["bool"]["must"].append(condition) | |
232 | url_domains = filter_settings.get("url_domain") |
|
230 | url_domains = filter_settings.get("url_domain") | |
233 | if url_domains: |
|
231 | if url_domains: | |
234 | condition = {"terms": {"url_domain": url_domains}} |
|
232 | condition = {"terms": {"url_domain": url_domains}} | |
235 | query["query"]["bool"]["must"].append(condition) |
|
233 | query["query"]["bool"]["must"].append(condition) | |
236 | url_paths = filter_settings.get("url_path") |
|
234 | url_paths = filter_settings.get("url_path") | |
237 | if url_paths: |
|
235 | if url_paths: | |
238 | condition = {"terms": {"url_path": url_paths}} |
|
236 | condition = {"terms": {"url_path": url_paths}} | |
239 | query["query"]["bool"]["must"].append(condition) |
|
237 | query["query"]["bool"]["must"].append(condition) | |
240 |
|
238 | |||
241 | if filter_settings.get("report_status"): |
|
239 | if filter_settings.get("report_status"): | |
242 | for status in filter_settings.get("report_status"): |
|
240 | for status in filter_settings.get("report_status"): | |
243 | if status == "never_reviewed": |
|
241 | if status == "never_reviewed": | |
244 | filter_part.append( |
|
242 | filter_part.append( | |
245 | { |
|
243 | { | |
246 | "has_parent": { |
|
244 | "has_parent": { | |
247 | "parent_type": "report_group", |
|
245 | "parent_type": "report_group", | |
248 | "query": {"term": {"read": False}}, |
|
246 | "query": {"term": {"read": False}}, | |
249 | } |
|
247 | } | |
250 | } |
|
248 | } | |
251 | ) |
|
249 | ) | |
252 | elif status == "reviewed": |
|
250 | elif status == "reviewed": | |
253 | filter_part.append( |
|
251 | filter_part.append( | |
254 | { |
|
252 | { | |
255 | "has_parent": { |
|
253 | "has_parent": { | |
256 | "parent_type": "report_group", |
|
254 | "parent_type": "report_group", | |
257 | "query": {"term": {"read": True}}, |
|
255 | "query": {"term": {"read": True}}, | |
258 | } |
|
256 | } | |
259 | } |
|
257 | } | |
260 | ) |
|
258 | ) | |
261 | elif status == "public": |
|
259 | elif status == "public": | |
262 | filter_part.append( |
|
260 | filter_part.append( | |
263 | { |
|
261 | { | |
264 | "has_parent": { |
|
262 | "has_parent": { | |
265 | "parent_type": "report_group", |
|
263 | "parent_type": "report_group", | |
266 | "query": {"term": {"public": True}}, |
|
264 | "query": {"term": {"public": True}}, | |
267 | } |
|
265 | } | |
268 | } |
|
266 | } | |
269 | ) |
|
267 | ) | |
270 | elif status == "fixed": |
|
268 | elif status == "fixed": | |
271 | filter_part.append( |
|
269 | filter_part.append( | |
272 | { |
|
270 | { | |
273 | "has_parent": { |
|
271 | "has_parent": { | |
274 | "parent_type": "report_group", |
|
272 | "parent_type": "report_group", | |
275 | "query": {"term": {"fixed": True}}, |
|
273 | "query": {"term": {"fixed": True}}, | |
276 | } |
|
274 | } | |
277 | } |
|
275 | } | |
278 | ) |
|
276 | ) | |
279 |
|
277 | |||
280 | # logging.getLogger('pyelasticsearch').setLevel(logging.DEBUG) |
|
278 | # logging.getLogger('pyelasticsearch').setLevel(logging.DEBUG) | |
281 | index_names = es_index_name_limiter( |
|
279 | index_names = es_index_name_limiter( | |
282 | filter_settings.get("start_date"), |
|
280 | filter_settings.get("start_date"), | |
283 | filter_settings.get("end_date"), |
|
281 | filter_settings.get("end_date"), | |
284 | ixtypes=["reports"], |
|
282 | ixtypes=["reports"], | |
285 | ) |
|
283 | ) | |
286 | if index_names: |
|
284 | if index_names: | |
287 | results = Datastores.es.search( |
|
285 | results = Datastores.es.search( | |
288 | body=query, |
|
286 | body=query, | |
289 | index=index_names, |
|
287 | index=index_names, | |
290 | doc_type=["report", "report_group"], |
|
288 | doc_type=["report", "report_group"], | |
291 | size=0, |
|
289 | size=0, | |
292 | ) |
|
290 | ) | |
293 | else: |
|
291 | else: | |
294 | return [] |
|
292 | return [] | |
295 | return results["aggregations"] |
|
293 | return results["aggregations"] | |
296 |
|
294 | |||
297 | @classmethod |
|
295 | @classmethod | |
298 | def get_paginator_by_app_ids( |
|
296 | def get_paginator_by_app_ids( | |
299 | cls, |
|
297 | cls, | |
300 | app_ids=None, |
|
298 | app_ids=None, | |
301 | page=1, |
|
299 | page=1, | |
302 | item_count=None, |
|
300 | item_count=None, | |
303 | items_per_page=50, |
|
301 | items_per_page=50, | |
304 | order_by=None, |
|
302 | order_by=None, | |
305 | filter_settings=None, |
|
303 | filter_settings=None, | |
306 | exclude_columns=None, |
|
304 | exclude_columns=None, | |
307 | db_session=None, |
|
305 | db_session=None, | |
308 | ): |
|
306 | ): | |
309 | if not filter_settings: |
|
307 | if not filter_settings: | |
310 | filter_settings = {} |
|
308 | filter_settings = {} | |
311 | results = cls.get_search_iterator( |
|
309 | results = cls.get_search_iterator( | |
312 | app_ids, page, items_per_page, order_by, filter_settings |
|
310 | app_ids, page, items_per_page, order_by, filter_settings | |
313 | ) |
|
311 | ) | |
314 |
|
312 | |||
315 | ordered_ids = [] |
|
313 | ordered_ids = [] | |
316 | if results: |
|
314 | if results: | |
317 | for item in results["top_groups"]["buckets"]: |
|
315 | for item in results["top_groups"]["buckets"]: | |
318 | pg_id = item["top_reports_hits"]["hits"]["hits"][0]["_source"][
|
316 | pg_id = item["top_reports_hits"]["hits"]["hits"][0]["_source"][ | |
|
317 | "report_id" | |||
|
318 | ] | |||
319 | ordered_ids.append(pg_id) |
|
319 | ordered_ids.append(pg_id) | |
320 | log.info(filter_settings) |
|
320 | log.info(filter_settings) | |
321 | paginator = paginate.Page( |
|
321 | paginator = paginate.Page( | |
322 | ordered_ids, items_per_page=items_per_page, **filter_settings |
|
322 | ordered_ids, items_per_page=items_per_page, **filter_settings | |
323 | ) |
|
323 | ) | |
324 | sa_items = () |
|
324 | sa_items = () | |
325 | if paginator.items: |
|
325 | if paginator.items: | |
326 | db_session = get_db_session(db_session) |
|
326 | db_session = get_db_session(db_session) | |
327 | # latest report detail |
|
327 | # latest report detail | |
328 | query = db_session.query(Report) |
|
328 | query = db_session.query(Report) | |
329 | query = query.options(sa.orm.joinedload(Report.report_group)) |
|
329 | query = query.options(sa.orm.joinedload(Report.report_group)) | |
330 | query = query.filter(Report.id.in_(paginator.items)) |
|
330 | query = query.filter(Report.id.in_(paginator.items)) | |
331 | if filter_settings.get("order_col"): |
|
331 | if filter_settings.get("order_col"): | |
332 | order_col = filter_settings.get("order_col") |
|
332 | order_col = filter_settings.get("order_col") | |
333 | if filter_settings.get("order_dir") == "dsc": |
|
333 | if filter_settings.get("order_dir") == "dsc": | |
334 | sort_on = "desc" |
|
334 | sort_on = "desc" | |
335 | else: |
|
335 | else: | |
336 | sort_on = "asc" |
|
336 | sort_on = "asc" | |
337 | if order_col == "when": |
|
337 | if order_col == "when": | |
338 | order_col = "last_timestamp" |
|
338 | order_col = "last_timestamp" | |
339 | query = query.order_by( |
|
339 | query = query.order_by( | |
340 | getattr(sa, sort_on)(getattr(ReportGroup, order_col)) |
|
340 | getattr(sa, sort_on)(getattr(ReportGroup, order_col)) | |
341 | ) |
|
341 | ) | |
342 | sa_items = query.all() |
|
342 | sa_items = query.all() | |
343 | sorted_instance_list = [] |
|
343 | sorted_instance_list = [] | |
344 | for i_id in ordered_ids: |
|
344 | for i_id in ordered_ids: | |
345 | for report in sa_items: |
|
345 | for report in sa_items: | |
346 | if str(report.id) == i_id and report not in sorted_instance_list: |
|
346 | if str(report.id) == i_id and report not in sorted_instance_list: | |
347 | sorted_instance_list.append(report) |
|
347 | sorted_instance_list.append(report) | |
348 | paginator.sa_items = sorted_instance_list |
|
348 | paginator.sa_items = sorted_instance_list | |
349 | return paginator |
|
349 | return paginator | |
350 |
|
350 | |||
351 | @classmethod |
|
351 | @classmethod | |
352 | def by_app_ids(cls, app_ids=None, order_by=True, db_session=None): |
|
352 | def by_app_ids(cls, app_ids=None, order_by=True, db_session=None): | |
353 | db_session = get_db_session(db_session) |
|
353 | db_session = get_db_session(db_session) | |
354 | q = db_session.query(ReportGroup) |
|
354 | q = db_session.query(ReportGroup) | |
355 | if app_ids: |
|
355 | if app_ids: | |
356 | q = q.filter(ReportGroup.resource_id.in_(app_ids)) |
|
356 | q = q.filter(ReportGroup.resource_id.in_(app_ids)) | |
357 | if order_by: |
|
357 | if order_by: | |
358 | q = q.order_by(sa.desc(ReportGroup.id)) |
|
358 | q = q.order_by(sa.desc(ReportGroup.id)) | |
359 | return q |
|
359 | return q | |
360 |
|
360 | |||
361 | @classmethod |
|
361 | @classmethod | |
362 | def by_id(cls, group_id, app_ids=None, db_session=None): |
|
362 | def by_id(cls, group_id, app_ids=None, db_session=None): | |
363 | db_session = get_db_session(db_session) |
|
363 | db_session = get_db_session(db_session) | |
364 | q = db_session.query(ReportGroup).filter(ReportGroup.id == int(group_id)) |
|
364 | q = db_session.query(ReportGroup).filter(ReportGroup.id == int(group_id)) | |
365 | if app_ids: |
|
365 | if app_ids: | |
366 | q = q.filter(ReportGroup.resource_id.in_(app_ids)) |
|
366 | q = q.filter(ReportGroup.resource_id.in_(app_ids)) | |
367 | return q.first() |
|
367 | return q.first() | |
368 |
|
368 | |||
369 | @classmethod |
|
369 | @classmethod | |
370 | def by_ids(cls, group_ids=None, db_session=None): |
|
370 | def by_ids(cls, group_ids=None, db_session=None): | |
371 | db_session = get_db_session(db_session) |
|
371 | db_session = get_db_session(db_session) | |
372 | query = db_session.query(ReportGroup) |
|
372 | query = db_session.query(ReportGroup) | |
373 | query = query.filter(ReportGroup.id.in_(group_ids)) |
|
373 | query = query.filter(ReportGroup.id.in_(group_ids)) | |
374 | return query |
|
374 | return query | |
375 |
|
375 | |||
376 | @classmethod |
|
376 | @classmethod | |
377 | def by_hash_and_resource( |
|
377 | def by_hash_and_resource( | |
378 | cls, resource_id, grouping_hash, since_when=None, db_session=None |
|
378 | cls, resource_id, grouping_hash, since_when=None, db_session=None | |
379 | ): |
|
379 | ): | |
380 | db_session = get_db_session(db_session) |
|
380 | db_session = get_db_session(db_session) | |
381 | q = db_session.query(ReportGroup) |
|
381 | q = db_session.query(ReportGroup) | |
382 | q = q.filter(ReportGroup.resource_id == resource_id) |
|
382 | q = q.filter(ReportGroup.resource_id == resource_id) | |
383 | q = q.filter(ReportGroup.grouping_hash == grouping_hash) |
|
383 | q = q.filter(ReportGroup.grouping_hash == grouping_hash) | |
384 | q = q.filter(ReportGroup.fixed == False) |
|
384 | q = q.filter(ReportGroup.fixed == False) | |
385 | if since_when: |
|
385 | if since_when: | |
386 | q = q.filter(ReportGroup.first_timestamp >= since_when) |
|
386 | q = q.filter(ReportGroup.first_timestamp >= since_when) | |
387 | return q.first() |
|
387 | return q.first() | |
388 |
|
388 | |||
389 | @classmethod |
|
389 | @classmethod | |
390 | def users_commenting(cls, report_group, exclude_user_id=None, db_session=None): |
|
390 | def users_commenting(cls, report_group, exclude_user_id=None, db_session=None): | |
391 | db_session = get_db_session(None, report_group) |
|
391 | db_session = get_db_session(None, report_group) | |
392 | query = db_session.query(User).distinct() |
|
392 | query = db_session.query(User).distinct() | |
393 | query = query.filter(User.id == ReportComment.owner_id) |
|
393 | query = query.filter(User.id == ReportComment.owner_id) | |
394 | query = query.filter(ReportComment.group_id == report_group.id) |
|
394 | query = query.filter(ReportComment.group_id == report_group.id) | |
395 | if exclude_user_id: |
|
395 | if exclude_user_id: | |
396 | query = query.filter(ReportComment.owner_id != exclude_user_id) |
|
396 | query = query.filter(ReportComment.owner_id != exclude_user_id) | |
397 | return query |
|
397 | return query | |
398 |
|
398 | |||
399 | @classmethod |
|
399 | @classmethod | |
400 | def affected_users_count(cls, report_group, db_session=None): |
|
400 | def affected_users_count(cls, report_group, db_session=None): | |
401 | db_session = get_db_session(db_session) |
|
401 | db_session = get_db_session(db_session) | |
402 | query = db_session.query(sa.func.count(Report.username)) |
|
402 | query = db_session.query(sa.func.count(Report.username)) | |
403 | query = query.filter(Report.group_id == report_group.id) |
|
403 | query = query.filter(Report.group_id == report_group.id) | |
404 | query = query.filter(Report.username != "") |
|
404 | query = query.filter(Report.username != "") | |
405 | query = query.filter(Report.username != None) |
|
405 | query = query.filter(Report.username != None) | |
406 | query = query.group_by(Report.username) |
|
406 | query = query.group_by(Report.username) | |
407 | return query.count() |
|
407 | return query.count() | |
408 |
|
408 | |||
409 | @classmethod |
|
409 | @classmethod | |
410 | def top_affected_users(cls, report_group, db_session=None): |
|
410 | def top_affected_users(cls, report_group, db_session=None): | |
411 | db_session = get_db_session(db_session) |
|
411 | db_session = get_db_session(db_session) | |
412 | count_label = sa.func.count(Report.username).label("count") |
|
412 | count_label = sa.func.count(Report.username).label("count") | |
413 | query = db_session.query(Report.username, count_label) |
|
413 | query = db_session.query(Report.username, count_label) | |
414 | query = query.filter(Report.group_id == report_group.id) |
|
414 | query = query.filter(Report.group_id == report_group.id) | |
415 | query = query.filter(Report.username != None) |
|
415 | query = query.filter(Report.username != None) | |
416 | query = query.filter(Report.username != "") |
|
416 | query = query.filter(Report.username != "") | |
417 | query = query.group_by(Report.username) |
|
417 | query = query.group_by(Report.username) | |
418 | query = query.order_by(sa.desc(count_label)) |
|
418 | query = query.order_by(sa.desc(count_label)) | |
419 | query = query.limit(50) |
|
419 | query = query.limit(50) | |
420 | return query |
|
420 | return query | |
421 |
|
421 | |||
422 | @classmethod |
|
422 | @classmethod | |
423 | def get_report_stats(cls, request, filter_settings): |
|
423 | def get_report_stats(cls, request, filter_settings): | |
424 | """ |
|
424 | """ | |
425 | Gets report dashboard graphs |
|
425 | Gets report dashboard graphs | |
426 | Returns information for BAR charts with occurences/interval information |
|
426 | Returns information for BAR charts with occurences/interval information | |
427 | detailed means version that returns time intervals - non detailed |
|
427 | detailed means version that returns time intervals - non detailed | |
428 | returns total sum |
|
428 | returns total sum | |
429 | """ |
|
429 | """ | |
430 | delta = filter_settings["end_date"] - filter_settings["start_date"] |
|
430 | delta = filter_settings["end_date"] - filter_settings["start_date"] | |
431 | if delta < h.time_deltas.get("12h")["delta"]: |
|
431 | if delta < h.time_deltas.get("12h")["delta"]: | |
432 | interval = "1m" |
|
432 | interval = "1m" | |
433 | elif delta <= h.time_deltas.get("3d")["delta"]: |
|
433 | elif delta <= h.time_deltas.get("3d")["delta"]: | |
434 | interval = "5m" |
|
434 | interval = "5m" | |
435 | elif delta >= h.time_deltas.get("2w")["delta"]: |
|
435 | elif delta >= h.time_deltas.get("2w")["delta"]: | |
436 | interval = "24h" |
|
436 | interval = "24h" | |
437 | else: |
|
437 | else: | |
438 | interval = "1h" |
|
438 | interval = "1h" | |
439 |
|
439 | |||
440 | group_id = filter_settings.get("group_id") |
|
440 | group_id = filter_settings.get("group_id") | |
441 |
|
441 | |||
442 | es_query = { |
|
442 | es_query = { | |
443 | "aggs": { |
|
443 | "aggs": { | |
444 | "parent_agg": { |
|
444 | "parent_agg": { | |
445 | "aggs": { |
|
445 | "aggs": { | |
446 | "types": { |
|
446 | "types": { | |
447 | "aggs": { |
|
447 | "aggs": { | |
448 | "sub_agg": {
|
448 | "sub_agg": { | |
|
449 | "terms": {"field": "tags.type.values.keyword"} | |||
|
450 | } | |||
449 | }, |
|
451 | }, | |
450 | "filter": { |
|
452 | "filter": { | |
451 | "bool": { |
|
453 | "bool": { | |
452 | "filter": [
|
454 | "filter": [ | |
|
455 | {"exists": {"field": "tags.type.values"}} | |||
|
456 | ] | |||
453 | } |
|
457 | } | |
454 | }, |
|
458 | }, | |
455 | } |
|
459 | } | |
456 | }, |
|
460 | }, | |
457 | "date_histogram": { |
|
461 | "date_histogram": { | |
458 | "extended_bounds": { |
|
462 | "extended_bounds": { | |
459 | "max": filter_settings["end_date"], |
|
463 | "max": filter_settings["end_date"], | |
460 | "min": filter_settings["start_date"], |
|
464 | "min": filter_settings["start_date"], | |
461 | }, |
|
465 | }, | |
462 | "field": "timestamp", |
|
466 | "field": "timestamp", | |
463 | "interval": interval, |
|
467 | "interval": interval, | |
464 | "min_doc_count": 0, |
|
468 | "min_doc_count": 0, | |
465 | }, |
|
469 | }, | |
466 | } |
|
470 | } | |
467 | }, |
|
471 | }, | |
468 | "query": { |
|
472 | "query": { | |
469 | "bool": { |
|
473 | "bool": { | |
470 | "filter": [ |
|
474 | "filter": [ | |
471 | { |
|
475 | {"terms": {"resource_id": [filter_settings["resource"][0]]}}, | |
472 | "terms": { |
|
|||
473 | "resource_id": [filter_settings["resource"][0]] |
|
|||
474 | } |
|
|||
475 | }, |
|
|||
476 | { |
|
476 | { | |
477 | "range": { |
|
477 | "range": { | |
478 | "timestamp": { |
|
478 | "timestamp": { | |
479 | "gte": filter_settings["start_date"], |
|
479 | "gte": filter_settings["start_date"], | |
480 | "lte": filter_settings["end_date"], |
|
480 | "lte": filter_settings["end_date"], | |
481 | } |
|
481 | } | |
482 | } |
|
482 | } | |
483 | }, |
|
483 | }, | |
484 | ] |
|
484 | ] | |
485 | } |
|
485 | } | |
486 | }, |
|
486 | }, | |
487 | } |
|
487 | } | |
488 | if group_id: |
|
488 | if group_id: | |
489 | parent_agg = es_query["aggs"]["parent_agg"] |
|
489 | parent_agg = es_query["aggs"]["parent_agg"] | |
490 | filters = parent_agg["aggs"]["types"]["filter"]["bool"]["filter"] |
|
490 | filters = parent_agg["aggs"]["types"]["filter"]["bool"]["filter"] | |
491 | filters.append({"terms": {"tags.group_id.values": [group_id]}}) |
|
491 | filters.append({"terms": {"tags.group_id.values": [group_id]}}) | |
492 |
|
492 | |||
493 | index_names = es_index_name_limiter( |
|
493 | index_names = es_index_name_limiter( | |
494 | start_date=filter_settings["start_date"], |
|
494 | start_date=filter_settings["start_date"], | |
495 | end_date=filter_settings["end_date"], |
|
495 | end_date=filter_settings["end_date"], | |
496 | ixtypes=["reports"], |
|
496 | ixtypes=["reports"], | |
497 | ) |
|
497 | ) | |
498 |
|
498 | |||
499 | if not index_names: |
|
499 | if not index_names: | |
500 | return [] |
|
500 | return [] | |
501 |
|
501 | |||
502 | result = Datastores.es.search( |
|
502 | result = Datastores.es.search( | |
503 | body=es_query, index=index_names, doc_type="log", size=0 |
|
503 | body=es_query, index=index_names, doc_type="log", size=0 | |
504 | ) |
|
504 | ) | |
505 | series = [] |
|
505 | series = [] | |
506 | for bucket in result["aggregations"]["parent_agg"]["buckets"]: |
|
506 | for bucket in result["aggregations"]["parent_agg"]["buckets"]: | |
507 | point = { |
|
507 | point = { | |
508 | "x": datetime.utcfromtimestamp(int(bucket["key"]) / 1000), |
|
508 | "x": datetime.utcfromtimestamp(int(bucket["key"]) / 1000), | |
509 | "report": 0, |
|
509 | "report": 0, | |
510 | "not_found": 0, |
|
510 | "not_found": 0, | |
511 | "slow_report": 0, |
|
511 | "slow_report": 0, | |
512 | } |
|
512 | } | |
513 | for subbucket in bucket["types"]["sub_agg"]["buckets"]: |
|
513 | for subbucket in bucket["types"]["sub_agg"]["buckets"]: | |
514 | if subbucket["key"] == "slow": |
|
514 | if subbucket["key"] == "slow": | |
515 | point["slow_report"] = subbucket["doc_count"] |
|
515 | point["slow_report"] = subbucket["doc_count"] | |
516 | elif subbucket["key"] == "error": |
|
516 | elif subbucket["key"] == "error": | |
517 | point["report"] = subbucket["doc_count"] |
|
517 | point["report"] = subbucket["doc_count"] | |
518 | elif subbucket["key"] == "not_found": |
|
518 | elif subbucket["key"] == "not_found": | |
519 | point["not_found"] = subbucket["doc_count"] |
|
519 | point["not_found"] = subbucket["doc_count"] | |
520 | series.append(point) |
|
520 | series.append(point) | |
521 | return series |
|
521 | return series |
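
Reviewer note on ReportGroupService.get_trending: the keyword-field change only touches the aggregation itself, so the bucket post-processing stays as before. A minimal sketch of that post-processing, run against an invented aggregation response (no Elasticsearch needed), for anyone verifying the bucket handling:

# Made-up aggregation response shaped like the "parent_agg"/"groups" result
# that get_trending() reads back from Elasticsearch; the counts are invented.
fake_result = {
    "aggregations": {
        "parent_agg": {
            "buckets": [
                {"key": "1234", "groups": {"sub_agg": {"value": 7}}},
                {"key": "5678", "groups": {"sub_agg": {"value": 3}}},
            ]
        }
    }
}

# Mirror of the post-processing in get_trending(): the bucket key is the
# report group id, the sub_agg value is how many reports hit that group.
series = [
    {"key": bucket["key"], "groups": bucket["groups"]["sub_agg"]["value"]}
    for bucket in fake_result["aggregations"]["parent_agg"]["buckets"]
]
report_groups_d = {int(g["key"]): g["groups"] or 0 for g in series}
print(sorted(report_groups_d.items(), key=lambda kv: kv[1], reverse=True))
# -> [(1234, 7), (5678, 3)], ordered by occurrence count like the trending view
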
@@ -1,63 +1,65 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 |
|
2 | |||
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors |
|
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors | |
4 | # |
|
4 | # | |
5 | # Licensed under the Apache License, Version 2.0 (the "License"); |
|
5 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
6 | # you may not use this file except in compliance with the License. |
|
6 | # you may not use this file except in compliance with the License. | |
7 | # You may obtain a copy of the License at |
|
7 | # You may obtain a copy of the License at | |
8 | # |
|
8 | # | |
9 | # http://www.apache.org/licenses/LICENSE-2.0 |
|
9 | # http://www.apache.org/licenses/LICENSE-2.0 | |
10 | # |
|
10 | # | |
11 | # Unless required by applicable law or agreed to in writing, software |
|
11 | # Unless required by applicable law or agreed to in writing, software | |
12 | # distributed under the License is distributed on an "AS IS" BASIS, |
|
12 | # distributed under the License is distributed on an "AS IS" BASIS, | |
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
14 | # See the License for the specific language governing permissions and |
|
14 | # See the License for the specific language governing permissions and | |
15 | # limitations under the License. |
|
15 | # limitations under the License. | |
16 |
|
16 | |||
17 | from appenlight.models import Datastores |
|
17 | from appenlight.models import Datastores | |
18 | from appenlight.models.services.base import BaseService |
|
18 | from appenlight.models.services.base import BaseService | |
19 | from appenlight.lib.enums import ReportType |
|
19 | from appenlight.lib.enums import ReportType | |
20 | from appenlight.lib.utils import es_index_name_limiter |
|
20 | from appenlight.lib.utils import es_index_name_limiter | |
21 |
|
21 | |||
22 |
|
22 | |||
23 | class ReportStatService(BaseService): |
|
23 | class ReportStatService(BaseService): | |
24 | @classmethod |
|
24 | @classmethod | |
25 | def count_by_type(cls, report_type, resource_id, since_when): |
|
25 | def count_by_type(cls, report_type, resource_id, since_when): | |
26 | report_type = ReportType.key_from_value(report_type) |
|
26 | report_type = ReportType.key_from_value(report_type) | |
27 |
|
27 | |||
28 | index_names = es_index_name_limiter(start_date=since_when, ixtypes=["reports"]) |
|
28 | index_names = es_index_name_limiter(start_date=since_when, ixtypes=["reports"]) | |
29 |
|
29 | |||
30 | es_query = { |
|
30 | es_query = { | |
31 | "aggs": { |
|
31 | "aggs": { | |
32 | "reports": { |
|
32 | "reports": { | |
33 | "aggs": { |
|
33 | "aggs": { | |
34 | "sub_agg": {"value_count": {"field": "tags.group_id.values.keyword"}} |
|
34 | "sub_agg": { | |
|
35 | "value_count": {"field": "tags.group_id.values.keyword"} | |||
|
36 | } | |||
35 | }, |
|
37 | }, | |
36 | "filter": { |
|
38 | "filter": { | |
37 | "bool": { |
|
39 | "bool": { | |
38 | "filter": [ |
|
40 | "filter": [ | |
39 | {"terms": {"resource_id": [resource_id]}}, |
|
41 | {"terms": {"resource_id": [resource_id]}}, | |
40 | {"exists": {"field": "tags.group_id.values"}}, |
|
42 | {"exists": {"field": "tags.group_id.values"}}, | |
41 | ] |
|
43 | ] | |
42 | } |
|
44 | } | |
43 | }, |
|
45 | }, | |
44 | } |
|
46 | } | |
45 | }, |
|
47 | }, | |
46 | "query": { |
|
48 | "query": { | |
47 | "bool": { |
|
49 | "bool": { | |
48 | "filter": [ |
|
50 | "filter": [ | |
49 | {"terms": {"resource_id": [resource_id]}}, |
|
51 | {"terms": {"resource_id": [resource_id]}}, | |
50 | {"terms": {"tags.type.values": [report_type]}}, |
|
52 | {"terms": {"tags.type.values": [report_type]}}, | |
51 | {"range": {"timestamp": {"gte": since_when}}}, |
|
53 | {"range": {"timestamp": {"gte": since_when}}}, | |
52 | ] |
|
54 | ] | |
53 | } |
|
55 | } | |
54 | }, |
|
56 | }, | |
55 | } |
|
57 | } | |
56 |
|
58 | |||
57 | if index_names: |
|
59 | if index_names: | |
58 | result = Datastores.es.search( |
|
60 | result = Datastores.es.search( | |
59 | body=es_query, index=index_names, doc_type="log", size=0 |
|
61 | body=es_query, index=index_names, doc_type="log", size=0 | |
60 | ) |
|
62 | ) | |
61 | return result["aggregations"]["reports"]["sub_agg"]["value"] |
|
63 | return result["aggregations"]["reports"]["sub_agg"]["value"] | |
62 | else: |
|
64 | else: | |
63 | return 0 |
|
65 | return 0 |
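
Reviewer note on ReportStatService.count_by_type: the only functional change is that value_count now targets the ".keyword" sub-field. A standalone sketch of the aggregation body the method builds, with placeholder resource id, report type and timestamp (no ES connection involved):

import json

# Placeholder values; count_by_type() receives the real ones from the caller
# and translates the report type via ReportType.key_from_value().
resource_id = 42
report_type = "error"
since_when = "2017-01-01T00:00:00"

es_query = {
    "aggs": {
        "reports": {
            "aggs": {
                "sub_agg": {
                    # counts distinct group-id tag values on the keyword field
                    "value_count": {"field": "tags.group_id.values.keyword"}
                }
            },
            "filter": {
                "bool": {
                    "filter": [
                        {"terms": {"resource_id": [resource_id]}},
                        {"exists": {"field": "tags.group_id.values"}},
                    ]
                }
            },
        }
    },
    "query": {
        "bool": {
            "filter": [
                {"terms": {"resource_id": [resource_id]}},
                {"terms": {"tags.type.values": [report_type]}},
                {"range": {"timestamp": {"gte": since_when}}},
            ]
        }
    },
}
print(json.dumps(es_query, indent=2))
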
@@ -1,612 +1,623 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 |
|
2 | |||
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors |
|
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors | |
4 | # |
|
4 | # | |
5 | # Licensed under the Apache License, Version 2.0 (the "License"); |
|
5 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
6 | # you may not use this file except in compliance with the License. |
|
6 | # you may not use this file except in compliance with the License. | |
7 | # You may obtain a copy of the License at |
|
7 | # You may obtain a copy of the License at | |
8 | # |
|
8 | # | |
9 | # http://www.apache.org/licenses/LICENSE-2.0 |
|
9 | # http://www.apache.org/licenses/LICENSE-2.0 | |
10 | # |
|
10 | # | |
11 | # Unless required by applicable law or agreed to in writing, software |
|
11 | # Unless required by applicable law or agreed to in writing, software | |
12 | # distributed under the License is distributed on an "AS IS" BASIS, |
|
12 | # distributed under the License is distributed on an "AS IS" BASIS, | |
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
14 | # See the License for the specific language governing permissions and |
|
14 | # See the License for the specific language governing permissions and | |
15 | # limitations under the License. |
|
15 | # limitations under the License. | |
16 |
|
16 | |||
17 | from datetime import datetime |
|
17 | from datetime import datetime | |
18 |
|
18 | |||
19 | import appenlight.lib.helpers as h |
|
19 | import appenlight.lib.helpers as h | |
20 | from appenlight.models import get_db_session, Datastores |
|
20 | from appenlight.models import get_db_session, Datastores | |
21 | from appenlight.models.services.base import BaseService |
|
21 | from appenlight.models.services.base import BaseService | |
22 | from appenlight.lib.enums import ReportType |
|
22 | from appenlight.lib.enums import ReportType | |
23 | from appenlight.lib.utils import es_index_name_limiter |
|
23 | from appenlight.lib.utils import es_index_name_limiter | |
24 |
|
24 | |||
25 | try: |
|
25 | try: | |
26 | from ae_uptime_ce.models.services.uptime_metric import UptimeMetricService |
|
26 | from ae_uptime_ce.models.services.uptime_metric import UptimeMetricService | |
27 | except ImportError: |
|
27 | except ImportError: | |
28 | UptimeMetricService = None |
|
28 | UptimeMetricService = None | |
29 |
|
29 | |||
30 |
|
30 | |||
31 | def check_key(key, stats, uptime, total_seconds): |
|
31 | def check_key(key, stats, uptime, total_seconds): | |
32 | if key not in stats: |
|
32 | if key not in stats: | |
33 | stats[key] = { |
|
33 | stats[key] = { | |
34 | "name": key, |
|
34 | "name": key, | |
35 | "requests": 0, |
|
35 | "requests": 0, | |
36 | "errors": 0, |
|
36 | "errors": 0, | |
37 | "tolerated_requests": 0, |
|
37 | "tolerated_requests": 0, | |
38 | "frustrating_requests": 0, |
|
38 | "frustrating_requests": 0, | |
39 | "satisfying_requests": 0, |
|
39 | "satisfying_requests": 0, | |
40 | "total_minutes": total_seconds / 60.0, |
|
40 | "total_minutes": total_seconds / 60.0, | |
41 | "uptime": uptime, |
|
41 | "uptime": uptime, | |
42 | "apdex": 0, |
|
42 | "apdex": 0, | |
43 | "rpm": 0, |
|
43 | "rpm": 0, | |
44 | "response_time": 0, |
|
44 | "response_time": 0, | |
45 | "avg_response_time": 0, |
|
45 | "avg_response_time": 0, | |
46 | } |
|
46 | } | |
47 |
|
47 | |||
48 |
|
48 | |||
49 | class RequestMetricService(BaseService): |
|
49 | class RequestMetricService(BaseService): | |
50 | @classmethod |
|
50 | @classmethod | |
51 | def get_metrics_stats(cls, request, filter_settings, db_session=None): |
|
51 | def get_metrics_stats(cls, request, filter_settings, db_session=None): | |
52 | delta = filter_settings["end_date"] - filter_settings["start_date"] |
|
52 | delta = filter_settings["end_date"] - filter_settings["start_date"] | |
53 | if delta < h.time_deltas.get("12h")["delta"]: |
|
53 | if delta < h.time_deltas.get("12h")["delta"]: | |
54 | interval = "1m" |
|
54 | interval = "1m" | |
55 | elif delta <= h.time_deltas.get("3d")["delta"]: |
|
55 | elif delta <= h.time_deltas.get("3d")["delta"]: | |
56 | interval = "5m" |
|
56 | interval = "5m" | |
57 | elif delta >= h.time_deltas.get("2w")["delta"]: |
|
57 | elif delta >= h.time_deltas.get("2w")["delta"]: | |
58 | interval = "24h" |
|
58 | interval = "24h" | |
59 | else: |
|
59 | else: | |
60 | interval = "1h" |
|
60 | interval = "1h" | |
61 |
|
61 | |||
62 | filter_settings["namespace"] = ["appenlight.request_metric"] |
|
62 | filter_settings["namespace"] = ["appenlight.request_metric"] | |
63 |
|
63 | |||
64 | es_query = { |
|
64 | es_query = { | |
65 | "aggs": { |
|
65 | "aggs": { | |
66 | "parent_agg": { |
|
66 | "parent_agg": { | |
67 | "aggs": { |
|
67 | "aggs": { | |
68 | "custom": { |
|
68 | "custom": { | |
69 | "aggs": { |
|
69 | "aggs": { | |
70 | "sub_agg": { |
|
70 | "sub_agg": { | |
71 | "sum": {"field": "tags.custom.numeric_values"} |
|
71 | "sum": {"field": "tags.custom.numeric_values"} | |
72 | } |
|
72 | } | |
73 | }, |
|
73 | }, | |
74 | "filter": { |
|
74 | "filter": { | |
75 | "exists": {"field": "tags.custom.numeric_values"} |
|
75 | "exists": {"field": "tags.custom.numeric_values"} | |
76 | }, |
|
76 | }, | |
77 | }, |
|
77 | }, | |
78 | "main": { |
|
78 | "main": { | |
79 | "aggs": { |
|
79 | "aggs": { | |
80 | "sub_agg": { |
|
80 | "sub_agg": { | |
81 | "sum": {"field": "tags.main.numeric_values"} |
|
81 | "sum": {"field": "tags.main.numeric_values"} | |
82 | } |
|
82 | } | |
83 | }, |
|
83 | }, | |
84 | "filter": {"exists": {"field": "tags.main.numeric_values"}}, |
|
84 | "filter": {"exists": {"field": "tags.main.numeric_values"}}, | |
85 | }, |
|
85 | }, | |
86 | "nosql": { |
|
86 | "nosql": { | |
87 | "aggs": { |
|
87 | "aggs": { | |
88 | "sub_agg": { |
|
88 | "sub_agg": { | |
89 | "sum": {"field": "tags.nosql.numeric_values"} |
|
89 | "sum": {"field": "tags.nosql.numeric_values"} | |
90 | } |
|
90 | } | |
91 | }, |
|
91 | }, | |
92 | "filter": { |
|
92 | "filter": { | |
93 | "exists": {"field": "tags.nosql.numeric_values"} |
|
93 | "exists": {"field": "tags.nosql.numeric_values"} | |
94 | }, |
|
94 | }, | |
95 | }, |
|
95 | }, | |
96 | "remote": { |
|
96 | "remote": { | |
97 | "aggs": { |
|
97 | "aggs": { | |
98 | "sub_agg": { |
|
98 | "sub_agg": { | |
99 | "sum": {"field": "tags.remote.numeric_values"} |
|
99 | "sum": {"field": "tags.remote.numeric_values"} | |
100 | } |
|
100 | } | |
101 | }, |
|
101 | }, | |
102 | "filter": { |
|
102 | "filter": { | |
103 | "exists": {"field": "tags.remote.numeric_values"} |
|
103 | "exists": {"field": "tags.remote.numeric_values"} | |
104 | }, |
|
104 | }, | |
105 | }, |
|
105 | }, | |
106 | "requests": { |
|
106 | "requests": { | |
107 | "aggs": { |
|
107 | "aggs": { | |
108 | "sub_agg": { |
|
108 | "sub_agg": { | |
109 | "sum": {"field": "tags.requests.numeric_values"} |
|
109 | "sum": {"field": "tags.requests.numeric_values"} | |
110 | } |
|
110 | } | |
111 | }, |
|
111 | }, | |
112 | "filter": { |
|
112 | "filter": { | |
113 | "exists": {"field": "tags.requests.numeric_values"} |
|
113 | "exists": {"field": "tags.requests.numeric_values"} | |
114 | }, |
|
114 | }, | |
115 | }, |
|
115 | }, | |
116 | "sql": { |
|
116 | "sql": { | |
117 | "aggs": { |
|
117 | "aggs": { | |
118 | "sub_agg": {"sum": {"field": "tags.sql.numeric_values"}} |
|
118 | "sub_agg": {"sum": {"field": "tags.sql.numeric_values"}} | |
119 | }, |
|
119 | }, | |
120 | "filter": {"exists": {"field": "tags.sql.numeric_values"}}, |
|
120 | "filter": {"exists": {"field": "tags.sql.numeric_values"}}, | |
121 | }, |
|
121 | }, | |
122 | "tmpl": { |
|
122 | "tmpl": { | |
123 | "aggs": { |
|
123 | "aggs": { | |
124 | "sub_agg": { |
|
124 | "sub_agg": { | |
125 | "sum": {"field": "tags.tmpl.numeric_values"} |
|
125 | "sum": {"field": "tags.tmpl.numeric_values"} | |
126 | } |
|
126 | } | |
127 | }, |
|
127 | }, | |
128 | "filter": {"exists": {"field": "tags.tmpl.numeric_values"}}, |
|
128 | "filter": {"exists": {"field": "tags.tmpl.numeric_values"}}, | |
129 | }, |
|
129 | }, | |
130 | }, |
|
130 | }, | |
131 | "date_histogram": { |
|
131 | "date_histogram": { | |
132 | "extended_bounds": { |
|
132 | "extended_bounds": { | |
133 | "max": filter_settings["end_date"], |
|
133 | "max": filter_settings["end_date"], | |
134 | "min": filter_settings["start_date"], |
|
134 | "min": filter_settings["start_date"], | |
135 | }, |
|
135 | }, | |
136 | "field": "timestamp", |
|
136 | "field": "timestamp", | |
137 | "interval": interval, |
|
137 | "interval": interval, | |
138 | "min_doc_count": 0, |
|
138 | "min_doc_count": 0, | |
139 | }, |
|
139 | }, | |
140 | } |
|
140 | } | |
141 | }, |
|
141 | }, | |
142 | "query": { |
|
142 | "query": { | |
143 | "bool": { |
|
143 | "bool": { | |
144 | "filter": [ |
|
144 | "filter": [ | |
145 | { |
|
145 | {"terms": {"resource_id": [filter_settings["resource"][0]]}}, | |
146 | "terms": { |
|
|||
147 | "resource_id": [filter_settings["resource"][0]] |
|
|||
148 | } |
|
|||
149 | }, |
|
|||
150 | { |
|
146 | { | |
151 | "range": { |
|
147 | "range": { | |
152 | "timestamp": { |
|
148 | "timestamp": { | |
153 | "gte": filter_settings["start_date"], |
|
149 | "gte": filter_settings["start_date"], | |
154 | "lte": filter_settings["end_date"], |
|
150 | "lte": filter_settings["end_date"], | |
155 | } |
|
151 | } | |
156 | } |
|
152 | } | |
157 | }, |
|
153 | }, | |
158 | {"terms": {"namespace": ["appenlight.request_metric"]}}, |
|
154 | {"terms": {"namespace": ["appenlight.request_metric"]}}, | |
159 | ] |
|
155 | ] | |
160 | } |
|
156 | } | |
161 | }, |
|
157 | }, | |
162 | } |
|
158 | } | |
163 |
|
159 | |||
164 | index_names = es_index_name_limiter( |
|
160 | index_names = es_index_name_limiter( | |
165 | start_date=filter_settings["start_date"], |
|
161 | start_date=filter_settings["start_date"], | |
166 | end_date=filter_settings["end_date"], |
|
162 | end_date=filter_settings["end_date"], | |
167 | ixtypes=["metrics"], |
|
163 | ixtypes=["metrics"], | |
168 | ) |
|
164 | ) | |
169 | if not index_names: |
|
165 | if not index_names: | |
170 | return [] |
|
166 | return [] | |
171 |
|
167 | |||
172 | result = Datastores.es.search( |
|
168 | result = Datastores.es.search( | |
173 | body=es_query, index=index_names, doc_type="log", size=0 |
|
169 | body=es_query, index=index_names, doc_type="log", size=0 | |
174 | ) |
|
170 | ) | |
175 |
|
171 | |||
176 | plot_data = [] |
|
172 | plot_data = [] | |
177 | for item in result["aggregations"]["parent_agg"]["buckets"]: |
|
173 | for item in result["aggregations"]["parent_agg"]["buckets"]: | |
178 | x_time = datetime.utcfromtimestamp(int(item["key"]) / 1000) |
|
174 | x_time = datetime.utcfromtimestamp(int(item["key"]) / 1000) | |
179 | point = {"x": x_time} |
|
175 | point = {"x": x_time} | |
180 | for key in ["custom", "main", "nosql", "remote", "requests", "sql", "tmpl"]: |
|
176 | for key in ["custom", "main", "nosql", "remote", "requests", "sql", "tmpl"]: | |
181 | value = item[key]["sub_agg"]["value"] |
|
177 | value = item[key]["sub_agg"]["value"] | |
182 | point[key] = round(value, 3) if value else 0 |
|
178 | point[key] = round(value, 3) if value else 0 | |
183 | plot_data.append(point) |
|
179 | plot_data.append(point) | |
184 |
|
180 | |||
185 | return plot_data |
|
181 | return plot_data | |
186 |
|
182 | |||
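A hedged sketch of one entry in the plot_data list returned above (the numbers are invented; the keys mirror the aggregation names in the query):

    from datetime import datetime

    example_point = {
        "x": datetime(2017, 5, 1, 12, 0),  # bucket timestamp from the date_histogram
        "custom": 0.0,
        "main": 12.537,     # summed tags.main.numeric_values, rounded to 3 decimals
        "nosql": 0.0,
        "remote": 1.25,
        "requests": 42.0,
        "sql": 3.104,
        "tmpl": 0.871,
    }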
187 | @classmethod |
|
183 | @classmethod | |
188 | def get_requests_breakdown(cls, request, filter_settings, db_session=None): |
|
184 | def get_requests_breakdown(cls, request, filter_settings, db_session=None): | |
189 | db_session = get_db_session(db_session) |
|
185 | db_session = get_db_session(db_session) | |
190 |
|
186 | |||
191 | # fetch total time of all requests in this time range |
|
187 | # fetch total time of all requests in this time range | |
192 | index_names = es_index_name_limiter( |
|
188 | index_names = es_index_name_limiter( | |
193 | start_date=filter_settings["start_date"], |
|
189 | start_date=filter_settings["start_date"], | |
194 | end_date=filter_settings["end_date"], |
|
190 | end_date=filter_settings["end_date"], | |
195 | ixtypes=["metrics"], |
|
191 | ixtypes=["metrics"], | |
196 | ) |
|
192 | ) | |
197 |
|
193 | |||
198 | if index_names and filter_settings["resource"]: |
|
194 | if index_names and filter_settings["resource"]: | |
199 | es_query = { |
|
195 | es_query = { | |
200 | "aggs": { |
|
196 | "aggs": { | |
201 | "main": { |
|
197 | "main": { | |
202 | "aggs": { |
|
198 | "aggs": { | |
203 | "sub_agg": {"sum": {"field": "tags.main.numeric_values"}} |
|
199 | "sub_agg": {"sum": {"field": "tags.main.numeric_values"}} | |
204 | }, |
|
200 | }, | |
205 | "filter": {"exists": {"field": "tags.main.numeric_values"}}, |
|
201 | "filter": {"exists": {"field": "tags.main.numeric_values"}}, | |
206 | } |
|
202 | } | |
207 | }, |
|
203 | }, | |
208 | "query": { |
|
204 | "query": { | |
209 | "bool": { |
|
205 | "bool": { | |
210 | "filter": [ |
|
206 | "filter": [ | |
211 | { |
|
207 | { | |
212 | "terms": { |
|
208 | "terms": { | |
213 | "resource_id": [filter_settings["resource"][0]] |
|
209 | "resource_id": [filter_settings["resource"][0]] | |
214 | } |
|
210 | } | |
215 | }, |
|
211 | }, | |
216 | { |
|
212 | { | |
217 | "range": { |
|
213 | "range": { | |
218 | "timestamp": { |
|
214 | "timestamp": { | |
219 | "gte": filter_settings["start_date"], |
|
215 | "gte": filter_settings["start_date"], | |
220 | "lte": filter_settings["end_date"], |
|
216 | "lte": filter_settings["end_date"], | |
221 | } |
|
217 | } | |
222 | } |
|
218 | } | |
223 | }, |
|
219 | }, | |
224 | {"terms": {"namespace": ["appenlight.request_metric"]}}, |
|
220 | {"terms": {"namespace": ["appenlight.request_metric"]}}, | |
225 | ] |
|
221 | ] | |
226 | } |
|
222 | } | |
227 | }, |
|
223 | }, | |
228 | } |
|
224 | } | |
229 | result = Datastores.es.search( |
|
225 | result = Datastores.es.search( | |
230 | body=es_query, index=index_names, doc_type="log", size=0 |
|
226 | body=es_query, index=index_names, doc_type="log", size=0 | |
231 | ) |
|
227 | ) | |
232 | total_time_spent = result["aggregations"]["main"]["sub_agg"]["value"] |
|
228 | total_time_spent = result["aggregations"]["main"]["sub_agg"]["value"] | |
233 | else: |
|
229 | else: | |
234 | total_time_spent = 0 |
|
230 | total_time_spent = 0 | |
235 | script_text = "doc['tags.main.numeric_values'].value / {}".format( |
|
231 | script_text = "doc['tags.main.numeric_values'].value / {}".format( | |
236 | total_time_spent |
|
232 | total_time_spent | |
237 | ) |
|
233 | ) | |
238 | if total_time_spent == 0: |
|
234 | if total_time_spent == 0: | |
239 | script_text = "0"
|
235 | script_text = "0" | |
240 |
|
236 | |||
241 | if index_names and filter_settings["resource"]: |
|
237 | if index_names and filter_settings["resource"]: | |
242 | es_query = { |
|
238 | es_query = { | |
243 | "aggs": { |
|
239 | "aggs": { | |
244 | "parent_agg": { |
|
240 | "parent_agg": { | |
245 | "aggs": { |
|
241 | "aggs": { | |
246 | "main": { |
|
242 | "main": { | |
247 | "aggs": { |
|
243 | "aggs": { | |
248 | "sub_agg": { |
|
244 | "sub_agg": { | |
249 | "sum": {"field": "tags.main.numeric_values"} |
|
245 | "sum": {"field": "tags.main.numeric_values"} | |
250 | } |
|
246 | } | |
251 | }, |
|
247 | }, | |
252 | "filter": { |
|
248 | "filter": { | |
253 | "exists": {"field": "tags.main.numeric_values"} |
|
249 | "exists": {"field": "tags.main.numeric_values"} | |
254 | }, |
|
250 | }, | |
255 | }, |
|
251 | }, | |
256 | "percentage": { |
|
252 | "percentage": { | |
257 | "aggs": { |
|
253 | "aggs": {"sub_agg": {"sum": {"script": script_text}}}, | |
258 | "sub_agg": { |
|
|||
259 | "sum": { |
|
|||
260 | "script": script_text, |
|
|||
261 | } |
|
|||
262 | } |
|
|||
263 | }, |
|
|||
264 | "filter": { |
|
254 | "filter": { | |
265 | "exists": {"field": "tags.main.numeric_values"} |
|
255 | "exists": {"field": "tags.main.numeric_values"} | |
266 | }, |
|
256 | }, | |
267 | }, |
|
257 | }, | |
268 | "requests": { |
|
258 | "requests": { | |
269 | "aggs": { |
|
259 | "aggs": { | |
270 | "sub_agg": { |
|
260 | "sub_agg": { | |
271 | "sum": {"field": "tags.requests.numeric_values"} |
|
261 | "sum": {"field": "tags.requests.numeric_values"} | |
272 | } |
|
262 | } | |
273 | }, |
|
263 | }, | |
274 | "filter": { |
|
264 | "filter": { | |
275 | "exists": {"field": "tags.requests.numeric_values"} |
|
265 | "exists": {"field": "tags.requests.numeric_values"} | |
276 | }, |
|
266 | }, | |
277 | }, |
|
267 | }, | |
278 | }, |
|
268 | }, | |
279 | "terms": { |
|
269 | "terms": { | |
280 | "field": "tags.view_name.values.keyword", |
|
270 | "field": "tags.view_name.values.keyword", | |
281 | "order": {"percentage>sub_agg": "desc"}, |
|
271 | "order": {"percentage>sub_agg": "desc"}, | |
282 | "size": 15, |
|
272 | "size": 15, | |
283 | }, |
|
273 | }, | |
284 | } |
|
274 | } | |
285 | }, |
|
275 | }, | |
286 | "query": { |
|
276 | "query": { | |
287 | "bool": { |
|
277 | "bool": { | |
288 | "filter": [ |
|
278 | "filter": [ | |
289 | { |
|
279 | { | |
290 | "terms": { |
|
280 | "terms": { | |
291 | "resource_id": [filter_settings["resource"][0]] |
|
281 | "resource_id": [filter_settings["resource"][0]] | |
292 | } |
|
282 | } | |
293 | }, |
|
283 | }, | |
294 | { |
|
284 | { | |
295 | "range": { |
|
285 | "range": { | |
296 | "timestamp": { |
|
286 | "timestamp": { | |
297 | "gte": filter_settings["start_date"], |
|
287 | "gte": filter_settings["start_date"], | |
298 | "lte": filter_settings["end_date"], |
|
288 | "lte": filter_settings["end_date"], | |
299 | } |
|
289 | } | |
300 | } |
|
290 | } | |
301 | }, |
|
291 | }, | |
302 | ] |
|
292 | ] | |
303 | } |
|
293 | } | |
304 | }, |
|
294 | }, | |
305 | } |
|
295 | } | |
306 | result = Datastores.es.search( |
|
296 | result = Datastores.es.search( | |
307 | body=es_query, index=index_names, doc_type="log", size=0 |
|
297 | body=es_query, index=index_names, doc_type="log", size=0 | |
308 | ) |
|
298 | ) | |
309 | series = result["aggregations"]["parent_agg"]["buckets"] |
|
299 | series = result["aggregations"]["parent_agg"]["buckets"] | |
310 | else: |
|
300 | else: | |
311 | series = [] |
|
301 | series = [] | |
312 |
|
302 | |||
313 | and_part = [ |
|
303 | and_part = [ | |
314 | {"term": {"resource_id": filter_settings["resource"][0]}}, |
|
304 | {"term": {"resource_id": filter_settings["resource"][0]}}, | |
315 | {"terms": {"tags.view_name.values": [row["key"] for row in series]}}, |
|
305 | {"terms": {"tags.view_name.values": [row["key"] for row in series]}}, | |
316 | {"term": {"report_type": str(ReportType.slow)}}, |
|
306 | {"term": {"report_type": str(ReportType.slow)}}, | |
317 | ] |
|
307 | ] | |
318 | query = { |
|
308 | query = { | |
319 | "aggs": { |
|
309 | "aggs": { | |
320 | "top_reports": { |
|
310 | "top_reports": { | |
321 | "terms": {"field": "tags.view_name.values.keyword", "size": len(series)}, |
|
311 | "terms": { | |
|
312 | "field": "tags.view_name.values.keyword", | |||
|
313 | "size": len(series), | |||
|
314 | }, | |||
322 | "aggs": { |
|
315 | "aggs": { | |
323 | "top_calls_hits": { |
|
316 | "top_calls_hits": { | |
324 | "top_hits": {"sort": {"start_time": "desc"}, "size": 5} |
|
317 | "top_hits": {"sort": {"start_time": "desc"}, "size": 5} | |
325 | } |
|
318 | } | |
326 | }, |
|
319 | }, | |
327 | } |
|
320 | } | |
328 | }, |
|
321 | }, | |
329 | "query": {"bool": {"filter": and_part}}, |
|
322 | "query": {"bool": {"filter": and_part}}, | |
330 | } |
|
323 | } | |
331 | details = {} |
|
324 | details = {} | |
332 | index_names = es_index_name_limiter(ixtypes=["reports"]) |
|
325 | index_names = es_index_name_limiter(ixtypes=["reports"]) | |
333 | if index_names and series: |
|
326 | if index_names and series: | |
334 | result = Datastores.es.search( |
|
327 | result = Datastores.es.search( | |
335 | body=query, doc_type="report", size=0, index=index_names |
|
328 | body=query, doc_type="report", size=0, index=index_names | |
336 | ) |
|
329 | ) | |
337 | for bucket in result["aggregations"]["top_reports"]["buckets"]: |
|
330 | for bucket in result["aggregations"]["top_reports"]["buckets"]: | |
338 | details[bucket["key"]] = [] |
|
331 | details[bucket["key"]] = [] | |
339 |
|
332 | |||
340 | for hit in bucket["top_calls_hits"]["hits"]["hits"]: |
|
333 | for hit in bucket["top_calls_hits"]["hits"]["hits"]: | |
341 | details[bucket["key"]].append( |
|
334 | details[bucket["key"]].append( | |
342 | { |
|
335 | { | |
343 | "report_id": hit["_source"]["request_metric_id"], |
|
336 | "report_id": hit["_source"]["request_metric_id"], | |
344 | "group_id": hit["_source"]["group_id"], |
|
337 | "group_id": hit["_source"]["group_id"], | |
345 | } |
|
338 | } | |
346 | ) |
|
339 | ) | |
347 |
|
340 | |||
348 | results = [] |
|
341 | results = [] | |
349 | for row in series: |
|
342 | for row in series: | |
350 | result = { |
|
343 | result = { | |
351 | "key": row["key"], |
|
344 | "key": row["key"], | |
352 | "main": row["main"]["sub_agg"]["value"], |
|
345 | "main": row["main"]["sub_agg"]["value"], | |
353 | "requests": row["requests"]["sub_agg"]["value"], |
|
346 | "requests": row["requests"]["sub_agg"]["value"], | |
354 | } |
|
347 | } | |
355 | # es can return 'infinity' |
|
348 | # es can return 'infinity' | |
356 | try: |
|
349 | try: | |
357 | result["percentage"] = float(row["percentage"]["sub_agg"]["value"]) |
|
350 | result["percentage"] = float(row["percentage"]["sub_agg"]["value"]) | |
358 | except ValueError: |
|
351 | except ValueError: | |
359 | result["percentage"] = 0 |
|
352 | result["percentage"] = 0 | |
360 |
|
353 | |||
361 | result["latest_details"] = details.get(row["key"]) or [] |
|
354 | result["latest_details"] = details.get(row["key"]) or [] | |
362 | results.append(result) |
|
355 | results.append(result) | |
363 |
|
356 | |||
364 | return results |
|
357 | return results | |
365 |
|
358 | |||
366 | @classmethod |
|
359 | @classmethod | |
367 | def get_apdex_stats(cls, request, filter_settings, threshold=1, db_session=None): |
|
360 | def get_apdex_stats(cls, request, filter_settings, threshold=1, db_session=None): | |
368 | """ |
|
361 | """ | |
369 | Returns information and calculates APDEX score per server for dashboard |
|
362 | Returns information and calculates APDEX score per server for dashboard | |
370 | server information (upper right stats boxes) |
|
363 | server information (upper right stats boxes) | |
371 | """ |
|
364 | """ | |
372 | # Apdex t = (Satisfied Count + Tolerated Count / 2) / Total Samples |
|
365 | # Apdex t = (Satisfied Count + Tolerated Count / 2) / Total Samples | |
373 | db_session = get_db_session(db_session) |
|
366 | db_session = get_db_session(db_session) | |
374 | index_names = es_index_name_limiter( |
|
367 | index_names = es_index_name_limiter( | |
375 | start_date=filter_settings["start_date"], |
|
368 | start_date=filter_settings["start_date"], | |
376 | end_date=filter_settings["end_date"], |
|
369 | end_date=filter_settings["end_date"], | |
377 | ixtypes=["metrics"], |
|
370 | ixtypes=["metrics"], | |
378 | ) |
|
371 | ) | |
379 |
|
372 | |||
380 | requests_series = [] |
|
373 | requests_series = [] | |
381 |
|
374 | |||
382 | if index_names and filter_settings["resource"]: |
|
375 | if index_names and filter_settings["resource"]: | |
383 | es_query = { |
|
376 | es_query = { | |
384 | "aggs": { |
|
377 | "aggs": { | |
385 | "parent_agg": { |
|
378 | "parent_agg": { | |
386 | "aggs": { |
|
379 | "aggs": { | |
387 | "frustrating": { |
|
380 | "frustrating": { | |
388 | "aggs": { |
|
381 | "aggs": { | |
389 | "sub_agg": { |
|
382 | "sub_agg": { | |
390 | "sum": {"field": "tags.requests.numeric_values"} |
|
383 | "sum": {"field": "tags.requests.numeric_values"} | |
391 | } |
|
384 | } | |
392 | }, |
|
385 | }, | |
393 | "filter": { |
|
386 | "filter": { | |
394 | "bool": { |
|
387 | "bool": { | |
395 | "filter": [ |
|
388 | "filter": [ | |
396 | { |
|
389 | { | |
397 | "range": { |
|
390 | "range": { | |
398 | "tags.main.numeric_values": {"gte": "4"}
|
391 | "tags.main.numeric_values": { | |
|
392 | "gte": "4" | |||
|
393 | } | |||
399 | } |
|
394 | } | |
400 | }, |
|
395 | }, | |
401 | { |
|
396 | { | |
402 | "exists": { |
|
397 | "exists": { | |
403 | "field": "tags.requests.numeric_values" |
|
398 | "field": "tags.requests.numeric_values" | |
404 | } |
|
399 | } | |
405 | }, |
|
400 | }, | |
406 | ] |
|
401 | ] | |
407 | } |
|
402 | } | |
408 | }, |
|
403 | }, | |
409 | }, |
|
404 | }, | |
410 | "main": { |
|
405 | "main": { | |
411 | "aggs": { |
|
406 | "aggs": { | |
412 | "sub_agg": { |
|
407 | "sub_agg": { | |
413 | "sum": {"field": "tags.main.numeric_values"} |
|
408 | "sum": {"field": "tags.main.numeric_values"} | |
414 | } |
|
409 | } | |
415 | }, |
|
410 | }, | |
416 | "filter": { |
|
411 | "filter": { | |
417 | "exists": {"field": "tags.main.numeric_values"} |
|
412 | "exists": {"field": "tags.main.numeric_values"} | |
418 | }, |
|
413 | }, | |
419 | }, |
|
414 | }, | |
420 | "requests": { |
|
415 | "requests": { | |
421 | "aggs": { |
|
416 | "aggs": { | |
422 | "sub_agg": { |
|
417 | "sub_agg": { | |
423 | "sum": {"field": "tags.requests.numeric_values"} |
|
418 | "sum": {"field": "tags.requests.numeric_values"} | |
424 | } |
|
419 | } | |
425 | }, |
|
420 | }, | |
426 | "filter": { |
|
421 | "filter": { | |
427 | "exists": {"field": "tags.requests.numeric_values"} |
|
422 | "exists": {"field": "tags.requests.numeric_values"} | |
428 | }, |
|
423 | }, | |
429 | }, |
|
424 | }, | |
430 | "tolerated": { |
|
425 | "tolerated": { | |
431 | "aggs": { |
|
426 | "aggs": { | |
432 | "sub_agg": { |
|
427 | "sub_agg": { | |
433 | "sum": {"field": "tags.requests.numeric_values"} |
|
428 | "sum": {"field": "tags.requests.numeric_values"} | |
434 | } |
|
429 | } | |
435 | }, |
|
430 | }, | |
436 | "filter": { |
|
431 | "filter": { | |
437 | "bool": {
|
432 | "bool": { | |
|
433 | "filter": [ | |||
438 | { |
|
434 | { | |
439 | "range": { |
|
435 | "range": { | |
440 | "tags.main.numeric_values": {"gte": "1"}
|
436 | "tags.main.numeric_values": { | |
|
437 | "gte": "1" | |||
|
438 | } | |||
441 | } |
|
439 | } | |
442 | }, |
|
440 | }, | |
443 | { |
|
441 | { | |
444 | "range": { |
|
442 | "range": { | |
445 | "tags.main.numeric_values": {"lt": "4"}
|
443 | "tags.main.numeric_values": { | |
|
444 | "lt": "4" | |||
|
445 | } | |||
446 | } |
|
446 | } | |
447 | }, |
|
447 | }, | |
448 | { |
|
448 | { | |
449 | "exists": { |
|
449 | "exists": { | |
450 | "field": "tags.requests.numeric_values" |
|
450 | "field": "tags.requests.numeric_values" | |
451 | } |
|
451 | } | |
452 | }, |
|
452 | }, | |
453 | ]
|
453 | ] | |
|
454 | } | |||
|
455 | }, | |||
454 |
|
|
456 | }, | |
455 |
|
|
457 | }, | |
|
458 | "terms": { | |||
|
459 | "field": "tags.server_name.values.keyword", | |||
|
460 | "size": 999999, | |||
456 | }, |
|
461 | }, | |
457 | "terms": {"field": "tags.server_name.values.keyword", "size": 999999}, |
|
|||
458 | } |
|
462 | } | |
459 | }, |
|
463 | }, | |
460 | "query": { |
|
464 | "query": { | |
461 | "bool": { |
|
465 | "bool": { | |
462 | "filter": [ |
|
466 | "filter": [ | |
463 | { |
|
467 | { | |
464 | "terms": { |
|
468 | "terms": { | |
465 | "resource_id": [filter_settings["resource"][0]] |
|
469 | "resource_id": [filter_settings["resource"][0]] | |
466 | } |
|
470 | } | |
467 | }, |
|
471 | }, | |
468 | { |
|
472 | { | |
469 | "range": { |
|
473 | "range": { | |
470 | "timestamp": { |
|
474 | "timestamp": { | |
471 | "gte": filter_settings["start_date"], |
|
475 | "gte": filter_settings["start_date"], | |
472 | "lte": filter_settings["end_date"], |
|
476 | "lte": filter_settings["end_date"], | |
473 | } |
|
477 | } | |
474 | } |
|
478 | } | |
475 | }, |
|
479 | }, | |
476 | {"terms": {"namespace": ["appenlight.request_metric"]}}, |
|
480 | {"terms": {"namespace": ["appenlight.request_metric"]}}, | |
477 | ] |
|
481 | ] | |
478 | } |
|
482 | } | |
479 | }, |
|
483 | }, | |
480 | } |
|
484 | } | |
481 |
|
485 | |||
482 | result = Datastores.es.search( |
|
486 | result = Datastores.es.search( | |
483 | body=es_query, index=index_names, doc_type="log", size=0 |
|
487 | body=es_query, index=index_names, doc_type="log", size=0 | |
484 | ) |
|
488 | ) | |
485 | for bucket in result["aggregations"]["parent_agg"]["buckets"]: |
|
489 | for bucket in result["aggregations"]["parent_agg"]["buckets"]: | |
486 | requests_series.append( |
|
490 | requests_series.append( | |
487 | { |
|
491 | { | |
488 | "frustrating": bucket["frustrating"]["sub_agg"]["value"], |
|
492 | "frustrating": bucket["frustrating"]["sub_agg"]["value"], | |
489 | "main": bucket["main"]["sub_agg"]["value"], |
|
493 | "main": bucket["main"]["sub_agg"]["value"], | |
490 | "requests": bucket["requests"]["sub_agg"]["value"], |
|
494 | "requests": bucket["requests"]["sub_agg"]["value"], | |
491 | "tolerated": bucket["tolerated"]["sub_agg"]["value"], |
|
495 | "tolerated": bucket["tolerated"]["sub_agg"]["value"], | |
492 | "key": bucket["key"], |
|
496 | "key": bucket["key"], | |
493 | } |
|
497 | } | |
494 | ) |
|
498 | ) | |
495 |
|
499 | |||
496 | since_when = filter_settings["start_date"] |
|
500 | since_when = filter_settings["start_date"] | |
497 | until = filter_settings["end_date"] |
|
501 | until = filter_settings["end_date"] | |
498 |
|
502 | |||
499 | # total errors |
|
503 | # total errors | |
500 |
|
504 | |||
501 | index_names = es_index_name_limiter( |
|
505 | index_names = es_index_name_limiter( | |
502 | start_date=filter_settings["start_date"], |
|
506 | start_date=filter_settings["start_date"], | |
503 | end_date=filter_settings["end_date"], |
|
507 | end_date=filter_settings["end_date"], | |
504 | ixtypes=["reports"], |
|
508 | ixtypes=["reports"], | |
505 | ) |
|
509 | ) | |
506 |
|
510 | |||
507 | report_series = [] |
|
511 | report_series = [] | |
508 | if index_names and filter_settings["resource"]: |
|
512 | if index_names and filter_settings["resource"]: | |
509 | report_type = ReportType.key_from_value(ReportType.error) |
|
513 | report_type = ReportType.key_from_value(ReportType.error) | |
510 | es_query = { |
|
514 | es_query = { | |
511 | "aggs": { |
|
515 | "aggs": { | |
512 | "parent_agg": { |
|
516 | "parent_agg": { | |
513 | "aggs": { |
|
517 | "aggs": { | |
514 | "errors": { |
|
518 | "errors": { | |
515 | "aggs": { |
|
519 | "aggs": { | |
516 | "sub_agg": { |
|
520 | "sub_agg": { | |
517 | "sum": { |
|
521 | "sum": { | |
518 | "field": "tags.occurences.numeric_values" |
|
522 | "field": "tags.occurences.numeric_values" | |
519 | } |
|
523 | } | |
520 | } |
|
524 | } | |
521 | }, |
|
525 | }, | |
522 | "filter": { |
|
526 | "filter": { | |
523 | "bool": { |
|
527 | "bool": { | |
524 | "filter": [ |
|
528 | "filter": [ | |
525 | {
|
529 | { | |
|
530 | "terms": { | |||
|
531 | "tags.type.values": [report_type] | |||
|
532 | } | |||
|
533 | }, | |||
526 | { |
|
534 | { | |
527 | "exists": { |
|
535 | "exists": { | |
528 | "field": "tags.occurences.numeric_values" |
|
536 | "field": "tags.occurences.numeric_values" | |
529 | } |
|
537 | } | |
530 | }, |
|
538 | }, | |
531 | ] |
|
539 | ] | |
532 | } |
|
540 | } | |
533 | }, |
|
541 | }, | |
534 | } |
|
542 | } | |
535 | }, |
|
543 | }, | |
536 | "terms": {"field": "tags.server_name.values.keyword", "size": 999999}, |
|
544 | "terms": { | |
|
545 | "field": "tags.server_name.values.keyword", | |||
|
546 | "size": 999999, | |||
|
547 | }, | |||
537 | } |
|
548 | } | |
538 | }, |
|
549 | }, | |
539 | "query": { |
|
550 | "query": { | |
540 | "bool": { |
|
551 | "bool": { | |
541 | "filter": [ |
|
552 | "filter": [ | |
542 | { |
|
553 | { | |
543 | "terms": { |
|
554 | "terms": { | |
544 | "resource_id": [filter_settings["resource"][0]] |
|
555 | "resource_id": [filter_settings["resource"][0]] | |
545 | } |
|
556 | } | |
546 | }, |
|
557 | }, | |
547 | { |
|
558 | { | |
548 | "range": { |
|
559 | "range": { | |
549 | "timestamp": { |
|
560 | "timestamp": { | |
550 | "gte": filter_settings["start_date"], |
|
561 | "gte": filter_settings["start_date"], | |
551 | "lte": filter_settings["end_date"], |
|
562 | "lte": filter_settings["end_date"], | |
552 | } |
|
563 | } | |
553 | } |
|
564 | } | |
554 | }, |
|
565 | }, | |
555 | {"terms": {"namespace": ["appenlight.error"]}}, |
|
566 | {"terms": {"namespace": ["appenlight.error"]}}, | |
556 | ] |
|
567 | ] | |
557 | } |
|
568 | } | |
558 | }, |
|
569 | }, | |
559 | } |
|
570 | } | |
560 | result = Datastores.es.search( |
|
571 | result = Datastores.es.search( | |
561 | body=es_query, index=index_names, doc_type="log", size=0 |
|
572 | body=es_query, index=index_names, doc_type="log", size=0 | |
562 | ) |
|
573 | ) | |
563 | for bucket in result["aggregations"]["parent_agg"]["buckets"]: |
|
574 | for bucket in result["aggregations"]["parent_agg"]["buckets"]: | |
564 | report_series.append( |
|
575 | report_series.append( | |
565 | { |
|
576 | { | |
566 | "key": bucket["key"], |
|
577 | "key": bucket["key"], | |
567 | "errors": bucket["errors"]["sub_agg"]["value"], |
|
578 | "errors": bucket["errors"]["sub_agg"]["value"], | |
568 | } |
|
579 | } | |
569 | ) |
|
580 | ) | |
570 |
|
581 | |||
571 | stats = {} |
|
582 | stats = {} | |
572 | if UptimeMetricService is not None: |
|
583 | if UptimeMetricService is not None: | |
573 | uptime = UptimeMetricService.get_uptime_by_app( |
|
584 | uptime = UptimeMetricService.get_uptime_by_app( | |
574 | filter_settings["resource"][0], since_when=since_when, until=until |
|
585 | filter_settings["resource"][0], since_when=since_when, until=until | |
575 | ) |
|
586 | ) | |
576 | else: |
|
587 | else: | |
577 | uptime = 0 |
|
588 | uptime = 0 | |
578 |
|
589 | |||
579 | total_seconds = (until - since_when).total_seconds() |
|
590 | total_seconds = (until - since_when).total_seconds() | |
580 |
|
591 | |||
581 | for stat in requests_series: |
|
592 | for stat in requests_series: | |
582 | check_key(stat["key"], stats, uptime, total_seconds) |
|
593 | check_key(stat["key"], stats, uptime, total_seconds) | |
583 | stats[stat["key"]]["requests"] = int(stat["requests"]) |
|
594 | stats[stat["key"]]["requests"] = int(stat["requests"]) | |
584 | stats[stat["key"]]["response_time"] = stat["main"] |
|
595 | stats[stat["key"]]["response_time"] = stat["main"] | |
585 | stats[stat["key"]]["tolerated_requests"] = stat["tolerated"] |
|
596 | stats[stat["key"]]["tolerated_requests"] = stat["tolerated"] | |
586 | stats[stat["key"]]["frustrating_requests"] = stat["frustrating"] |
|
597 | stats[stat["key"]]["frustrating_requests"] = stat["frustrating"] | |
587 | for server in report_series: |
|
598 | for server in report_series: | |
588 | check_key(server["key"], stats, uptime, total_seconds) |
|
599 | check_key(server["key"], stats, uptime, total_seconds) | |
589 | stats[server["key"]]["errors"] = server["errors"] |
|
600 | stats[server["key"]]["errors"] = server["errors"] | |
590 |
|
601 | |||
591 | server_stats = list(stats.values()) |
|
602 | server_stats = list(stats.values()) | |
592 | for stat in server_stats: |
|
603 | for stat in server_stats: | |
593 | stat["satisfying_requests"] = ( |
|
604 | stat["satisfying_requests"] = ( | |
594 |
|
|
605 | stat["requests"] | |
595 |
|
|
606 | - stat["errors"] | |
596 |
|
|
607 | - stat["frustrating_requests"] | |
597 |
|
|
608 | - stat["tolerated_requests"] | |
598 | ) |
|
609 | ) | |
599 | if stat["satisfying_requests"] < 0: |
|
610 | if stat["satisfying_requests"] < 0: | |
600 | stat["satisfying_requests"] = 0 |
|
611 | stat["satisfying_requests"] = 0 | |
601 |
|
612 | |||
602 | if stat["requests"]: |
|
613 | if stat["requests"]: | |
603 | stat["avg_response_time"] = round( |
|
614 | stat["avg_response_time"] = round( | |
604 | stat["response_time"] / stat["requests"], 3 |
|
615 | stat["response_time"] / stat["requests"], 3 | |
605 | ) |
|
616 | ) | |
606 | qual_requests = ( |
|
617 | qual_requests = ( | |
607 |
|
|
618 | stat["satisfying_requests"] + stat["tolerated_requests"] / 2.0 | |
608 | ) |
|
619 | ) | |
609 | stat["apdex"] = round((qual_requests / stat["requests"]) * 100, 2) |
|
620 | stat["apdex"] = round((qual_requests / stat["requests"]) * 100, 2) | |
610 | stat["rpm"] = round(stat["requests"] / stat["total_minutes"], 2) |
|
621 | stat["rpm"] = round(stat["requests"] / stat["total_minutes"], 2) | |
611 |
|
622 | |||
612 | return sorted(server_stats, key=lambda x: x["name"]) |
|
623 | return sorted(server_stats, key=lambda x: x["name"]) |
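A worked example of the Apdex math implemented above, as a minimal sketch with invented numbers (the formula comes from the comment in get_apdex_stats, Apdex t = (Satisfied Count + Tolerated Count / 2) / Total Samples, scaled to a percentage in this code):

    requests = 1000
    errors = 20
    frustrating = 100
    tolerated = 200

    satisfying = max(requests - errors - frustrating - tolerated, 0)  # 680
    qual_requests = satisfying + tolerated / 2.0                      # 780.0
    apdex = round(qual_requests / requests * 100, 2)                  # 78.0
    rpm = round(requests / (6 * 60), 2)                               # 2.78 for a 6h window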
@@ -1,182 +1,181 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 |
|
2 | |||
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors |
|
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors | |
4 | # |
|
4 | # | |
5 | # Licensed under the Apache License, Version 2.0 (the "License"); |
|
5 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
6 | # you may not use this file except in compliance with the License. |
|
6 | # you may not use this file except in compliance with the License. | |
7 | # You may obtain a copy of the License at |
|
7 | # You may obtain a copy of the License at | |
8 | # |
|
8 | # | |
9 | # http://www.apache.org/licenses/LICENSE-2.0 |
|
9 | # http://www.apache.org/licenses/LICENSE-2.0 | |
10 | # |
|
10 | # | |
11 | # Unless required by applicable law or agreed to in writing, software |
|
11 | # Unless required by applicable law or agreed to in writing, software | |
12 | # distributed under the License is distributed on an "AS IS" BASIS, |
|
12 | # distributed under the License is distributed on an "AS IS" BASIS, | |
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
14 | # See the License for the specific language governing permissions and |
|
14 | # See the License for the specific language governing permissions and | |
15 | # limitations under the License. |
|
15 | # limitations under the License. | |
16 |
|
16 | |||
17 | from appenlight.models import get_db_session, Datastores |
|
17 | from appenlight.models import get_db_session, Datastores | |
18 | from appenlight.models.report import Report |
|
18 | from appenlight.models.report import Report | |
19 | from appenlight.models.services.base import BaseService |
|
19 | from appenlight.models.services.base import BaseService | |
20 | from appenlight.lib.utils import es_index_name_limiter |
|
20 | from appenlight.lib.utils import es_index_name_limiter | |
21 |
|
21 | |||
22 |
|
22 | |||
23 | class SlowCallService(BaseService): |
|
23 | class SlowCallService(BaseService): | |
24 | @classmethod |
|
24 | @classmethod | |
25 | def get_time_consuming_calls(cls, request, filter_settings, db_session=None): |
|
25 | def get_time_consuming_calls(cls, request, filter_settings, db_session=None): | |
26 | db_session = get_db_session(db_session) |
|
26 | db_session = get_db_session(db_session) | |
27 | # get slow calls from older partitions too |
|
27 | # get slow calls from older partitions too | |
28 | index_names = es_index_name_limiter( |
|
28 | index_names = es_index_name_limiter( | |
29 | start_date=filter_settings["start_date"], |
|
29 | start_date=filter_settings["start_date"], | |
30 | end_date=filter_settings["end_date"], |
|
30 | end_date=filter_settings["end_date"], | |
31 | ixtypes=["slow_calls"], |
|
31 | ixtypes=["slow_calls"], | |
32 | ) |
|
32 | ) | |
33 | if index_names and filter_settings["resource"]: |
|
33 | if index_names and filter_settings["resource"]: | |
34 | # get longest time taking hashes |
|
34 | # get longest time taking hashes | |
35 | es_query = { |
|
35 | es_query = { | |
36 | "aggs": { |
|
36 | "aggs": { | |
37 | "parent_agg": { |
|
37 | "parent_agg": { | |
38 | "aggs": { |
|
38 | "aggs": { | |
39 | "duration": { |
|
39 | "duration": { | |
40 | "aggs": { |
|
40 | "aggs": { | |
41 | "sub_agg": { |
|
41 | "sub_agg": { | |
42 | "sum": {"field": "tags.duration.numeric_values"} |
|
42 | "sum": {"field": "tags.duration.numeric_values"} | |
43 | } |
|
43 | } | |
44 | }, |
|
44 | }, | |
45 | "filter": { |
|
45 | "filter": { | |
46 | "exists": {"field": "tags.duration.numeric_values"} |
|
46 | "exists": {"field": "tags.duration.numeric_values"} | |
47 | }, |
|
47 | }, | |
48 | }, |
|
48 | }, | |
49 | "total": { |
|
49 | "total": { | |
50 | "aggs": { |
|
50 | "aggs": { | |
51 | "sub_agg": { |
|
51 | "sub_agg": { | |
52 | "value_count": { |
|
52 | "value_count": { | |
53 | "field": "tags.statement_hash.values.keyword" |
|
53 | "field": "tags.statement_hash.values.keyword" | |
54 | } |
|
54 | } | |
55 | } |
|
55 | } | |
56 | }, |
|
56 | }, | |
57 | "filter": { |
|
57 | "filter": { | |
58 | "exists": {"field": "tags.statement_hash.values"} |
|
58 | "exists": {"field": "tags.statement_hash.values"} | |
59 | }, |
|
59 | }, | |
60 | }, |
|
60 | }, | |
61 | }, |
|
61 | }, | |
62 | "terms": { |
|
62 | "terms": { | |
63 | "field": "tags.statement_hash.values.keyword", |
|
63 | "field": "tags.statement_hash.values.keyword", | |
64 | "order": {"duration>sub_agg": "desc"}, |
|
64 | "order": {"duration>sub_agg": "desc"}, | |
65 | "size": 15, |
|
65 | "size": 15, | |
66 | }, |
|
66 | }, | |
67 | } |
|
67 | } | |
68 | }, |
|
68 | }, | |
69 | "query": { |
|
69 | "query": { | |
70 | "bool": { |
|
70 | "bool": { | |
71 | "filter": [ |
|
71 | "filter": [ | |
72 | { |
|
72 | { | |
73 | "terms": { |
|
73 | "terms": { | |
74 | "resource_id": [filter_settings["resource"][0]] |
|
74 | "resource_id": [filter_settings["resource"][0]] | |
75 | } |
|
75 | } | |
76 | }, |
|
76 | }, | |
77 | { |
|
77 | { | |
78 | "range": { |
|
78 | "range": { | |
79 | "timestamp": { |
|
79 | "timestamp": { | |
80 | "gte": filter_settings["start_date"], |
|
80 | "gte": filter_settings["start_date"], | |
81 | "lte": filter_settings["end_date"], |
|
81 | "lte": filter_settings["end_date"], | |
82 | } |
|
82 | } | |
83 | } |
|
83 | } | |
84 | }, |
|
84 | }, | |
85 | ] |
|
85 | ] | |
86 | } |
|
86 | } | |
87 | }, |
|
87 | }, | |
88 | } |
|
88 | } | |
89 | result = Datastores.es.search( |
|
89 | result = Datastores.es.search( | |
90 | body=es_query, index=index_names, doc_type="log", size=0 |
|
90 | body=es_query, index=index_names, doc_type="log", size=0 | |
91 | ) |
|
91 | ) | |
92 | results = result["aggregations"]["parent_agg"]["buckets"] |
|
92 | results = result["aggregations"]["parent_agg"]["buckets"] | |
93 | else: |
|
93 | else: | |
94 | return [] |
|
94 | return [] | |
95 | hashes = [i["key"] for i in results] |
|
95 | hashes = [i["key"] for i in results] | |
96 |
|
96 | |||
97 | # get queries associated with hashes |
|
97 | # get queries associated with hashes | |
98 | calls_query = { |
|
98 | calls_query = { | |
99 | "aggs": { |
|
99 | "aggs": { | |
100 | "top_calls": { |
|
100 | "top_calls": { | |
101 | "terms": {"field": "tags.statement_hash.values.keyword", "size": 15}, |
|
101 | "terms": { | |
|
102 | "field": "tags.statement_hash.values.keyword", | |||
|
103 | "size": 15, | |||
|
104 | }, | |||
102 | "aggs": { |
|
105 | "aggs": { | |
103 | "top_calls_hits": { |
|
106 | "top_calls_hits": { | |
104 | "top_hits": {"sort": {"timestamp": "desc"}, "size": 5} |
|
107 | "top_hits": {"sort": {"timestamp": "desc"}, "size": 5} | |
105 | } |
|
108 | } | |
106 | }, |
|
109 | }, | |
107 | } |
|
110 | } | |
108 | }, |
|
111 | }, | |
109 | "query": { |
|
112 | "query": { | |
110 | "bool": { |
|
113 | "bool": { | |
111 | "filter": [ |
|
114 | "filter": [ | |
112 | { |
|
115 | {"terms": {"resource_id": [filter_settings["resource"][0]]}}, | |
113 | "terms": { |
|
|||
114 | "resource_id": [filter_settings["resource"][0]] |
|
|||
115 | } |
|
|||
116 | }, |
|
|||
117 | {"terms": {"tags.statement_hash.values": hashes}}, |
|
116 | {"terms": {"tags.statement_hash.values": hashes}}, | |
118 | { |
|
117 | { | |
119 | "range": { |
|
118 | "range": { | |
120 | "timestamp": { |
|
119 | "timestamp": { | |
121 | "gte": filter_settings["start_date"], |
|
120 | "gte": filter_settings["start_date"], | |
122 | "lte": filter_settings["end_date"], |
|
121 | "lte": filter_settings["end_date"], | |
123 | } |
|
122 | } | |
124 | } |
|
123 | } | |
125 | }, |
|
124 | }, | |
126 | ] |
|
125 | ] | |
127 | } |
|
126 | } | |
128 | }, |
|
127 | }, | |
129 | } |
|
128 | } | |
130 | calls = Datastores.es.search( |
|
129 | calls = Datastores.es.search( | |
131 | body=calls_query, index=index_names, doc_type="log", size=0 |
|
130 | body=calls_query, index=index_names, doc_type="log", size=0 | |
132 | ) |
|
131 | ) | |
133 | call_results = {} |
|
132 | call_results = {} | |
134 | report_ids = [] |
|
133 | report_ids = [] | |
135 | for call in calls["aggregations"]["top_calls"]["buckets"]: |
|
134 | for call in calls["aggregations"]["top_calls"]["buckets"]: | |
136 | hits = call["top_calls_hits"]["hits"]["hits"] |
|
135 | hits = call["top_calls_hits"]["hits"]["hits"] | |
137 | call_results[call["key"]] = [i["_source"] for i in hits] |
|
136 | call_results[call["key"]] = [i["_source"] for i in hits] | |
138 | report_ids.extend( |
|
137 | report_ids.extend( | |
139 | [i["_source"]["tags"]["report_id"]["values"] for i in hits] |
|
138 | [i["_source"]["tags"]["report_id"]["values"] for i in hits] | |
140 | ) |
|
139 | ) | |
141 | if report_ids: |
|
140 | if report_ids: | |
142 | r_query = db_session.query(Report.group_id, Report.id) |
|
141 | r_query = db_session.query(Report.group_id, Report.id) | |
143 | r_query = r_query.filter(Report.id.in_(report_ids)) |
|
142 | r_query = r_query.filter(Report.id.in_(report_ids)) | |
144 | r_query = r_query.filter(Report.start_time >= filter_settings["start_date"]) |
|
143 | r_query = r_query.filter(Report.start_time >= filter_settings["start_date"]) | |
145 | else: |
|
144 | else: | |
146 | r_query = [] |
|
145 | r_query = [] | |
147 | reports_reversed = {} |
|
146 | reports_reversed = {} | |
148 | for report in r_query: |
|
147 | for report in r_query: | |
149 | reports_reversed[report.id] = report.group_id |
|
148 | reports_reversed[report.id] = report.group_id | |
150 |
|
149 | |||
151 | final_results = [] |
|
150 | final_results = [] | |
152 | for item in results: |
|
151 | for item in results: | |
153 | if item["key"] not in call_results: |
|
152 | if item["key"] not in call_results: | |
154 | continue |
|
153 | continue | |
155 | call = call_results[item["key"]][0] |
|
154 | call = call_results[item["key"]][0] | |
156 | row = { |
|
155 | row = { | |
157 | "occurences": item["total"]["sub_agg"]["value"], |
|
156 | "occurences": item["total"]["sub_agg"]["value"], | |
158 | "total_duration": round(item["duration"]["sub_agg"]["value"]), |
|
157 | "total_duration": round(item["duration"]["sub_agg"]["value"]), | |
159 | "statement": call["message"], |
|
158 | "statement": call["message"], | |
160 | "statement_type": call["tags"]["type"]["values"], |
|
159 | "statement_type": call["tags"]["type"]["values"], | |
161 | "statement_subtype": call["tags"]["subtype"]["values"], |
|
160 | "statement_subtype": call["tags"]["subtype"]["values"], | |
162 | "statement_hash": item["key"], |
|
161 | "statement_hash": item["key"], | |
163 | "latest_details": [], |
|
162 | "latest_details": [], | |
164 | } |
|
163 | } | |
165 | if row["statement_type"] in ["tmpl", " remote"]: |
|
164 | if row["statement_type"] in ["tmpl", " remote"]: | |
166 | params = ( |
|
165 | params = ( | |
167 | call["tags"]["parameters"]["values"] |
|
166 | call["tags"]["parameters"]["values"] | |
168 | if "parameters" in call["tags"] |
|
167 | if "parameters" in call["tags"] | |
169 | else "" |
|
168 | else "" | |
170 | ) |
|
169 | ) | |
171 | row["statement"] = "{} ({})".format(call["message"], params) |
|
170 | row["statement"] = "{} ({})".format(call["message"], params) | |
172 | for call in call_results[item["key"]]: |
|
171 | for call in call_results[item["key"]]: | |
173 | report_id = call["tags"]["report_id"]["values"] |
|
172 | report_id = call["tags"]["report_id"]["values"] | |
174 | group_id = reports_reversed.get(report_id) |
|
173 | group_id = reports_reversed.get(report_id) | |
175 | if group_id: |
|
174 | if group_id: | |
176 | row["latest_details"].append( |
|
175 | row["latest_details"].append( | |
177 | {"group_id": group_id, "report_id": report_id} |
|
176 | {"group_id": group_id, "report_id": report_id} | |
178 | ) |
|
177 | ) | |
179 |
|
178 | |||
180 | final_results.append(row) |
|
179 | final_results.append(row) | |
181 |
|
180 | |||
182 | return final_results |
|
181 | return final_results |
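Both services above rank buckets with the same Elasticsearch idiom: a terms aggregation ordered by a metric nested inside a single-bucket sub-aggregation, addressed with the ">" path syntax (for example "duration>sub_agg" and "percentage>sub_agg"). A simplified, hedged sketch of that query shape:

    es_body = {
        "size": 0,
        "aggs": {
            "parent_agg": {
                "terms": {
                    "field": "tags.statement_hash.values.keyword",
                    "size": 15,
                    # rank hashes by the summed duration computed below
                    "order": {"duration>sub_agg": "desc"},
                },
                "aggs": {
                    "duration": {
                        "filter": {"exists": {"field": "tags.duration.numeric_values"}},
                        "aggs": {
                            "sub_agg": {"sum": {"field": "tags.duration.numeric_values"}}
                        },
                    }
                },
            }
        },
    }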
@@ -1,572 +1,559 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 |
|
2 | |||
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors |
|
3 | # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors | |
4 | # |
|
4 | # | |
5 | # Licensed under the Apache License, Version 2.0 (the "License"); |
|
5 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
6 | # you may not use this file except in compliance with the License. |
|
6 | # you may not use this file except in compliance with the License. | |
7 | # You may obtain a copy of the License at |
|
7 | # You may obtain a copy of the License at | |
8 | # |
|
8 | # | |
9 | # http://www.apache.org/licenses/LICENSE-2.0 |
|
9 | # http://www.apache.org/licenses/LICENSE-2.0 | |
10 | # |
|
10 | # | |
11 | # Unless required by applicable law or agreed to in writing, software |
|
11 | # Unless required by applicable law or agreed to in writing, software | |
12 | # distributed under the License is distributed on an "AS IS" BASIS, |
|
12 | # distributed under the License is distributed on an "AS IS" BASIS, | |
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
14 | # See the License for the specific language governing permissions and |
|
14 | # See the License for the specific language governing permissions and | |
15 | # limitations under the License. |
|
15 | # limitations under the License. | |
16 |
|
16 | |||
17 | import argparse |
|
17 | import argparse | |
18 | import datetime |
|
18 | import datetime | |
19 | import logging |
|
19 | import logging | |
20 | import copy |
|
20 | import copy | |
21 |
|
21 | |||
22 | import sqlalchemy as sa |
|
22 | import sqlalchemy as sa | |
23 | import elasticsearch.exceptions |
|
23 | import elasticsearch.exceptions | |
24 | import elasticsearch.helpers |
|
24 | import elasticsearch.helpers | |
25 |
|
25 | |||
26 | from collections import defaultdict |
|
26 | from collections import defaultdict | |
27 | from pyramid.paster import setup_logging |
|
27 | from pyramid.paster import setup_logging | |
28 | from pyramid.paster import bootstrap |
|
28 | from pyramid.paster import bootstrap | |
29 | from appenlight.models import DBSession, Datastores, metadata |
|
29 | from appenlight.models import DBSession, Datastores, metadata | |
30 | from appenlight.lib import get_callable |
|
30 | from appenlight.lib import get_callable | |
31 | from appenlight.models.report_group import ReportGroup |
|
31 | from appenlight.models.report_group import ReportGroup | |
32 | from appenlight.models.report import Report |
|
32 | from appenlight.models.report import Report | |
33 | from appenlight.models.report_stat import ReportStat |
|
33 | from appenlight.models.report_stat import ReportStat | |
34 | from appenlight.models.log import Log |
|
34 | from appenlight.models.log import Log | |
35 | from appenlight.models.slow_call import SlowCall |
|
35 | from appenlight.models.slow_call import SlowCall | |
36 | from appenlight.models.metric import Metric |
|
36 | from appenlight.models.metric import Metric | |
37 |
|
37 | |||
38 | log = logging.getLogger(__name__) |
|
38 | log = logging.getLogger(__name__) | |
39 |
|
39 | |||
40 | tables = { |
|
40 | tables = { | |
41 | "slow_calls_p_": [], |
|
41 | "slow_calls_p_": [], | |
42 | "reports_stats_p_": [], |
|
42 | "reports_stats_p_": [], | |
43 | "reports_p_": [], |
|
43 | "reports_p_": [], | |
44 | "reports_groups_p_": [], |
|
44 | "reports_groups_p_": [], | |
45 | "logs_p_": [], |
|
45 | "logs_p_": [], | |
46 | "metrics_p_": [], |
|
46 | "metrics_p_": [], | |
47 | } |
|
47 | } | |
48 |
|
48 | |||
49 |
|
49 | |||
50 | def detect_tables(table_prefix): |
|
50 | def detect_tables(table_prefix): | |
51 | found_tables = [] |
|
51 | found_tables = [] | |
52 | db_tables_query = """ |
|
52 | db_tables_query = """ | |
53 | SELECT tablename FROM pg_tables WHERE tablename NOT LIKE 'pg_%' AND |
|
53 | SELECT tablename FROM pg_tables WHERE tablename NOT LIKE 'pg_%' AND | |
54 | tablename NOT LIKE 'sql_%' ORDER BY tablename ASC;""" |
|
54 | tablename NOT LIKE 'sql_%' ORDER BY tablename ASC;""" | |
55 |
|
55 | |||
56 | for table in DBSession.execute(db_tables_query).fetchall(): |
|
56 | for table in DBSession.execute(db_tables_query).fetchall(): | |
57 | tablename = table.tablename |
|
57 | tablename = table.tablename | |
58 | if tablename.startswith(table_prefix): |
|
58 | if tablename.startswith(table_prefix): | |
59 | t = sa.Table( |
|
59 | t = sa.Table( | |
60 | tablename, metadata, autoload=True, autoload_with=DBSession.bind.engine |
|
60 | tablename, metadata, autoload=True, autoload_with=DBSession.bind.engine | |
61 | ) |
|
61 | ) | |
62 | found_tables.append(t) |
|
62 | found_tables.append(t) | |
63 | return found_tables |
|
63 | return found_tables | |
64 |
|
64 | |||
65 |
|
65 | |||
66 | def main(): |
|
66 | def main(): | |
67 | """ |
|
67 | """ | |
68 | Recreates Elasticsearch indexes |
|
68 | Recreates Elasticsearch indexes | |
69 | Performs reindex of whole db to Elasticsearch |
|
69 | Performs reindex of whole db to Elasticsearch | |
70 |
|
70 | |||
71 | """ |
|
71 | """ | |
72 |
|
72 | |||
73 | # need parser twice because we first need to load ini file |
|
73 | # need parser twice because we first need to load ini file | |
74 | # bootstrap pyramid and then load plugins |
|
74 | # bootstrap pyramid and then load plugins | |
75 | pre_parser = argparse.ArgumentParser( |
|
75 | pre_parser = argparse.ArgumentParser( | |
76 | description="Reindex AppEnlight data", add_help=False |
|
76 | description="Reindex AppEnlight data", add_help=False | |
77 | ) |
|
77 | ) | |
78 | pre_parser.add_argument( |
|
78 | pre_parser.add_argument( | |
79 | "-c", "--config", required=True, help="Configuration ini file of application" |
|
79 | "-c", "--config", required=True, help="Configuration ini file of application" | |
80 | ) |
|
80 | ) | |
81 | pre_parser.add_argument("-h", "--help", help="Show help", nargs="?") |
|
81 | pre_parser.add_argument("-h", "--help", help="Show help", nargs="?") | |
82 | pre_parser.add_argument( |
|
82 | pre_parser.add_argument( | |
83 | "-t", "--types", nargs="+", help="Which parts of database should get reindexed" |
|
83 | "-t", "--types", nargs="+", help="Which parts of database should get reindexed" | |
84 | ) |
|
84 | ) | |
85 | args = pre_parser.parse_args() |
|
85 | args = pre_parser.parse_args() | |
86 |
|
86 | |||
87 | config_uri = args.config |
|
87 | config_uri = args.config | |
88 | setup_logging(config_uri) |
|
88 | setup_logging(config_uri) | |
89 | log.setLevel(logging.INFO) |
|
89 | log.setLevel(logging.INFO) | |
90 | env = bootstrap(config_uri) |
|
90 | env = bootstrap(config_uri) | |
91 | parser = argparse.ArgumentParser(description="Reindex AppEnlight data") |
|
91 | parser = argparse.ArgumentParser(description="Reindex AppEnlight data") | |
92 | choices = { |
|
92 | choices = { | |
93 | "reports": "appenlight.scripts.reindex_elasticsearch:reindex_reports", |
|
93 | "reports": "appenlight.scripts.reindex_elasticsearch:reindex_reports", | |
94 | "logs": "appenlight.scripts.reindex_elasticsearch:reindex_logs", |
|
94 | "logs": "appenlight.scripts.reindex_elasticsearch:reindex_logs", | |
95 | "metrics": "appenlight.scripts.reindex_elasticsearch:reindex_metrics", |
|
95 | "metrics": "appenlight.scripts.reindex_elasticsearch:reindex_metrics", | |
96 | "slow_calls": "appenlight.scripts.reindex_elasticsearch:reindex_slow_calls", |
|
96 | "slow_calls": "appenlight.scripts.reindex_elasticsearch:reindex_slow_calls", | |
97 | "template": "appenlight.scripts.reindex_elasticsearch:update_template", |
|
97 | "template": "appenlight.scripts.reindex_elasticsearch:update_template", | |
98 | } |
|
98 | } | |
99 | for k, v in env["registry"].appenlight_plugins.items(): |
|
99 | for k, v in env["registry"].appenlight_plugins.items(): | |
100 | if v.get("fulltext_indexer"): |
|
100 | if v.get("fulltext_indexer"): | |
101 | choices[k] = v["fulltext_indexer"] |
|
101 | choices[k] = v["fulltext_indexer"] | |
102 | parser.add_argument( |
|
102 | parser.add_argument( | |
103 | "-t", |
|
103 | "-t", | |
104 | "--types", |
|
104 | "--types", | |
105 | nargs="*", |
|
105 | nargs="*", | |
106 | choices=["all"] + list(choices.keys()), |
|
106 | choices=["all"] + list(choices.keys()), | |
107 | default=[], |
|
107 | default=[], | |
108 | help="Which parts of database should get reindexed", |
|
108 | help="Which parts of database should get reindexed", | |
109 | ) |
|
109 | ) | |
110 | parser.add_argument( |
|
110 | parser.add_argument( | |
111 | "-c", "--config", required=True, help="Configuration ini file of application" |
|
111 | "-c", "--config", required=True, help="Configuration ini file of application" | |
112 | ) |
|
112 | ) | |
113 | args = parser.parse_args() |
|
113 | args = parser.parse_args() | |
114 |
|
114 | |||
115 | if "all" in args.types: |
|
115 | if "all" in args.types: | |
116 | args.types = list(choices.keys()) |
|
116 | args.types = list(choices.keys()) | |
117 |
|
117 | |||
118 | print("Selected types to reindex: {}".format(args.types)) |
|
118 | print("Selected types to reindex: {}".format(args.types)) | |
119 |
|
119 | |||
120 | log.info("settings {}".format(args.types)) |
|
120 | log.info("settings {}".format(args.types)) | |
121 |
|
121 | |||
122 | if "template" in args.types: |
|
122 | if "template" in args.types: | |
123 | get_callable(choices["template"])() |
|
123 | get_callable(choices["template"])() | |
124 | args.types.remove("template") |
|
124 | args.types.remove("template") | |
125 | for selected in args.types: |
|
125 | for selected in args.types: | |
126 | get_callable(choices[selected])() |
|
126 | get_callable(choices[selected])() | |
127 |
|
127 | |||
128 |
|
128 | |||
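A hedged usage note for the reindex entry point above (the exact console-script name is not shown in this diff; the module path is taken from the choices mapping, and the invocation form is an assumption):

    # python -m appenlight.scripts.reindex_elasticsearch -c production.ini -t template reports logs

Passing "-t all" expands to every registered type, including plugin-provided indexers, and "template" is handled first so the Elasticsearch index templates are recreated before any documents are pushed.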
129 | def update_template(): |
|
129 | def update_template(): | |
130 | try: |
|
130 | try: | |
131 | Datastores.es.indices.delete_template("rcae_reports") |
|
131 | Datastores.es.indices.delete_template("rcae_reports") | |
132 | except elasticsearch.exceptions.NotFoundError as e: |
|
132 | except elasticsearch.exceptions.NotFoundError as e: | |
133 | log.error(e) |
|
133 | log.error(e) | |
134 |
|
134 | |||
135 | try: |
|
135 | try: | |
136 | Datastores.es.indices.delete_template("rcae_logs") |
|
136 | Datastores.es.indices.delete_template("rcae_logs") | |
137 | except elasticsearch.exceptions.NotFoundError as e: |
|
137 | except elasticsearch.exceptions.NotFoundError as e: | |
138 | log.error(e) |
|
138 | log.error(e) | |
139 | try: |
|
139 | try: | |
140 | Datastores.es.indices.delete_template("rcae_slow_calls") |
|
140 | Datastores.es.indices.delete_template("rcae_slow_calls") | |
141 | except elasticsearch.exceptions.NotFoundError as e: |
|
141 | except elasticsearch.exceptions.NotFoundError as e: | |
142 | log.error(e) |
|
142 | log.error(e) | |
143 | try: |
|
143 | try: | |
144 | Datastores.es.indices.delete_template("rcae_metrics") |
|
144 | Datastores.es.indices.delete_template("rcae_metrics") | |
145 | except elasticsearch.exceptions.NotFoundError as e: |
|
145 | except elasticsearch.exceptions.NotFoundError as e: | |
146 | log.error(e) |
|
146 | log.error(e) | |
147 | log.info("updating elasticsearch template") |
|
147 | log.info("updating elasticsearch template") | |
148 | tag_templates = [ |
|
148 | tag_templates = [ | |
149 | { |
|
149 | { | |
150 | "values": { |
|
150 | "values": { | |
151 | "path_match": "tags.*", |
|
151 | "path_match": "tags.*", | |
152 | "mapping": { |
|
152 | "mapping": { | |
153 | "type": "object", |
|
153 | "type": "object", | |
154 | "properties": { |
|
154 | "properties": { | |
155 | "values": {
|
155 | "values": { | |
|
156 | "type": "text", | |||
|
157 | "analyzer": "tag_value", | |||
156 |
|
|
158 | "fields": { | |
157 |
|
|
159 | "keyword": {"type": "keyword", "ignore_above": 256} | |
158 |
|
|
160 | }, | |
159 | "ignore_above": 256 |
|
161 | }, | |
160 | } |
|
|||
161 | }}, |
|
|||
162 | "numeric_values": {"type": "float"}, |
|
162 | "numeric_values": {"type": "float"}, | |
163 | }, |
|
163 | }, | |
164 | }, |
|
164 | }, | |
165 | } |
|
165 | } | |
166 | } |
|
166 | } | |
167 | ] |
|
167 | ] | |
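To make the dynamic templates above concrete, here is a hedged, illustrative tag payload (the tag names are assumptions, not taken from the source): any tags.<name>.values field is indexed as text with the tag_value analyzer plus a keyword sub-field, while tags.<name>.numeric_values is stored as a float.

    example_doc = {
        "resource_id": 1,
        "namespace": "appenlight.request_metric",
        "tags": {
            "view_name": {
                "values": "users:index",   # -> text (tag_value analyzer) + .keyword sub-field
                "numeric_values": None,
            },
            "main": {
                "values": "0.25",
                "numeric_values": 0.25,    # -> float, summable by the metric aggregations
            },
        },
    }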
168 |
|
168 | |||
169 | shared_analysis = { |
|
169 | shared_analysis = { | |
170 | "analyzer": { |
|
170 | "analyzer": { | |
171 | "url_path": { |
|
171 | "url_path": { | |
172 | "type": "custom", |
|
172 | "type": "custom", | |
173 | "char_filter": [], |
|
173 | "char_filter": [], | |
174 | "tokenizer": "path_hierarchy", |
|
174 | "tokenizer": "path_hierarchy", | |
175 | "filter": [], |
|
175 | "filter": [], | |
176 | }, |
|
176 | }, | |
177 | "tag_value": { |
|
177 | "tag_value": { | |
178 | "type": "custom", |
|
178 | "type": "custom", | |
179 | "char_filter": [], |
|
179 | "char_filter": [], | |
180 | "tokenizer": "keyword", |
|
180 | "tokenizer": "keyword", | |
181 | "filter": ["lowercase"], |
|
181 | "filter": ["lowercase"], | |
182 | }, |
|
182 | }, | |
183 | } |
|
183 | } | |
184 | } |
|
184 | } | |

    shared_log_mapping = {
        "_all": {"enabled": False},
        "dynamic_templates": tag_templates,
        "properties": {
            "pg_id": {"type": "keyword", "index": True},
            "delete_hash": {"type": "keyword", "index": True},
            "resource_id": {"type": "integer"},
            "timestamp": {"type": "date"},
            "permanent": {"type": "boolean"},
            "request_id": {"type": "keyword", "index": True},
            "log_level": {"type": "text", "analyzer": "simple"},
            "message": {"type": "text", "analyzer": "simple"},
            "namespace": {
                "type": "text",
                "fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
            },
            "tags": {"type": "object"},
            "tag_list": {
                "type": "text",
                "analyzer": "tag_value",
                "fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
            },
        },
    }

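    # Report groups, reports and report stats all live in the single "rcae_r_*"
    # template below; the "join_field" models the parent/child relation between
    # a report_group document and its report / report_stat children.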
    report_schema = {
        "template": "rcae_r_*",
        "settings": {
            "index": {
                "refresh_interval": "5s",
                "translog": {"sync_interval": "5s", "durability": "async"},
            },
            "number_of_shards": 5,
            "analysis": shared_analysis,
        },
        "mappings": {
            "report": {
                "_all": {"enabled": False},
                "dynamic_templates": tag_templates,
                "properties": {
                    "type": {"type": "keyword", "index": True},
                    # report group
                    "group_id": {"type": "keyword", "index": True},
                    "resource_id": {"type": "integer"},
                    "priority": {"type": "integer"},
                    "error": {"type": "text", "analyzer": "simple"},
                    "read": {"type": "boolean"},
                    "occurences": {"type": "integer"},
                    "fixed": {"type": "boolean"},
                    "first_timestamp": {"type": "date"},
                    "last_timestamp": {"type": "date"},
                    "average_duration": {"type": "float"},
                    "summed_duration": {"type": "float"},
                    "public": {"type": "boolean"},
                    # report
                    "report_id": {"type": "keyword", "index": True},
                    "http_status": {"type": "integer"},
                    "ip": {"type": "keyword", "index": True},
                    "url_domain": {"type": "text", "analyzer": "simple"},
                    "url_path": {"type": "text", "analyzer": "url_path"},
                    "report_type": {"type": "integer"},
                    "start_time": {"type": "date"},
                    "request_id": {"type": "keyword", "index": True},
                    "end_time": {"type": "date"},
                    "duration": {"type": "float"},
                    "tags": {"type": "object"},
                    "tag_list": {
                        "type": "text",
                        "analyzer": "tag_value",
                        "fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
                    },
                    "extra": {"type": "object"},
                    # report stats
                    "report_stat_id": {"type": "keyword", "index": True},
                    "timestamp": {"type": "date"},
                    "permanent": {"type": "boolean"},
                    "log_level": {"type": "text", "analyzer": "simple"},
                    "message": {"type": "text", "analyzer": "simple"},
                    "namespace": {
                        "type": "text",
                        "fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
                    },
                    "join_field": {
                        "type": "join",
                        "relations": {"report_group": ["report", "report_stat"]},
                    },
                },
            }
        },
    }

    Datastores.es.indices.put_template("rcae_reports", body=report_schema)

    logs_mapping = copy.deepcopy(shared_log_mapping)
    logs_mapping["properties"]["log_id"] = logs_mapping["properties"]["pg_id"]
    del logs_mapping["properties"]["pg_id"]

    log_template = {
        "template": "rcae_l_*",
        "settings": {
            "index": {
                "refresh_interval": "5s",
                "translog": {"sync_interval": "5s", "durability": "async"},
            },
            "number_of_shards": 5,
            "analysis": shared_analysis,
        },
        "mappings": {"log": logs_mapping},
    }

    Datastores.es.indices.put_template("rcae_logs", body=log_template)

    slow_call_mapping = copy.deepcopy(shared_log_mapping)
    slow_call_mapping["properties"]["slow_call_id"] = slow_call_mapping["properties"][
        "pg_id"
    ]
    del slow_call_mapping["properties"]["pg_id"]

    slow_call_template = {
        "template": "rcae_sc_*",
        "settings": {
            "index": {
                "refresh_interval": "5s",
                "translog": {"sync_interval": "5s", "durability": "async"},
            },
            "number_of_shards": 5,
            "analysis": shared_analysis,
        },
        "mappings": {"log": slow_call_mapping},
    }

    Datastores.es.indices.put_template("rcae_slow_calls", body=slow_call_template)

    metric_mapping = copy.deepcopy(shared_log_mapping)
    metric_mapping["properties"]["metric_id"] = metric_mapping["properties"]["pg_id"]
    del metric_mapping["properties"]["pg_id"]

    metrics_template = {
        "template": "rcae_m_*",
        "settings": {
            "index": {
                "refresh_interval": "5s",
                "translog": {"sync_interval": "5s", "durability": "async"},
            },
            "number_of_shards": 5,
            "analysis": shared_analysis,
        },
        "mappings": {"log": metric_mapping},
    }

    Datastores.es.indices.put_template("rcae_metrics", body=metrics_template)

    uptime_metric_mapping = copy.deepcopy(shared_log_mapping)
    uptime_metric_mapping["properties"]["uptime_id"] = uptime_metric_mapping[
        "properties"
    ]["pg_id"]
    del uptime_metric_mapping["properties"]["pg_id"]

    uptime_metrics_template = {
        "template": "rcae_uptime_ce_*",
        "settings": {
            "index": {
                "refresh_interval": "5s",
                "translog": {"sync_interval": "5s", "durability": "async"},
            },
            "number_of_shards": 5,
            "analysis": shared_analysis,
        },
        "mappings": {"log": shared_log_mapping},
    }

    Datastores.es.indices.put_template(
        "rcae_uptime_metrics", body=uptime_metrics_template
    )

def reindex_reports():
    reports_groups_tables = detect_tables("reports_groups_p_")
    try:
        Datastores.es.indices.delete("rcae_r_*")
    except elasticsearch.exceptions.NotFoundError as e:
        log.error(e)

    log.info("reindexing report groups")
    i = 0
    task_start = datetime.datetime.now()
    for partition_table in reports_groups_tables:
        conn = DBSession.connection().execution_options(stream_results=True)
        result = conn.execute(partition_table.select())
        while True:
            chunk = result.fetchmany(2000)
            if not chunk:
                break
            es_docs = defaultdict(list)
            for row in chunk:
                i += 1
                item = ReportGroup(**dict(list(row.items())))
                d_range = item.partition_id
                es_docs[d_range].append(item.es_doc())
            if es_docs:
                name = partition_table.name
                log.info("round {}, {}".format(i, name))
                for k, v in es_docs.items():
                    to_update = {"_index": k, "_type": "report"}
                    [i.update(to_update) for i in v]
                    elasticsearch.helpers.bulk(Datastores.es, v)

    log.info("total docs {} {}".format(i, datetime.datetime.now() - task_start))

    i = 0
    log.info("reindexing reports")
    task_start = datetime.datetime.now()
    reports_tables = detect_tables("reports_p_")
    for partition_table in reports_tables:
        conn = DBSession.connection().execution_options(stream_results=True)
        result = conn.execute(partition_table.select())
        while True:
            chunk = result.fetchmany(2000)
            if not chunk:
                break
            es_docs = defaultdict(list)
            for row in chunk:
                i += 1
                item = Report(**dict(list(row.items())))
                d_range = item.partition_id
                es_docs[d_range].append(item.es_doc())
            if es_docs:
                name = partition_table.name
                log.info("round {}, {}".format(i, name))
                for k, v in es_docs.items():
                    to_update = {"_index": k, "_type": "report"}
                    [i.update(to_update) for i in v]
                    elasticsearch.helpers.bulk(Datastores.es, v)

    log.info("total docs {} {}".format(i, datetime.datetime.now() - task_start))

    log.info("reindexing reports stats")
    i = 0
    task_start = datetime.datetime.now()
    reports_stats_tables = detect_tables("reports_stats_p_")
    for partition_table in reports_stats_tables:
        conn = DBSession.connection().execution_options(stream_results=True)
        result = conn.execute(partition_table.select())
        while True:
            chunk = result.fetchmany(2000)
            if not chunk:
                break
            es_docs = defaultdict(list)
            for row in chunk:
                rd = dict(list(row.items()))
                # remove legacy columns
                # TODO: remove the column later
                rd.pop("size", None)
                item = ReportStat(**rd)
                i += 1
                d_range = item.partition_id
                es_docs[d_range].append(item.es_doc())
            if es_docs:
                name = partition_table.name
                log.info("round {}, {}".format(i, name))
                for k, v in es_docs.items():
                    to_update = {"_index": k, "_type": "report"}
                    [i.update(to_update) for i in v]
                    elasticsearch.helpers.bulk(Datastores.es, v)

    log.info("total docs {} {}".format(i, datetime.datetime.now() - task_start))

def reindex_logs():
    try:
        Datastores.es.indices.delete("rcae_l_*")
    except elasticsearch.exceptions.NotFoundError as e:
        log.error(e)

    # logs
    log.info("reindexing logs")
    i = 0
    task_start = datetime.datetime.now()
    log_tables = detect_tables("logs_p_")
    for partition_table in log_tables:
        conn = DBSession.connection().execution_options(stream_results=True)
        result = conn.execute(partition_table.select())
        while True:
            chunk = result.fetchmany(2000)
            if not chunk:
                break
            es_docs = defaultdict(list)

            for row in chunk:
                i += 1
                item = Log(**dict(list(row.items())))
                d_range = item.partition_id
                es_docs[d_range].append(item.es_doc())
            if es_docs:
                name = partition_table.name
                log.info("round {}, {}".format(i, name))
                for k, v in es_docs.items():
                    to_update = {"_index": k, "_type": "log"}
                    [i.update(to_update) for i in v]
                    elasticsearch.helpers.bulk(Datastores.es, v)

    log.info("total docs {} {}".format(i, datetime.datetime.now() - task_start))

def reindex_metrics():
    try:
        Datastores.es.indices.delete("rcae_m_*")
    except elasticsearch.exceptions.NotFoundError as e:
        log.error(e)

    log.info("reindexing applications metrics")
    i = 0
    task_start = datetime.datetime.now()
    metric_tables = detect_tables("metrics_p_")
    for partition_table in metric_tables:
        conn = DBSession.connection().execution_options(stream_results=True)
        result = conn.execute(partition_table.select())
        while True:
            chunk = result.fetchmany(2000)
            if not chunk:
                break
            es_docs = defaultdict(list)
            for row in chunk:
                i += 1
                item = Metric(**dict(list(row.items())))
                d_range = item.partition_id
                es_docs[d_range].append(item.es_doc())
            if es_docs:
                name = partition_table.name
                log.info("round {}, {}".format(i, name))
                for k, v in es_docs.items():
                    to_update = {"_index": k, "_type": "log"}
                    [i.update(to_update) for i in v]
                    elasticsearch.helpers.bulk(Datastores.es, v)

    log.info("total docs {} {}".format(i, datetime.datetime.now() - task_start))

def reindex_slow_calls():
    try:
        Datastores.es.indices.delete("rcae_sc_*")
    except elasticsearch.exceptions.NotFoundError as e:
        log.error(e)

    log.info("reindexing slow calls")
    i = 0
    task_start = datetime.datetime.now()
    slow_calls_tables = detect_tables("slow_calls_p_")
    for partition_table in slow_calls_tables:
        conn = DBSession.connection().execution_options(stream_results=True)
        result = conn.execute(partition_table.select())
        while True:
            chunk = result.fetchmany(2000)
            if not chunk:
                break
            es_docs = defaultdict(list)
            for row in chunk:
                i += 1
                item = SlowCall(**dict(list(row.items())))
                d_range = item.partition_id
                es_docs[d_range].append(item.es_doc())
            if es_docs:
                name = partition_table.name
                log.info("round {}, {}".format(i, name))
                for k, v in es_docs.items():
                    to_update = {"_index": k, "_type": "log"}
                    [i.update(to_update) for i in v]
                    elasticsearch.helpers.bulk(Datastores.es, v)

    log.info("total docs {} {}".format(i, datetime.datetime.now() - task_start))

if __name__ == "__main__":
    main()
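Each reindex loop above stamps the destination index and mapping type onto every serialized row before handing the batch to elasticsearch.helpers.bulk. A minimal sketch of one such batch; the index name and field values are illustrative, not taken from a real partition:

    actions = [
        {
            "_index": "rcae_l_2017_01",  # the partition id is used as the index name
            "_type": "log",
            "log_id": 1,
            "resource_id": 1,
            "timestamp": "2017-01-01T00:00:00",
            "log_level": "WARNING",
            "message": "request took too long",
            "namespace": "myapp.views",
            "tags": {"color": {"values": "red"}},
            "tag_list": ["color:red"],
        }
    ]
    elasticsearch.helpers.bulk(Datastores.es, actions)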
@@ -1,220 +1,208 b'' | |||||
# -*- coding: utf-8 -*-

# Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from datetime import datetime, timedelta

from pyramid.view import view_config
from pyramid.httpexceptions import HTTPUnprocessableEntity
from appenlight.models import Datastores, Log
from appenlight.models.services.log import LogService
from appenlight.lib.utils import (
    build_filter_settings_from_query_dict,
    es_index_name_limiter,
)
from appenlight.lib.helpers import gen_pagination_headers
from appenlight.celery.tasks import logs_cleanup

log = logging.getLogger(__name__)

section_filters_key = "appenlight:logs:filter:%s"

@view_config(route_name="logs_no_id", renderer="json", permission="authenticated")
def fetch_logs(request):
    """
    Returns list of log entries from Elasticsearch
    """

    filter_settings = build_filter_settings_from_query_dict(
        request, request.GET.mixed()
    )
    logs_paginator = LogService.get_paginator_by_app_ids(
        app_ids=filter_settings["resource"],
        page=filter_settings["page"],
        filter_settings=filter_settings,
    )
    headers = gen_pagination_headers(request, logs_paginator)
    request.response.headers.update(headers)

    return [l.get_dict() for l in logs_paginator.sa_items]

@view_config(
    route_name="section_view",
    match_param=["section=logs_section", "view=fetch_series"],
    renderer="json",
    permission="authenticated",
)
def logs_fetch_series(request):
    """
    Handles metric dashboard graphs
    Returns information for time/tier breakdown
    """
    filter_settings = build_filter_settings_from_query_dict(
        request, request.GET.mixed()
    )
    paginator = LogService.get_paginator_by_app_ids(
        app_ids=filter_settings["resource"],
        page=1,
        filter_settings=filter_settings,
        items_per_page=1,
    )
    now = datetime.utcnow().replace(microsecond=0, second=0)
    delta = timedelta(days=7)
    if paginator.sa_items:
        start_date = paginator.sa_items[-1].timestamp.replace(microsecond=0, second=0)
        filter_settings["start_date"] = start_date - delta
    else:
        filter_settings["start_date"] = now - delta
    filter_settings["end_date"] = filter_settings["start_date"] + timedelta(days=7)

    @request.registry.cache_regions.redis_sec_30.cache_on_arguments("logs_graphs")
    def cached(apps, search_params, delta, now):
        data = LogService.get_time_series_aggregate(
            filter_settings["resource"], filter_settings
        )
        if not data:
            return []
        buckets = data["aggregations"]["events_over_time"]["buckets"]
        return [
            {
                "x": datetime.utcfromtimestamp(item["key"] / 1000),
                "logs": item["doc_count"],
            }
            for item in buckets
        ]

    return cached(filter_settings, request.GET.mixed(), delta, now)

@view_config(
    route_name="logs_no_id",
    renderer="json",
    request_method="DELETE",
    permission="authenticated",
)
def logs_mass_delete(request):
    params = request.GET.mixed()
    if "resource" not in params:
        raise HTTPUnprocessableEntity()
    # this might be '' and then colander will not validate the schema
    if not params.get("namespace"):
        params.pop("namespace", None)
    filter_settings = build_filter_settings_from_query_dict(
        request, params, resource_permissions=["update_reports"]
    )

    resource_id = list(filter_settings["resource"])[0]
    # filter_settings returns the list of all of the user's applications;
    # if the app did not match we normally would not care, since it is only
    # used for search, but here a user playing with params could wipe out
    # their whole data
    if int(resource_id) != int(params["resource"]):
        raise HTTPUnprocessableEntity()

    logs_cleanup.delay(resource_id, filter_settings)
    msg = (
        "Log cleanup process started - it may take a while for "
        "everything to get removed"
    )
    request.session.flash(msg)
    return {}

@view_config(
    route_name="section_view",
    match_param=("view=common_tags", "section=logs_section"),
    renderer="json",
    permission="authenticated",
)
def common_tags(request):
    config = request.GET.mixed()
    filter_settings = build_filter_settings_from_query_dict(request, config)

    resources = list(filter_settings["resource"])
    query = {
        "query": {"bool": {"filter": [{"terms": {"resource_id": list(resources)}}]}}
    }
    start_date = filter_settings.get("start_date")
    end_date = filter_settings.get("end_date")
    filter_part = query["query"]["bool"]["filter"]

    date_range = {"range": {"timestamp": {}}}
    if start_date:
        date_range["range"]["timestamp"]["gte"] = start_date
    if end_date:
        date_range["range"]["timestamp"]["lte"] = end_date
    if start_date or end_date:
        filter_part.append(date_range)

    levels = filter_settings.get("level")
    if levels:
        filter_part.append({"terms": {"log_level": levels}})
    namespaces = filter_settings.get("namespace")
    if namespaces:
        filter_part.append({"terms": {"namespace": namespaces}})

    query["aggs"] = {"sub_agg": {"terms": {"field": "tag_list.keyword", "size": 50}}}
    # tags
    index_names = es_index_name_limiter(ixtypes=[config.get("datasource", "logs")])
    result = Datastores.es.search(body=query, index=index_names, doc_type="log", size=0)
    tag_buckets = result["aggregations"]["sub_agg"].get("buckets", [])
    # namespaces
    query["aggs"] = {"sub_agg": {"terms": {"field": "namespace.keyword", "size": 50}}}
    result = Datastores.es.search(body=query, index=index_names, doc_type="log", size=0)
    namespaces_buckets = result["aggregations"]["sub_agg"].get("buckets", [])
    return {
        "tags": [item["key"] for item in tag_buckets],
        "namespaces": [item["key"] for item in namespaces_buckets],
    }

@view_config(
    route_name="section_view",
    match_param=("view=common_values", "section=logs_section"),
    renderer="json",
    permission="authenticated",
)
def common_values(request):
    config = request.GET.mixed()
    datasource = config.pop("datasource", "logs")
    filter_settings = build_filter_settings_from_query_dict(request, config)
    resources = list(filter_settings["resource"])
    tag_name = filter_settings["tags"][0]["value"][0]

    and_part = [{"terms": {"resource_id": list(resources)}}]
    if filter_settings["namespace"]:
        and_part.append({"terms": {"namespace": filter_settings["namespace"]}})
    query = {"query": {"bool": {"filter": and_part}}}
    query["aggs"] = {
        "sub_agg": {"terms": {"field": "tags.{}.values".format(tag_name), "size": 50}}
    }
    index_names = es_index_name_limiter(ixtypes=[datasource])
    result = Datastores.es.search(body=query, index=index_names, doc_type="log", size=0)
    values_buckets = result["aggregations"]["sub_agg"].get("buckets", [])
    return {"values": [item["key"] for item in values_buckets]}
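For a request limited to one application and one namespace, the body that common_values assembles above would look roughly like this (the resource id, namespace and tag name are illustrative):

    {
        "query": {
            "bool": {
                "filter": [
                    {"terms": {"resource_id": [1]}},
                    {"terms": {"namespace": ["myapp.views"]}},
                ]
            }
        },
        "aggs": {"sub_agg": {"terms": {"field": "tags.color.values", "size": 50}}},
    }

The aggregation is executed with size=0 against the indices returned by es_index_name_limiter, and only the bucket keys are returned to the caller.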