##// END OF EJS Templates
request_metric: fix wrong key
ergo -
Show More
@@ -1,623 +1,623 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright 2010 - 2017 RhodeCode GmbH and the AppEnlight project authors
4 4 #
5 5 # Licensed under the Apache License, Version 2.0 (the "License");
6 6 # you may not use this file except in compliance with the License.
7 7 # You may obtain a copy of the License at
8 8 #
9 9 # http://www.apache.org/licenses/LICENSE-2.0
10 10 #
11 11 # Unless required by applicable law or agreed to in writing, software
12 12 # distributed under the License is distributed on an "AS IS" BASIS,
13 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 14 # See the License for the specific language governing permissions and
15 15 # limitations under the License.
16 16
17 17 from datetime import datetime
18 18
19 19 import appenlight.lib.helpers as h
20 20 from appenlight.models import get_db_session, Datastores
21 21 from appenlight.models.services.base import BaseService
22 22 from appenlight.lib.enums import ReportType
23 23 from appenlight.lib.utils import es_index_name_limiter
24 24
25 25 try:
26 26 from ae_uptime_ce.models.services.uptime_metric import UptimeMetricService
27 27 except ImportError:
28 28 UptimeMetricService = None
29 29
30 30
def check_key(key, stats, uptime, total_seconds):
    """Ensure *stats* has a zeroed-out metrics entry for server *key*.

    Does nothing when the key is already present, so it is safe to call
    once per series row.  The created entry carries the shared ``uptime``
    and ``total_minutes`` values plus zero counters that callers fill in
    afterwards (requests, errors, apdex, rpm, response times, ...).
    """
    if key in stats:
        return
    entry = dict.fromkeys(
        [
            "requests",
            "errors",
            "tolerated_requests",
            "frustrating_requests",
            "satisfying_requests",
            "apdex",
            "rpm",
            "response_time",
            "avg_response_time",
        ],
        0,
    )
    entry["name"] = key
    entry["total_minutes"] = total_seconds / 60.0
    entry["uptime"] = uptime
    stats[key] = entry
47 47
48 48
class RequestMetricService(BaseService):
    """Read-only Elasticsearch aggregation queries backing the request
    metrics dashboard: timing time-series, per-view breakdown and
    per-server Apdex stats boxes."""

    @classmethod
    def get_metrics_stats(cls, request, filter_settings, db_session=None):
        """Return time-series points of summed per-layer request timings.

        :param request: current request object (unused here; kept for
            call-site symmetry with the other service methods)
        :param filter_settings: dict with at least ``start_date``,
            ``end_date`` and ``resource`` (list; first id is queried)
        :param db_session: unused
        :return: list of ``{"x": datetime, <layer>: rounded sum, ...}``
            dicts, one per date-histogram bucket; ``[]`` when no metrics
            index exists for the requested date range
        """
        delta = filter_settings["end_date"] - filter_settings["start_date"]
        # Pick histogram bucket size from the queried span so charts keep
        # a reasonable number of points.
        if delta < h.time_deltas.get("12h")["delta"]:
            interval = "1m"
        elif delta <= h.time_deltas.get("3d")["delta"]:
            interval = "5m"
        elif delta >= h.time_deltas.get("2w")["delta"]:
            interval = "24h"
        else:
            interval = "1h"

        filter_settings["namespace"] = ["appenlight.request_metric"]

        # One sub-aggregation per timing layer; each is filtered on field
        # existence so documents missing a layer don't contribute zeros.
        es_query = {
            "aggs": {
                "parent_agg": {
                    "aggs": {
                        "custom": {
                            "aggs": {
                                "sub_agg": {
                                    "sum": {"field": "tags.custom.numeric_values"}
                                }
                            },
                            "filter": {
                                "exists": {"field": "tags.custom.numeric_values"}
                            },
                        },
                        "main": {
                            "aggs": {
                                "sub_agg": {
                                    "sum": {"field": "tags.main.numeric_values"}
                                }
                            },
                            "filter": {"exists": {"field": "tags.main.numeric_values"}},
                        },
                        "nosql": {
                            "aggs": {
                                "sub_agg": {
                                    "sum": {"field": "tags.nosql.numeric_values"}
                                }
                            },
                            "filter": {
                                "exists": {"field": "tags.nosql.numeric_values"}
                            },
                        },
                        "remote": {
                            "aggs": {
                                "sub_agg": {
                                    "sum": {"field": "tags.remote.numeric_values"}
                                }
                            },
                            "filter": {
                                "exists": {"field": "tags.remote.numeric_values"}
                            },
                        },
                        "requests": {
                            "aggs": {
                                "sub_agg": {
                                    "sum": {"field": "tags.requests.numeric_values"}
                                }
                            },
                            "filter": {
                                "exists": {"field": "tags.requests.numeric_values"}
                            },
                        },
                        "sql": {
                            "aggs": {
                                "sub_agg": {"sum": {"field": "tags.sql.numeric_values"}}
                            },
                            "filter": {"exists": {"field": "tags.sql.numeric_values"}},
                        },
                        "tmpl": {
                            "aggs": {
                                "sub_agg": {
                                    "sum": {"field": "tags.tmpl.numeric_values"}
                                }
                            },
                            "filter": {"exists": {"field": "tags.tmpl.numeric_values"}},
                        },
                    },
                    # extended_bounds forces empty (min_doc_count=0) buckets
                    # so the chart covers the whole requested range.
                    "date_histogram": {
                        "extended_bounds": {
                            "max": filter_settings["end_date"],
                            "min": filter_settings["start_date"],
                        },
                        "field": "timestamp",
                        "interval": interval,
                        "min_doc_count": 0,
                    },
                }
            },
            "query": {
                "bool": {
                    "filter": [
                        {"terms": {"resource_id": [filter_settings["resource"][0]]}},
                        {
                            "range": {
                                "timestamp": {
                                    "gte": filter_settings["start_date"],
                                    "lte": filter_settings["end_date"],
                                }
                            }
                        },
                        {"terms": {"namespace": ["appenlight.request_metric"]}},
                    ]
                }
            },
        }

        index_names = es_index_name_limiter(
            start_date=filter_settings["start_date"],
            end_date=filter_settings["end_date"],
            ixtypes=["metrics"],
        )
        # No index covers the range -> nothing to plot.
        if not index_names:
            return []

        result = Datastores.es.search(
            body=es_query, index=index_names, doc_type="log", size=0
        )

        plot_data = []
        for item in result["aggregations"]["parent_agg"]["buckets"]:
            # Bucket keys are epoch milliseconds; convert to naive UTC.
            x_time = datetime.utcfromtimestamp(int(item["key"]) / 1000)
            point = {"x": x_time}
            for key in ["custom", "main", "nosql", "remote", "requests", "sql", "tmpl"]:
                value = item[key]["sub_agg"]["value"]
                # Treat missing/zero sums as 0 rather than None.
                point[key] = round(value, 3) if value else 0
            plot_data.append(point)

        return plot_data

    @classmethod
    def get_requests_breakdown(cls, request, filter_settings, db_session=None):
        """Return per-view request stats (time, request count, percentage
        of total time) plus the latest slow-report details for each view.

        :param filter_settings: dict with ``start_date``, ``end_date`` and
            ``resource`` (list of resource ids)
        :return: list of dicts with keys ``key`` (view name), ``main``,
            ``requests``, ``percentage`` and ``latest_details``
        """
        db_session = get_db_session(db_session)

        # fetch total time of all requests in this time range
        index_names = es_index_name_limiter(
            start_date=filter_settings["start_date"],
            end_date=filter_settings["end_date"],
            ixtypes=["metrics"],
        )

        if index_names and filter_settings["resource"]:
            es_query = {
                "aggs": {
                    "main": {
                        "aggs": {
                            "sub_agg": {"sum": {"field": "tags.main.numeric_values"}}
                        },
                        "filter": {"exists": {"field": "tags.main.numeric_values"}},
                    }
                },
                "query": {
                    "bool": {
                        "filter": [
                            {
                                "terms": {
                                    "resource_id": [filter_settings["resource"][0]]
                                }
                            },
                            {
                                "range": {
                                    "timestamp": {
                                        "gte": filter_settings["start_date"],
                                        "lte": filter_settings["end_date"],
                                    }
                                }
                            },
                            {"terms": {"namespace": ["appenlight.request_metric"]}},
                        ]
                    }
                },
            }
            result = Datastores.es.search(
                body=es_query, index=index_names, doc_type="log", size=0
            )
            total_time_spent = result["aggregations"]["main"]["sub_agg"]["value"]
        else:
            total_time_spent = 0
        # Per-document share of total time, computed server-side in ES.
        script_text = "doc['tags.main.numeric_values'].value / {}".format(
            total_time_spent
        )
        # Guard against division by zero inside the ES script.
        if total_time_spent == 0:
            script_text = "0"

        if index_names and filter_settings["resource"]:
            # Top 15 views ordered by their share of total request time.
            es_query = {
                "aggs": {
                    "parent_agg": {
                        "aggs": {
                            "main": {
                                "aggs": {
                                    "sub_agg": {
                                        "sum": {"field": "tags.main.numeric_values"}
                                    }
                                },
                                "filter": {
                                    "exists": {"field": "tags.main.numeric_values"}
                                },
                            },
                            "percentage": {
                                "aggs": {"sub_agg": {"sum": {"script": script_text}}},
                                "filter": {
                                    "exists": {"field": "tags.main.numeric_values"}
                                },
                            },
                            "requests": {
                                "aggs": {
                                    "sub_agg": {
                                        "sum": {"field": "tags.requests.numeric_values"}
                                    }
                                },
                                "filter": {
                                    "exists": {"field": "tags.requests.numeric_values"}
                                },
                            },
                        },
                        "terms": {
                            "field": "tags.view_name.values.keyword",
                            "order": {"percentage>sub_agg": "desc"},
                            "size": 15,
                        },
                    }
                },
                "query": {
                    "bool": {
                        "filter": [
                            {
                                "terms": {
                                    "resource_id": [filter_settings["resource"][0]]
                                }
                            },
                            {
                                "range": {
                                    "timestamp": {
                                        "gte": filter_settings["start_date"],
                                        "lte": filter_settings["end_date"],
                                    }
                                }
                            },
                        ]
                    }
                },
            }
            result = Datastores.es.search(
                body=es_query, index=index_names, doc_type="log", size=0
            )
            series = result["aggregations"]["parent_agg"]["buckets"]
        else:
            series = []

        # For every view found above, fetch the 5 most recent slow reports.
        and_part = [
            {"term": {"resource_id": filter_settings["resource"][0]}},
            {"terms": {"tags.view_name.values": [row["key"] for row in series]}},
            {"term": {"report_type": str(ReportType.slow)}},
        ]
        query = {
            "aggs": {
                "top_reports": {
                    "terms": {
                        "field": "tags.view_name.values.keyword",
                        "size": len(series),
                    },
                    "aggs": {
                        "top_calls_hits": {
                            "top_hits": {"sort": {"start_time": "desc"}, "size": 5}
                        }
                    },
                }
            },
            "query": {"bool": {"filter": and_part}},
        }
        details = {}
        index_names = es_index_name_limiter(ixtypes=["reports"])
        if index_names and series:
            result = Datastores.es.search(
                body=query, doc_type="report", size=0, index=index_names
            )
            for bucket in result["aggregations"]["top_reports"]["buckets"]:
                details[bucket["key"]] = []

                for hit in bucket["top_calls_hits"]["hits"]["hits"]:
                    details[bucket["key"]].append(
                        {
                            "report_id": hit["_source"]["report_id"],
                            "group_id": hit["_source"]["group_id"],
                        }
                    )

        results = []
        for row in series:
            result = {
                "key": row["key"],
                "main": row["main"]["sub_agg"]["value"],
                "requests": row["requests"]["sub_agg"]["value"],
            }
            # es can return 'infinity'
            try:
                result["percentage"] = float(row["percentage"]["sub_agg"]["value"])
            except ValueError:
                result["percentage"] = 0

            result["latest_details"] = details.get(row["key"]) or []
            results.append(result)

        return results

    @classmethod
    def get_apdex_stats(cls, request, filter_settings, threshold=1, db_session=None):
        """
        Returns information and calculates APDEX score per server for dashboard
        server information (upper right stats boxes)
        """
        # Apdex t = (Satisfied Count + Tolerated Count / 2) / Total Samples
        # NOTE(review): the ``threshold`` parameter is not referenced below;
        # the satisfied/tolerated boundaries are hard-coded at 1s/4s.
        db_session = get_db_session(db_session)
        index_names = es_index_name_limiter(
            start_date=filter_settings["start_date"],
            end_date=filter_settings["end_date"],
            ixtypes=["metrics"],
        )

        requests_series = []

        if index_names and filter_settings["resource"]:
            # Per server_name: total request time, request count, and counts
            # of tolerated (1s <= t < 4s) and frustrating (t >= 4s) requests.
            es_query = {
                "aggs": {
                    "parent_agg": {
                        "aggs": {
                            "frustrating": {
                                "aggs": {
                                    "sub_agg": {
                                        "sum": {"field": "tags.requests.numeric_values"}
                                    }
                                },
                                "filter": {
                                    "bool": {
                                        "filter": [
                                            {
                                                "range": {
                                                    "tags.main.numeric_values": {
                                                        "gte": "4"
                                                    }
                                                }
                                            },
                                            {
                                                "exists": {
                                                    "field": "tags.requests.numeric_values"
                                                }
                                            },
                                        ]
                                    }
                                },
                            },
                            "main": {
                                "aggs": {
                                    "sub_agg": {
                                        "sum": {"field": "tags.main.numeric_values"}
                                    }
                                },
                                "filter": {
                                    "exists": {"field": "tags.main.numeric_values"}
                                },
                            },
                            "requests": {
                                "aggs": {
                                    "sub_agg": {
                                        "sum": {"field": "tags.requests.numeric_values"}
                                    }
                                },
                                "filter": {
                                    "exists": {"field": "tags.requests.numeric_values"}
                                },
                            },
                            "tolerated": {
                                "aggs": {
                                    "sub_agg": {
                                        "sum": {"field": "tags.requests.numeric_values"}
                                    }
                                },
                                "filter": {
                                    "bool": {
                                        "filter": [
                                            {
                                                "range": {
                                                    "tags.main.numeric_values": {
                                                        "gte": "1"
                                                    }
                                                }
                                            },
                                            {
                                                "range": {
                                                    "tags.main.numeric_values": {
                                                        "lt": "4"
                                                    }
                                                }
                                            },
                                            {
                                                "exists": {
                                                    "field": "tags.requests.numeric_values"
                                                }
                                            },
                                        ]
                                    }
                                },
                            },
                        },
                        "terms": {
                            "field": "tags.server_name.values.keyword",
                            "size": 999999,
                        },
                    }
                },
                "query": {
                    "bool": {
                        "filter": [
                            {
                                "terms": {
                                    "resource_id": [filter_settings["resource"][0]]
                                }
                            },
                            {
                                "range": {
                                    "timestamp": {
                                        "gte": filter_settings["start_date"],
                                        "lte": filter_settings["end_date"],
                                    }
                                }
                            },
                            {"terms": {"namespace": ["appenlight.request_metric"]}},
                        ]
                    }
                },
            }

            result = Datastores.es.search(
                body=es_query, index=index_names, doc_type="log", size=0
            )
            for bucket in result["aggregations"]["parent_agg"]["buckets"]:
                requests_series.append(
                    {
                        "frustrating": bucket["frustrating"]["sub_agg"]["value"],
                        "main": bucket["main"]["sub_agg"]["value"],
                        "requests": bucket["requests"]["sub_agg"]["value"],
                        "tolerated": bucket["tolerated"]["sub_agg"]["value"],
                        "key": bucket["key"],
                    }
                )

        since_when = filter_settings["start_date"]
        until = filter_settings["end_date"]

        # total errors

        index_names = es_index_name_limiter(
            start_date=filter_settings["start_date"],
            end_date=filter_settings["end_date"],
            ixtypes=["reports"],
        )

        report_series = []
        if index_names and filter_settings["resource"]:
            report_type = ReportType.key_from_value(ReportType.error)
            # Per server_name: summed error occurrence counts.
            es_query = {
                "aggs": {
                    "parent_agg": {
                        "aggs": {
                            "errors": {
                                "aggs": {
                                    "sub_agg": {
                                        "sum": {
                                            "field": "tags.occurences.numeric_values"
                                        }
                                    }
                                },
                                "filter": {
                                    "bool": {
                                        "filter": [
                                            {
                                                "terms": {
                                                    "tags.type.values": [report_type]
                                                }
                                            },
                                            {
                                                "exists": {
                                                    "field": "tags.occurences.numeric_values"
                                                }
                                            },
                                        ]
                                    }
                                },
                            }
                        },
                        "terms": {
                            "field": "tags.server_name.values.keyword",
                            "size": 999999,
                        },
                    }
                },
                "query": {
                    "bool": {
                        "filter": [
                            {
                                "terms": {
                                    "resource_id": [filter_settings["resource"][0]]
                                }
                            },
                            {
                                "range": {
                                    "timestamp": {
                                        "gte": filter_settings["start_date"],
                                        "lte": filter_settings["end_date"],
                                    }
                                }
                            },
                            {"terms": {"namespace": ["appenlight.error"]}},
                        ]
                    }
                },
            }
            result = Datastores.es.search(
                body=es_query, index=index_names, doc_type="log", size=0
            )
            for bucket in result["aggregations"]["parent_agg"]["buckets"]:
                report_series.append(
                    {
                        "key": bucket["key"],
                        "errors": bucket["errors"]["sub_agg"]["value"],
                    }
                )

        stats = {}
        # Uptime comes from the optional CE uptime plugin; 0 when absent.
        if UptimeMetricService is not None:
            uptime = UptimeMetricService.get_uptime_by_app(
                filter_settings["resource"][0], since_when=since_when, until=until
            )
        else:
            uptime = 0

        total_seconds = (until - since_when).total_seconds()

        # Merge both series into one per-server stats dict keyed by server
        # name; check_key seeds missing entries with zeroed counters.
        for stat in requests_series:
            check_key(stat["key"], stats, uptime, total_seconds)
            stats[stat["key"]]["requests"] = int(stat["requests"])
            stats[stat["key"]]["response_time"] = stat["main"]
            stats[stat["key"]]["tolerated_requests"] = stat["tolerated"]
            stats[stat["key"]]["frustrating_requests"] = stat["frustrating"]
        for server in report_series:
            check_key(server["key"], stats, uptime, total_seconds)
            stats[server["key"]]["errors"] = server["errors"]

        server_stats = list(stats.values())
        for stat in server_stats:
            # Satisfying = everything not errored, tolerated or frustrating,
            # clamped at zero (series may disagree slightly).
            stat["satisfying_requests"] = (
                stat["requests"]
                - stat["errors"]
                - stat["frustrating_requests"]
                - stat["tolerated_requests"]
            )
            if stat["satisfying_requests"] < 0:
                stat["satisfying_requests"] = 0

            if stat["requests"]:
                stat["avg_response_time"] = round(
                    stat["response_time"] / stat["requests"], 3
                )
                qual_requests = (
                    stat["satisfying_requests"] + stat["tolerated_requests"] / 2.0
                )
                stat["apdex"] = round((qual_requests / stat["requests"]) * 100, 2)
                stat["rpm"] = round(stat["requests"] / stat["total_minutes"], 2)

        return sorted(server_stats, key=lambda x: x["name"])
General Comments 4
Under Review
author

Auto status change to "Under Review"

Under Review
author

Auto status change to "Under Review"

You need to be logged in to leave comments. Login now