Pull request !2405, created on Tue, 13 Feb 2024 04:51:24. Commits:
  • setup: change url to github
  • readme: provide better descriptions
  • ini: disable secure cookie by default
  • setup.py: include additional package data
  • README: mention getappenlight.com documentation
@@ -63,7 +63,7 b' setup('
63 63 version="2.0.0rc1",
64 64 license=__license__,
65 65 author=__author__,
66 url=__url__,
66 url="https://github.com/AppEnlight/appenlight",
67 67 keywords="web wsgi bfg pylons pyramid flask django monitoring apm instrumentation appenlight",
68 68 python_requires=">=3.5",
69 69 long_description_content_type="text/markdown",
@@ -8,9 +8,11 b' Performance, exception, and uptime monitoring for the Web'
8 8 Automatic Installation
9 9 ======================
10 10
11 Use the ansible scripts in the `automation` repository to build complete instance of application
11 Use the ansible or vagrant scripts in the `automation` repository to build a complete instance of the application.
12 12 You can also use `packer` files in `automation/packer` to create whole VM's for KVM and VMWare.
13 13
14 https://github.com/AppEnlight/automation
15
14 16 Manual Installation
15 17 ===================
16 18
@@ -41,7 +43,7 b' After installing the application you need to perform following steps:'
41 43
42 44 appenlight-make-config production.ini
43 45
44 2. Setup database structure:
46 2. Set up the database structure (replace FILENAME with the name you picked for `appenlight-make-config`):
45 47
46 48 appenlight-migratedb -c FILENAME.ini
47 49
@@ -64,7 +66,7 b' Running application'
64 66
65 67 To run the main app:
66 68
67 pserve development.ini
69 pserve FILENAME.ini
68 70
69 71 To run celery workers:
70 72
@@ -74,16 +76,17 b' To run celery beat:'
74 76
75 77 celery beat -A appenlight.celery --ini FILENAME.ini
76 78
77 To run appenlight's uptime plugin:
79 To run appenlight's uptime plugin (an example uptime plugin config can be found at
80 https://github.com/AppEnlight/appenlight-uptime-ce ):
78 81
79 appenlight-uptime-monitor -c FILENAME.ini
82 appenlight-uptime-monitor -c UPTIME_PLUGIN_CONFIG_FILENAME.ini
80 83
81 84 Real-time Notifications
82 85 =======================
83 86
84 87 You should also run the `channelstream` websocket server for real-time notifications:
85 88
86 channelstream -i filename.ini
86 89 channelstream -i CHANNELSTREAM_CONFIG_FILENAME.ini
87 90
88 91 Additional documentation
89 92 ========================
@@ -45,7 +45,7 b' redis.sessions.cookie_max_age = 2592000'
45 45 redis.sessions.cookie_path = /
46 46 redis.sessions.cookie_domain =
47 47 # uncomment if you use SSL
48 redis.sessions.cookie_secure = True
48 # redis.sessions.cookie_secure = True
49 49 redis.sessions.cookie_httponly = True
50 50 redis.sessions.cookie_on_exception = True
51 51 redis.sessions.prefix = appenlight:session:
@@ -41,6 +41,7 b' __author__ = _get_meta_var("__author__", _metadata)'
41 41 __url__ = _get_meta_var("__url__", _metadata)
42 42
43 43 found_packages = find_packages("src")
44 found_packages.append("appenlight.migrations")
44 45 found_packages.append("appenlight.migrations.versions")
45 46 setup(
46 47 name="appenlight",
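
Note: `find_packages()` only discovers directories that contain an `__init__.py`, and Alembic migration folders usually do not, so this hunk appends `appenlight.migrations` by hand alongside the already-appended `versions` package. The pattern in isolation (the loop is an illustration, not the PR's code):

    from setuptools import find_packages

    # find_packages() skips directories without __init__.py, which is
    # typical for Alembic migration folders, so add them explicitly.
    found_packages = find_packages("src")
    for extra in ("appenlight.migrations", "appenlight.migrations.versions"):
        if extra not in found_packages:
            found_packages.append(extra)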
@@ -84,7 +84,12 b' Real-time Notifications'
84 84 You should also run the `channelstream` websocket server for real-time notifications:
85 85
86 86 channelstream -i filename.ini
87
87
88 Additional documentation
89 ========================
90
91 Visit https://getappenlight.com for additional server and client documentation.
92
88 93 Testing
89 94 =======
90 95
@@ -1,4 +1,9 b''
1 Visit:
1 # AppEnlight
2
3 Performance, exception, and uptime monitoring for the Web
2 4
5 ![AppEnlight image](https://raw.githubusercontent.com/AppEnlight/appenlight/gh-pages/static/appenlight.png)
6
7 Visit:
3 8
4 9 [Readme moved to backend directory](backend/README.md)
@@ -1,6 +1,10 b''
1 1 AppEnlight
2 2 -----------
3 3
4 Performance, exception, and uptime monitoring for the Web
5
6 ![AppEnlight image](https://raw.githubusercontent.com/AppEnlight/appenlight/gh-pages/static/appenlight.png)
7
4 8 Automatic Installation
5 9 ======================
6 10
@@ -35,25 +35,25 b' After installing the application you need to perform following steps:'
35 35
36 36 1. (optional) generate production.ini (or use a copy of development.ini)
37 37
38 appenlight-make-config production.ini
38 appenlight-make-config production.ini
39 39
40 40 2. Setup database structure:
41 41
42 appenlight-migratedb -c FILENAME.ini
42 appenlight-migratedb -c FILENAME.ini
43 43
44 44 3. To configure elasticsearch:
45 45
46 appenlight-reindex-elasticsearch -t all -c FILENAME.ini
46 appenlight-reindex-elasticsearch -t all -c FILENAME.ini
47 47
48 48 4. Create base database objects
49 49
50 50 (run this command with help flag to see how to create administrator user)
51 51
52 appenlight-initializedb -c FILENAME.ini
52 appenlight-initializedb -c FILENAME.ini
53 53
54 54 5. Generate static assets
55 55
56 appenlight-static -c FILENAME.ini
56 appenlight-static -c FILENAME.ini
57 57
58 58 Running application
59 59 ===================
@@ -16,7 +16,10 b' def parse_req(req):'
16 16 return compiled.search(req).group(1).strip()
17 17
18 18
19 requires = [_f for _f in map(parse_req, REQUIREMENTS) if _f]
19 if "APPENLIGHT_DEVELOP" in os.environ:
20 requires = [_f for _f in map(parse_req, REQUIREMENTS) if _f]
21 else:
22 requires = REQUIREMENTS
20 23
21 24
22 25 def _get_meta_var(name, data, callback_handler=None):
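
Note: after this change, regular installs consume the pinned `requirements.txt` lines verbatim, and only development installs (with `APPENLIGHT_DEVELOP` set) strip the pins so newer versions can be picked up. A rough sketch of the behavior, with a hypothetical regex standing in for the repository's precompiled one:

    import os
    import re

    # Hypothetical pinned lines, as they would appear in requirements.txt.
    REQUIREMENTS = ["elasticsearch>=6.0.0,<7.0.0", "itsdangerous==1.1.0"]

    def parse_req(req):
        # Keep only the distribution name, dropping the version pin.
        return re.search(r"^\s*([\w\-\.\[\]]+)", req).group(1).strip()

    if "APPENLIGHT_DEVELOP" in os.environ:
        # Development install: unpinned names, newest versions allowed.
        requires = [name for name in map(parse_req, REQUIREMENTS) if name]
    else:
        # Regular install: keep the exact pins.
        requires = REQUIREMENTS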
@@ -35,29 +35,24 b' After installing the application you need to perform following steps:'
35 35
36 36 1. (optional) generate production.ini (or use a copy of development.ini)
37 37
38
39 38 appenlight-make-config production.ini
40 39
41 40 2. Setup database structure:
42 41
43
44 42 appenlight-migratedb -c FILENAME.ini
45 43
46 44 3. To configure elasticsearch:
47 45
48
49 46 appenlight-reindex-elasticsearch -t all -c FILENAME.ini
50 47
51 48 4. Create base database objects
52 49
53 50 (run this command with help flag to see how to create administrator user)
54 51
55
56 52 appenlight-initializedb -c FILENAME.ini
57 53
58 54 5. Generate static assets
59 55
60
61 56 appenlight-static -c FILENAME.ini
62 57
63 58 Running application
@@ -44,12 +44,19 b' setup('
44 44 description="appenlight",
45 45 long_description=README,
46 46 classifiers=[
47 "Framework :: Pyramid",
48 "License :: OSI Approved :: Apache Software License",
47 49 "Programming Language :: Python",
48 "Framework :: Pylons",
50 "Programming Language :: Python :: 3 :: Only",
51 "Programming Language :: Python :: 3.6",
52 "Topic :: System :: Monitoring",
53 "Topic :: Software Development",
54 "Topic :: Software Development :: Bug Tracking",
55 "Topic :: Internet :: Log Analysis",
49 56 "Topic :: Internet :: WWW/HTTP",
50 57 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
51 58 ],
52 version='2.0.0rc1',
59 version="2.0.0rc1",
53 60 license=__license__,
54 61 author=__author__,
55 62 url=__url__,
@@ -14,7 +14,7 b' The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).'
14 14 <!-- ### Fixed -->
15 15
16 16
17 ## Unreleased [2.0.0 - 2019-04-XX]
17 ## [2.0.0rc1 - 2019-04-13]
18 18 ### Changed
19 19 * require Elasticsearch 6.x
20 20 * move data structure to single document per index
@@ -103,8 +103,3 b' To develop appenlight frontend:'
103 103 npm install
104 104 grunt watch
105 105
106
107 Tagging release
108 ===============
109
110 bumpversion --current-version 1.1.1 minor --verbose --tag --commit --dry-run
@@ -33,9 +33,6 b' def _get_meta_var(name, data, callback_handler=None):'
33 33 with open(os.path.join(here, "src", "appenlight", "__init__.py"), "r") as _meta:
34 34 _metadata = _meta.read()
35 35
36 with open(os.path.join(here, "VERSION"), "r") as _meta_version:
37 __version__ = _meta_version.read().strip()
38
39 36 __license__ = _get_meta_var("__license__", _metadata)
40 37 __author__ = _get_meta_var("__author__", _metadata)
41 38 __url__ = _get_meta_var("__url__", _metadata)
@@ -45,18 +42,20 b' found_packages.append("appenlight.migrations.versions")'
45 42 setup(
46 43 name="appenlight",
47 44 description="appenlight",
48 long_description=README + "\n\n" + CHANGES,
45 long_description=README,
49 46 classifiers=[
50 47 "Programming Language :: Python",
51 48 "Framework :: Pylons",
52 49 "Topic :: Internet :: WWW/HTTP",
53 50 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
54 51 ],
55 version=__version__,
52 version='2.0.0rc1',
56 53 license=__license__,
57 54 author=__author__,
58 55 url=__url__,
59 keywords="web wsgi bfg pylons pyramid",
56 keywords="web wsgi bfg pylons pyramid flask django monitoring apm instrumentation appenlight",
57 python_requires=">=3.5",
58 long_description_content_type="text/markdown",
60 59 package_dir={"": "src"},
61 60 packages=found_packages,
62 61 include_package_data=True,
@@ -12,7 +12,7 b' Manual Installation'
12 12
13 13 To run the app you need to meet these prerequisites:
14 14
15 - python 3.5+
15 - python 3.5+ (currently tested on 3.6)
16 16 - running elasticsearch (6.6.2 tested)
17 17 - running postgresql (9.5+ required, tested 9.6 and 10.6)
18 18 - running redis
@@ -25,6 +25,12 b' Install the app by performing'
25 25
26 26 Install the appenlight uptime plugin (`ae_uptime_ce` package from `appenlight-uptime-ce` repository).
27 27
28 For production usage you can install the released packages:
29
30 pip install appenlight
31 pip install ae_uptime_ce
32
33
28 34 After installing the application you need to perform the following steps:
29 35
30 36 1. (optional) generate production.ini (or use a copy of development.ini)
@@ -1,2 +1,2 b''
1 1 include *.txt *.ini *.cfg *.rst *.md VERSION
2 recursive-include appenlight *.ico *.png *.css *.gif *.jpg *.pt *.txt *.mak *.mako *.js *.html *.xml *.jinja2 *.rst *.otf *.ttf *.svg *.woff *.eot
2 recursive-include src *.ico *.png *.css *.gif *.jpg *.pt *.txt *.mak *.mako *.js *.html *.xml *.jinja2 *.rst *.otf *.ttf *.svg *.woff *.woff2 *.eot
@@ -102,7 +102,7 b' ziggurat_foundations.sign_in.came_from_key = came_from'
102 102 #cometd
103 103 cometd.server = http://127.0.0.1:8088/
104 104 cometd.secret = secret
105 cometd.ws_url = http://127.0.0.1:8088/
105 cometd.ws_url = ws://127.0.0.1:8088/
106 106
107 107
108 108 # for celery
@@ -95,7 +95,6 b' To develop appenlight frontend:'
95 95
96 96 cd frontend
97 97 npm install
98 bower install
99 98 grunt watch
100 99
101 100
@@ -81,6 +81,7 b' ziggurat_foundations.sign_in.came_from_key = came_from'
81 81 #cometd
82 82 cometd.server = http://127.0.0.1:8088
83 83 cometd.secret = secret
84 # change to ws:// for development
84 85 cometd.ws_url = wss://{{appenlight_domain}}/channelstream
85 86
86 87 # for celery
@@ -18,6 +18,8 b' The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).'
18 18 ### Changed
19 19 * require Elasticsearch 6.x
20 20 * move data structure to single document per index
21 * got rid of bower and moved to npm in the build process
22 * updated angular packages to new versions
21 23
22 24 ## [1.2.0 - 2019-03-17]
23 25 ### Changed

@@ -666,13 +666,11 b' class LogSearchSchema(colander.MappingSchema):'
666 666 namespace = colander.SchemaNode(
667 667 colander.Sequence(accept_scalar=True),
668 668 colander.SchemaNode(colander.String()),
669 preparer=lowercase_preparer,
670 669 missing=None,
671 670 )
672 671 request_id = colander.SchemaNode(
673 672 colander.Sequence(accept_scalar=True),
674 673 colander.SchemaNode(colander.String()),
675 preparer=lowercase_preparer,
676 674 missing=None,
677 675 )
678 676 start_date = colander.SchemaNode(PermissiveDate(), missing=None)
@@ -333,7 +333,7 b' class RequestMetricService(BaseService):'
333 333 for hit in bucket["top_calls_hits"]["hits"]["hits"]:
334 334 details[bucket["key"]].append(
335 335 {
336 "report_id": hit["_source"]["request_metric_id"],
336 "report_id": hit["_source"]["report_id"],
337 337 "group_id": hit["_source"]["group_id"],
338 338 }
339 339 )
@@ -14,6 +14,11 b' The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).'
14 14 <!-- ### Fixed -->
15 15
16 16
17 ## Unreleased [2.0.0 - 2019-04-XX]
18 ### Changed
19 * require Elasticsearch 6.x
20 * move data structure to single document per index
21
17 22 ## [1.2.0 - 2019-03-17]
18 23 ### Changed
19 24 * Replaced elasticsearch client
@@ -13,8 +13,8 b' Manual Installation'
13 13 To run the app you need to meet these prerequisites:
14 14
15 15 - python 3.5+
16 - running elasticsearch (2.3+/2.4 tested)
17 - running postgresql (9.5+ required)
16 - running elasticsearch (6.6.2 tested)
17 - running postgresql (9.5+ required, tested 9.6 and 10.6)
18 18 - running redis
19 19
20 20 Install the app by performing
@@ -36,7 +36,7 b' pygments==2.3.1'
36 36 lxml==4.3.2
37 37 paginate==0.5.6
38 38 paginate-sqlalchemy==0.3.0
39 elasticsearch>=5.0.0,<6.0.0
39 elasticsearch>=6.0.0,<7.0.0
40 40 mock==1.0.1
41 41 itsdangerous==1.1.0
42 42 camplight==0.9.6
@@ -326,8 +326,10 b' def add_logs(resource_id, request_params, dataset, **kwargs):'
326 326
327 327 try:
328 328 Datastores.es.delete_by_query(
329 index=es_index, doc_type="log",
330 body=query, conflicts="proceed"
329 index=es_index,
330 doc_type="log",
331 body=query,
332 conflicts="proceed",
331 333 )
332 334 except elasticsearch.exceptions.NotFoundError as exc:
333 335 msg = "skipping index {}".format(es_index)
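
Note: the hunk above is black-style reformatting, but `conflicts="proceed"` is the argument doing the work here: it tells `delete_by_query` to skip documents that hit a version conflict instead of aborting the whole request. A minimal, hypothetical wrapper showing the call shape against the 6.x `elasticsearch` client:

    import elasticsearch

    def delete_log_docs(es, es_index, query):
        # delete_by_query aborts on version conflicts by default;
        # conflicts="proceed" skips the conflicting documents instead.
        try:
            es.delete_by_query(
                index=es_index, doc_type="log", body=query, conflicts="proceed"
            )
        except elasticsearch.exceptions.NotFoundError:
            pass  # the partitioned index may have been rotated away already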
@@ -688,11 +690,7 b' def alerting_reports():'
688 690 def logs_cleanup(resource_id, filter_settings):
689 691 request = get_current_request()
690 692 request.tm.begin()
691 es_query = {
692 "query": {
693 "bool": {"filter": [{"term": {"resource_id": resource_id}}]}
694 }
695 }
693 es_query = {"query": {"bool": {"filter": [{"term": {"resource_id": resource_id}}]}}}
696 694
697 695 query = DBSession.query(Log).filter(Log.resource_id == resource_id)
698 696 if filter_settings["namespace"]:
@@ -703,5 +701,5 b' def logs_cleanup(resource_id, filter_settings):'
703 701 query.delete(synchronize_session=False)
704 702 request.tm.commit()
705 703 Datastores.es.delete_by_query(
706 index="rcae_l_*", doc_type="log", body=es_query, conflicts="proceed"
704 index="rcae_l_*", doc_type="log", body=es_query, conflicts="proceed"
707 705 )
@@ -552,7 +552,9 b' def get_es_info(cache_regions, es_conn):'
552 552 @cache_regions.memory_min_10.cache_on_arguments()
553 553 def get_es_info_cached():
554 554 returned_info = {"raw_info": es_conn.info()}
555 returned_info["version"] = returned_info["raw_info"]["version"]["number"].split('.')
555 returned_info["version"] = returned_info["raw_info"]["version"]["number"].split(
556 "."
557 )
556 558 return returned_info
557 559
558 560 return get_es_info_cached()
@@ -483,13 +483,10 b' class Report(Base, BaseModel):'
483 483 "ip": self.ip,
484 484 "group_id": str(self.group_id),
485 485 "type": "report",
486 "join_field": {
487 "name": "report",
488 "parent": str(self.group_id)
489 },
486 "join_field": {"name": "report", "parent": str(self.group_id)},
490 487 "tags": tags,
491 488 "tag_list": tag_list,
492 "_routing": str(self.group_id)
489 "_routing": str(self.group_id),
493 490 }
494 491
495 492 @property
@@ -525,7 +522,10 b' def after_delete(mapper, connection, target):'
525 522 if not hasattr(target, "_skip_ft_index"):
526 523 query = {"query": {"term": {"report_id": target.id}}}
527 524 Datastores.es.delete_by_query(
528 index=target.partition_id, doc_type="report", body=query, conflicts="proceed"
525 index=target.partition_id,
526 doc_type="report",
527 body=query,
528 conflicts="proceed",
529 529 )
530 530
531 531
@@ -191,9 +191,7 b' class ReportGroup(Base, BaseModel):'
191 191 "first_timestamp": self.first_timestamp,
192 192 "last_timestamp": self.last_timestamp,
193 193 "type": "report_group",
194 "join_field": {
195 "name": "report_group"
196 },
194 "join_field": {"name": "report_group"},
197 195 }
198 196
199 197 def set_notification_info(self, notify_10=False, notify_100=False):
@@ -56,11 +56,7 b' class LogService(BaseService):'
56 56 filter_settings = {}
57 57
58 58 query = {
59 "query": {
60 "bool": {
61 "filter": [{"terms": {"resource_id": list(app_ids)}}]
62 }
63 }
59 "query": {"bool": {"filter": [{"terms": {"resource_id": list(app_ids)}}]}}
64 60 }
65 61
66 62 start_date = filter_settings.get("start_date")
@@ -132,13 +128,13 b' class LogService(BaseService):'
132 128
133 129 @classmethod
134 130 def get_search_iterator(
135 cls,
136 app_ids=None,
137 page=1,
138 items_per_page=50,
139 order_by=None,
140 filter_settings=None,
141 limit=None,
131 cls,
132 app_ids=None,
133 page=1,
134 items_per_page=50,
135 order_by=None,
136 filter_settings=None,
137 limit=None,
142 138 ):
143 139 if not app_ids:
144 140 return {}, 0
@@ -171,15 +167,15 b' class LogService(BaseService):'
171 167
172 168 @classmethod
173 169 def get_paginator_by_app_ids(
174 cls,
175 app_ids=None,
176 page=1,
177 item_count=None,
178 items_per_page=50,
179 order_by=None,
180 filter_settings=None,
181 exclude_columns=None,
182 db_session=None,
170 cls,
171 app_ids=None,
172 page=1,
173 item_count=None,
174 items_per_page=50,
175 order_by=None,
176 filter_settings=None,
177 exclude_columns=None,
178 db_session=None,
183 179 ):
184 180 if not filter_settings:
185 181 filter_settings = {}
@@ -64,7 +64,9 b' class ReportGroupService(BaseService):'
64 64 "groups": {
65 65 "aggs": {
66 66 "sub_agg": {
67 "value_count": {"field": "tags.group_id.values.keyword"}
67 "value_count": {
68 "field": "tags.group_id.values.keyword"
69 }
68 70 }
69 71 },
70 72 "filter": {"exists": {"field": "tags.group_id.values"}},
@@ -76,11 +78,7 b' class ReportGroupService(BaseService):'
76 78 "query": {
77 79 "bool": {
78 80 "filter": [
79 {
80 "terms": {
81 "resource_id": [filter_settings["resource"][0]]
82 }
83 },
81 {"terms": {"resource_id": [filter_settings["resource"][0]]}},
84 82 {
85 83 "range": {
86 84 "timestamp": {
@@ -136,7 +134,7 b' class ReportGroupService(BaseService):'
136 134 "bool": {
137 135 "must": [],
138 136 "should": [],
139 "filter": [{"terms": {"resource_id": list(app_ids)}}]
137 "filter": [{"terms": {"resource_id": list(app_ids)}}],
140 138 }
141 139 },
142 140 "aggs": {
@@ -315,7 +313,9 b' class ReportGroupService(BaseService):'
315 313 ordered_ids = []
316 314 if results:
317 315 for item in results["top_groups"]["buckets"]:
318 pg_id = item["top_reports_hits"]["hits"]["hits"][0]["_source"]["report_id"]
316 pg_id = item["top_reports_hits"]["hits"]["hits"][0]["_source"][
317 "report_id"
318 ]
319 319 ordered_ids.append(pg_id)
320 320 log.info(filter_settings)
321 321 paginator = paginate.Page(
@@ -445,12 +445,16 b' class ReportGroupService(BaseService):'
445 445 "aggs": {
446 446 "types": {
447 447 "aggs": {
448 "sub_agg": {"terms": {"field": "tags.type.values.keyword"}}
448 "sub_agg": {
449 "terms": {"field": "tags.type.values.keyword"}
450 }
449 451 },
450 452 "filter": {
451 "bool": {
452 "filter": [{"exists": {"field": "tags.type.values"}}]
453 }
453 "bool": {
454 "filter": [
455 {"exists": {"field": "tags.type.values"}}
456 ]
457 }
454 458 },
455 459 }
456 460 },
@@ -468,11 +472,7 b' class ReportGroupService(BaseService):'
468 472 "query": {
469 473 "bool": {
470 474 "filter": [
471 {
472 "terms": {
473 "resource_id": [filter_settings["resource"][0]]
474 }
475 },
475 {"terms": {"resource_id": [filter_settings["resource"][0]]}},
476 476 {
477 477 "range": {
478 478 "timestamp": {
@@ -31,7 +31,9 b' class ReportStatService(BaseService):'
31 31 "aggs": {
32 32 "reports": {
33 33 "aggs": {
34 "sub_agg": {"value_count": {"field": "tags.group_id.values.keyword"}}
34 "sub_agg": {
35 "value_count": {"field": "tags.group_id.values.keyword"}
36 }
35 37 },
36 38 "filter": {
37 39 "bool": {
@@ -142,11 +142,7 b' class RequestMetricService(BaseService):'
142 142 "query": {
143 143 "bool": {
144 144 "filter": [
145 {
146 "terms": {
147 "resource_id": [filter_settings["resource"][0]]
148 }
149 },
145 {"terms": {"resource_id": [filter_settings["resource"][0]]}},
150 146 {
151 147 "range": {
152 148 "timestamp": {
@@ -236,7 +232,7 b' class RequestMetricService(BaseService):'
236 232 total_time_spent
237 233 )
238 234 if total_time_spent == 0:
239 script_text = '0'
235 script_text = "0"
240 236
241 237 if index_names and filter_settings["resource"]:
242 238 es_query = {
@@ -254,13 +250,7 b' class RequestMetricService(BaseService):'
254 250 },
255 251 },
256 252 "percentage": {
257 "aggs": {
258 "sub_agg": {
259 "sum": {
260 "script": script_text,
261 }
262 }
263 },
253 "aggs": {"sub_agg": {"sum": {"script": script_text}}},
264 254 "filter": {
265 255 "exists": {"field": "tags.main.numeric_values"}
266 256 },
@@ -318,7 +308,10 b' class RequestMetricService(BaseService):'
318 308 query = {
319 309 "aggs": {
320 310 "top_reports": {
321 "terms": {"field": "tags.view_name.values.keyword", "size": len(series)},
311 "terms": {
312 "field": "tags.view_name.values.keyword",
313 "size": len(series),
314 },
322 315 "aggs": {
323 316 "top_calls_hits": {
324 317 "top_hits": {"sort": {"start_time": "desc"}, "size": 5}
@@ -395,7 +388,9 b' class RequestMetricService(BaseService):'
395 388 "filter": [
396 389 {
397 390 "range": {
398 "tags.main.numeric_values": {"gte": "4"}
391 "tags.main.numeric_values": {
392 "gte": "4"
393 }
399 394 }
400 395 },
401 396 {
@@ -434,27 +429,36 b' class RequestMetricService(BaseService):'
434 429 }
435 430 },
436 431 "filter": {
437 "bool": {"filter": [
438 {
439 "range": {
440 "tags.main.numeric_values": {"gte": "1"}
441 }
442 },
443 {
444 "range": {
445 "tags.main.numeric_values": {"lt": "4"}
446 }
447 },
448 {
449 "exists": {
450 "field": "tags.requests.numeric_values"
451 }
452 },
453 ]}
432 "bool": {
433 "filter": [
434 {
435 "range": {
436 "tags.main.numeric_values": {
437 "gte": "1"
438 }
439 }
440 },
441 {
442 "range": {
443 "tags.main.numeric_values": {
444 "lt": "4"
445 }
446 }
447 },
448 {
449 "exists": {
450 "field": "tags.requests.numeric_values"
451 }
452 },
453 ]
454 }
454 455 },
455 456 },
456 457 },
457 "terms": {"field": "tags.server_name.values.keyword", "size": 999999},
458 "terms": {
459 "field": "tags.server_name.values.keyword",
460 "size": 999999,
461 },
458 462 }
459 463 },
460 464 "query": {
@@ -522,7 +526,11 b' class RequestMetricService(BaseService):'
522 526 "filter": {
523 527 "bool": {
524 528 "filter": [
525 {"terms": {"tags.type.values": [report_type]}},
529 {
530 "terms": {
531 "tags.type.values": [report_type]
532 }
533 },
526 534 {
527 535 "exists": {
528 536 "field": "tags.occurences.numeric_values"
@@ -533,7 +541,10 b' class RequestMetricService(BaseService):'
533 541 },
534 542 }
535 543 },
536 "terms": {"field": "tags.server_name.values.keyword", "size": 999999},
544 "terms": {
545 "field": "tags.server_name.values.keyword",
546 "size": 999999,
547 },
537 548 }
538 549 },
539 550 "query": {
@@ -591,10 +602,10 b' class RequestMetricService(BaseService):'
591 602 server_stats = list(stats.values())
592 603 for stat in server_stats:
593 604 stat["satisfying_requests"] = (
594 stat["requests"]
595 - stat["errors"]
596 - stat["frustrating_requests"]
597 - stat["tolerated_requests"]
605 stat["requests"]
606 - stat["errors"]
607 - stat["frustrating_requests"]
608 - stat["tolerated_requests"]
598 609 )
599 610 if stat["satisfying_requests"] < 0:
600 611 stat["satisfying_requests"] = 0
@@ -604,7 +615,7 b' class RequestMetricService(BaseService):'
604 615 stat["response_time"] / stat["requests"], 3
605 616 )
606 617 qual_requests = (
607 stat["satisfying_requests"] + stat["tolerated_requests"] / 2.0
618 stat["satisfying_requests"] + stat["tolerated_requests"] / 2.0
608 619 )
609 620 stat["apdex"] = round((qual_requests / stat["requests"]) * 100, 2)
610 621 stat["rpm"] = round(stat["requests"] / stat["total_minutes"], 2)
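
Note: the Apdex arithmetic above is only reindented by the formatter; the intent is that tolerated requests count at half weight toward the score. A worked example with made-up numbers:

    # Worked example of the Apdex math in this hunk (numbers are invented).
    requests = 200
    errors = 10
    frustrating_requests = 20
    tolerated_requests = 40

    satisfying_requests = (
        requests - errors - frustrating_requests - tolerated_requests
    )  # 130
    qual_requests = satisfying_requests + tolerated_requests / 2.0  # 150.0
    apdex = round((qual_requests / requests) * 100, 2)  # 75.0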
@@ -98,7 +98,10 b' class SlowCallService(BaseService):'
98 98 calls_query = {
99 99 "aggs": {
100 100 "top_calls": {
101 "terms": {"field": "tags.statement_hash.values.keyword", "size": 15},
101 "terms": {
102 "field": "tags.statement_hash.values.keyword",
103 "size": 15,
104 },
102 105 "aggs": {
103 106 "top_calls_hits": {
104 107 "top_hits": {"sort": {"timestamp": "desc"}, "size": 5}
@@ -109,11 +112,7 b' class SlowCallService(BaseService):'
109 112 "query": {
110 113 "bool": {
111 114 "filter": [
112 {
113 "terms": {
114 "resource_id": [filter_settings["resource"][0]]
115 }
116 },
115 {"terms": {"resource_id": [filter_settings["resource"][0]]}},
117 116 {"terms": {"tags.statement_hash.values": hashes}},
118 117 {
119 118 "range": {
@@ -152,13 +152,13 b' def update_template():'
152 152 "mapping": {
153 153 "type": "object",
154 154 "properties": {
155 "values": {"type": "text", "analyzer": "tag_value",
156 "fields": {
157 "keyword": {
158 "type": "keyword",
159 "ignore_above": 256
160 }
161 }},
155 "values": {
156 "type": "text",
157 "analyzer": "tag_value",
158 "fields": {
159 "keyword": {"type": "keyword", "ignore_above": 256}
160 },
161 },
162 162 "numeric_values": {"type": "float"},
163 163 },
164 164 },
@@ -200,13 +200,11 b' def update_template():'
200 200 "fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
201 201 },
202 202 "tags": {"type": "object"},
203 "tag_list": {"type": "text", "analyzer": "tag_value",
204 "fields": {
205 "keyword": {
206 "type": "keyword",
207 "ignore_above": 256
208 }
209 }},
203 "tag_list": {
204 "type": "text",
205 "analyzer": "tag_value",
206 "fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
207 },
210 208 },
211 209 }
212 210
@@ -215,7 +213,7 b' def update_template():'
215 213 "settings": {
216 214 "index": {
217 215 "refresh_interval": "5s",
218 "translog": {"sync_interval": "5s", "durability": "async"}
216 "translog": {"sync_interval": "5s", "durability": "async"},
219 217 },
220 218 "number_of_shards": 5,
221 219 "analysis": shared_analysis,
@@ -240,7 +238,6 b' def update_template():'
240 238 "summed_duration": {"type": "float"},
241 239 "public": {"type": "boolean"},
242 240 # report
243
244 241 "report_id": {"type": "keyword", "index": True},
245 242 "http_status": {"type": "integer"},
246 243 "ip": {"type": "keyword", "index": True},
@@ -252,17 +249,13 b' def update_template():'
252 249 "end_time": {"type": "date"},
253 250 "duration": {"type": "float"},
254 251 "tags": {"type": "object"},
255 "tag_list": {"type": "text", "analyzer": "tag_value",
256 "fields": {
257 "keyword": {
258 "type": "keyword",
259 "ignore_above": 256
260 }
261 }},
252 "tag_list": {
253 "type": "text",
254 "analyzer": "tag_value",
255 "fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
256 },
262 257 "extra": {"type": "object"},
263
264 258 # report stats
265
266 259 "report_stat_id": {"type": "keyword", "index": True},
267 260 "timestamp": {"type": "date"},
268 261 "permanent": {"type": "boolean"},
@@ -272,17 +265,13 b' def update_template():'
272 265 "type": "text",
273 266 "fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
274 267 },
275
276 268 "join_field": {
277 269 "type": "join",
278 "relations": {
279 "report_group": ["report", "report_stat"]
280 }
281 }
282
270 "relations": {"report_group": ["report", "report_stat"]},
271 },
283 272 },
284 273 }
285 }
274 },
286 275 }
287 276
288 277 Datastores.es.indices.put_template("rcae_reports", body=report_schema)
@@ -301,15 +290,15 b' def update_template():'
301 290 "number_of_shards": 5,
302 291 "analysis": shared_analysis,
303 292 },
304 "mappings": {
305 "log": logs_mapping,
306 },
293 "mappings": {"log": logs_mapping},
307 294 }
308 295
309 296 Datastores.es.indices.put_template("rcae_logs", body=log_template)
310 297
311 298 slow_call_mapping = copy.deepcopy(shared_log_mapping)
312 slow_call_mapping["properties"]["slow_call_id"] = slow_call_mapping["properties"]["pg_id"]
299 slow_call_mapping["properties"]["slow_call_id"] = slow_call_mapping["properties"][
300 "pg_id"
301 ]
313 302 del slow_call_mapping["properties"]["pg_id"]
314 303
315 304 slow_call_template = {
@@ -322,9 +311,7 b' def update_template():'
322 311 "number_of_shards": 5,
323 312 "analysis": shared_analysis,
324 313 },
325 "mappings": {
326 "log": slow_call_mapping,
327 },
314 "mappings": {"log": slow_call_mapping},
328 315 }
329 316
330 317 Datastores.es.indices.put_template("rcae_slow_calls", body=slow_call_template)
@@ -343,15 +330,15 b' def update_template():'
343 330 "number_of_shards": 5,
344 331 "analysis": shared_analysis,
345 332 },
346 "mappings": {
347 "log": metric_mapping,
348 },
333 "mappings": {"log": metric_mapping},
349 334 }
350 335
351 336 Datastores.es.indices.put_template("rcae_metrics", body=metrics_template)
352 337
353 338 uptime_metric_mapping = copy.deepcopy(shared_log_mapping)
354 uptime_metric_mapping["properties"]["uptime_id"] = uptime_metric_mapping["properties"]["pg_id"]
339 uptime_metric_mapping["properties"]["uptime_id"] = uptime_metric_mapping[
340 "properties"
341 ]["pg_id"]
355 342 del uptime_metric_mapping["properties"]["pg_id"]
356 343
357 344 uptime_metrics_template = {
@@ -364,12 +351,12 b' def update_template():'
364 351 "number_of_shards": 5,
365 352 "analysis": shared_analysis,
366 353 },
367 "mappings": {
368 "log": shared_log_mapping,
369 },
354 "mappings": {"log": shared_log_mapping},
370 355 }
371 356
372 Datastores.es.indices.put_template("rcae_uptime_metrics", body=uptime_metrics_template)
357 Datastores.es.indices.put_template(
358 "rcae_uptime_metrics", body=uptime_metrics_template
359 )
373 360
374 361
375 362 def reindex_reports():
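
Note: each log-like template above is derived from `shared_log_mapping` by deep-copying it and renaming the generic `pg_id` field, which keeps the per-type mappings from drifting apart. The pattern in isolation, with the mapping trimmed down:

    import copy

    # Trimmed-down shared mapping; the real one carries many more fields.
    shared_log_mapping = {
        "properties": {"pg_id": {"type": "keyword", "index": True}}
    }

    # Derive a per-type mapping by renaming the id field.
    slow_call_mapping = copy.deepcopy(shared_log_mapping)
    slow_call_mapping["properties"]["slow_call_id"] = slow_call_mapping[
        "properties"
    ]["pg_id"]
    del slow_call_mapping["properties"]["pg_id"]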
@@ -146,11 +146,7 b' def common_tags(request):'
146 146
147 147 resources = list(filter_settings["resource"])
148 148 query = {
149 "query": {
150 "bool": {
151 "filter": [{"terms": {"resource_id": list(resources)}}]
152 }
153 }
149 "query": {"bool": {"filter": [{"terms": {"resource_id": list(resources)}}]}}
154 150 }
155 151 start_date = filter_settings.get("start_date")
156 152 end_date = filter_settings.get("end_date")
@@ -199,18 +195,10 b' def common_values(request):'
199 195 resources = list(filter_settings["resource"])
200 196 tag_name = filter_settings["tags"][0]["value"][0]
201 197
202 and_part = [
203 {"terms": {"resource_id": list(resources)}},
204 ]
198 and_part = [{"terms": {"resource_id": list(resources)}}]
205 199 if filter_settings["namespace"]:
206 200 and_part.append({"terms": {"namespace": filter_settings["namespace"]}})
207 query = {
208 "query": {
209 "bool": {
210 "filter": and_part
211 }
212 }
213 }
201 query = {"query": {"bool": {"filter": and_part}}}
214 202 query["aggs"] = {
215 203 "sub_agg": {"terms": {"field": "tags.{}.values".format(tag_name), "size": 50}}
216 204 }
@@ -30,7 +30,7 b' install:'
30 30 - travis_retry pip install -U setuptools pip tox
31 31
32 32 script:
33 - travis_retry tox
33 - travis_retry tox -- -vv
34 34
35 35 services:
36 36 - postgresql
@@ -12,4 +12,4 b' commands='
12 12 pip install -r backend/requirements.txt
13 13 pip install -e backend
14 14 appenlight-reindex-elasticsearch -c testing.ini -t all
15 pytest backend/src/appenlight/tests
15 pytest backend/src/appenlight/tests {posargs}
@@ -26,7 +26,7 b' matrix:'
26 26 install:
27 27 - wget ${ES_DOWNLOAD_URL}
28 28 - tar -xzf elasticsearch-oss-${ES_VERSION}.tar.gz
29 - ./elasticsearch-oss-${ES_VERSION}/bin/elasticsearch &
29 - ./elasticsearch-${ES_VERSION}/bin/elasticsearch &
30 30 - travis_retry pip install -U setuptools pip tox
31 31
32 32 script:
@@ -34,7 +34,6 b' script:'
34 34
35 35 services:
36 36 - postgresql
37 - elasticsearch
38 37 - redis
39 38
40 39 before_script:
@@ -25,8 +25,8 b' matrix:'
25 25
26 26 install:
27 27 - wget ${ES_DOWNLOAD_URL}
28 - tar -xzf elasticsearch-${ES_VERSION}.tar.gz
29 - ./elasticsearch-${ES_VERSION}/bin/elasticsearch &
28 - tar -xzf elasticsearch-oss-${ES_VERSION}.tar.gz
29 - ./elasticsearch-oss-${ES_VERSION}/bin/elasticsearch &
30 30 - travis_retry pip install -U setuptools pip tox
31 31
32 32 script:
@@ -9,13 +9,13 b' notifications:'
9 9 matrix:
10 10 include:
11 11 - python: 3.5
12 env: TOXENV=py35
12 env: TOXENV=py35 ES_VERSION=6.6.2 ES_DOWNLOAD_URL=https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-${ES_VERSION}.tar.gz
13 13 - python: 3.6
14 env: TOXENV=py36
14 env: TOXENV=py36 ES_VERSION=6.6.2 ES_DOWNLOAD_URL=https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-${ES_VERSION}.tar.gz
15 15 addons:
16 16 postgresql: "9.6"
17 17 - python: 3.6
18 env: TOXENV=py36 PGPORT=5432
18 env: TOXENV=py36 PGPORT=5432 ES_VERSION=6.6.2 ES_DOWNLOAD_URL=https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-${ES_VERSION}.tar.gz
19 19 addons:
20 20 postgresql: "10"
21 21 apt:
@@ -24,6 +24,9 b' matrix:'
24 24 - postgresql-client-10
25 25
26 26 install:
27 - wget ${ES_DOWNLOAD_URL}
28 - tar -xzf elasticsearch-${ES_VERSION}.tar.gz
29 - ./elasticsearch-${ES_VERSION}/bin/elasticsearch &
27 30 - travis_retry pip install -U setuptools pip tox
28 31
29 32 script:
@@ -215,8 +215,7 b' def update_template():'
215 215 "settings": {
216 216 "index": {
217 217 "refresh_interval": "5s",
218 "translog": {"sync_interval": "5s", "durability": "async"},
219 "mapping": {"single_type": True}
218 "translog": {"sync_interval": "5s", "durability": "async"}
220 219 },
221 220 "number_of_shards": 5,
222 221 "analysis": shared_analysis,
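
Note: `index.mapping.single_type` was a transitional 5.x setting; Elasticsearch 6.x creates single-type indices by default and no longer honors the flag, which is presumably why these hunks drop it. What the settings block looks like afterwards:

    # Index settings after the removal; 6.x indices are single-type
    # by default, so the transitional 5.x flag is gone.
    index_settings = {
        "index": {
            "refresh_interval": "5s",
            "translog": {"sync_interval": "5s", "durability": "async"},
        },
        "number_of_shards": 5,
    }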
@@ -298,7 +297,6 b' def update_template():'
298 297 "index": {
299 298 "refresh_interval": "5s",
300 299 "translog": {"sync_interval": "5s", "durability": "async"},
301 "mapping": {"single_type": True}
302 300 },
303 301 "number_of_shards": 5,
304 302 "analysis": shared_analysis,
@@ -320,7 +318,6 b' def update_template():'
320 318 "index": {
321 319 "refresh_interval": "5s",
322 320 "translog": {"sync_interval": "5s", "durability": "async"},
323 "mapping": {"single_type": True}
324 321 },
325 322 "number_of_shards": 5,
326 323 "analysis": shared_analysis,
@@ -342,7 +339,6 b' def update_template():'
342 339 "index": {
343 340 "refresh_interval": "5s",
344 341 "translog": {"sync_interval": "5s", "durability": "async"},
345 "mapping": {"single_type": True}
346 342 },
347 343 "number_of_shards": 5,
348 344 "analysis": shared_analysis,
@@ -364,7 +360,6 b' def update_template():'
364 360 "index": {
365 361 "refresh_interval": "5s",
366 362 "translog": {"sync_interval": "5s", "durability": "async"},
367 "mapping": {"single_type": True}
368 363 },
369 364 "number_of_shards": 5,
370 365 "analysis": shared_analysis,
@@ -208,7 +208,7 b' def es_index_name_limiter('
208 208 elif t == "metrics":
209 209 es_index_types.append("rcae_m_%s")
210 210 elif t == "uptime":
211 es_index_types.append("rcae_u_%s")
211 es_index_types.append("rcae_uptime_ce_%s")
212 212 elif t == "slow_calls":
213 213 es_index_types.append("rcae_sc_%s")
214 214
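
Note: the uptime pattern now matches the renamed `rcae_uptime_ce_*` template introduced later in this PR. The full type-to-prefix map implied by the templates, as a plain dict (`%s` is filled with a date suffix elsewhere in `es_index_name_limiter`):

    # Index-name prefixes matching the rcae_* templates in this PR.
    ES_INDEX_PATTERNS = {
        "reports": "rcae_r_%s",
        "logs": "rcae_l_%s",
        "metrics": "rcae_m_%s",
        "uptime": "rcae_uptime_ce_%s",
        "slow_calls": "rcae_sc_%s",
    }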
@@ -239,7 +239,7 b' def add_reports(resource_id, request_params, dataset, **kwargs):'
239 239 @celery.task(queue="es", default_retry_delay=600, max_retries=144)
240 240 def add_reports_es(report_group_docs, report_docs):
241 241 for k, v in report_group_docs.items():
242 to_update = {"_index": k, "_type": "report_group"}
242 to_update = {"_index": k, "_type": "report"}
243 243 [i.update(to_update) for i in v]
244 244 elasticsearch.helpers.bulk(Datastores.es, v)
245 245 for k, v in report_docs.items():
@@ -259,7 +259,7 b' def add_reports_slow_calls_es(es_docs):'
259 259 @celery.task(queue="es", default_retry_delay=600, max_retries=144)
260 260 def add_reports_stats_rows_es(es_docs):
261 261 for k, v in es_docs.items():
262 to_update = {"_index": k, "_type": "log"}
262 to_update = {"_index": k, "_type": "report"}
263 263 [i.update(to_update) for i in v]
264 264 elasticsearch.helpers.bulk(Datastores.es, v)
265 265
@@ -287,7 +287,7 b' def add_logs(resource_id, request_params, dataset, **kwargs):'
287 287 if entry["primary_key"] is None:
288 288 es_docs[log_entry.partition_id].append(log_entry.es_doc())
289 289
290 # 2nd pass to delete all log entries from db foe same pk/ns pair
290 # 2nd pass to delete all log entries from db for same pk/ns pair
291 291 if ns_pairs:
292 292 ids_to_delete = []
293 293 es_docs = collections.defaultdict(list)
@@ -112,7 +112,7 b' class Log(Base, BaseModel):'
112 112 else None,
113 113 }
114 114 return {
115 "pg_id": str(self.log_id),
115 "log_id": str(self.log_id),
116 116 "delete_hash": self.delete_hash,
117 117 "resource_id": self.resource_id,
118 118 "request_id": self.request_id,
@@ -60,6 +60,7 b' class Metric(Base, BaseModel):'
60 60 }
61 61
62 62 return {
63 "metric_id": self.pkey,
63 64 "resource_id": self.resource_id,
64 65 "timestamp": self.timestamp,
65 66 "namespace": self.namespace,
@@ -314,7 +314,7 b' class Report(Base, BaseModel):'
314 314 "bool": {
315 315 "filter": [
316 316 {"term": {"group_id": self.group_id}},
317 {"range": {"pg_id": {"lt": self.id}}},
317 {"range": {"report_id": {"lt": self.id}}},
318 318 ]
319 319 }
320 320 },
@@ -324,7 +324,7 b' class Report(Base, BaseModel):'
324 324 body=query, index=self.partition_id, doc_type="report"
325 325 )
326 326 if result["hits"]["total"]:
327 return result["hits"]["hits"][0]["_source"]["pg_id"]
327 return result["hits"]["hits"][0]["_source"]["report_id"]
328 328
329 329 def get_next_in_group(self, request):
330 330 query = {
@@ -333,7 +333,7 b' class Report(Base, BaseModel):'
333 333 "bool": {
334 334 "filter": [
335 335 {"term": {"group_id": self.group_id}},
336 {"range": {"pg_id": {"gt": self.id}}},
336 {"range": {"report_id": {"gt": self.id}}},
337 337 ]
338 338 }
339 339 },
@@ -343,7 +343,7 b' class Report(Base, BaseModel):'
343 343 body=query, index=self.partition_id, doc_type="report"
344 344 )
345 345 if result["hits"]["total"]:
346 return result["hits"]["hits"][0]["_source"]["pg_id"]
346 return result["hits"]["hits"][0]["_source"]["report_id"]
347 347
348 348 def get_public_url(self, request=None, report_group=None, _app_url=None):
349 349 """
@@ -469,7 +469,7 b' class Report(Base, BaseModel):'
469 469 tags["user_name"] = {"value": [self.username], "numeric_value": None}
470 470 return {
471 471 "_id": str(self.id),
472 "pg_id": str(self.id),
472 "report_id": str(self.id),
473 473 "resource_id": self.resource_id,
474 474 "http_status": self.http_status or "",
475 475 "start_time": self.start_time,
@@ -482,9 +482,14 b' class Report(Base, BaseModel):'
482 482 "request_id": self.request_id,
483 483 "ip": self.ip,
484 484 "group_id": str(self.group_id),
485 "_parent": str(self.group_id),
485 "type": "report",
486 "join_field": {
487 "name": "report",
488 "parent": str(self.group_id)
489 },
486 490 "tags": tags,
487 491 "tag_list": tag_list,
492 "_routing": str(self.group_id)
488 493 }
489 494
490 495 @property
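
Note: this hunk swaps the `_parent` meta-field (removed in ES 6) for the `join` field: a child document now names its parent inside `join_field` and must be routed to the parent's shard, hence the explicit `_routing`. The two document shapes reduced to their join-related keys, with hypothetical ids:

    # Parent document: only names its side of the relation.
    report_group_doc = {
        "group_id": "123",
        "type": "report_group",
        "join_field": {"name": "report_group"},
    }

    # Child document: names its parent and is routed to the parent's
    # shard so both land in the same place.
    report_doc = {
        "report_id": "456",
        "group_id": "123",
        "type": "report",
        "join_field": {"name": "report", "parent": "123"},
        "_routing": "123",
    }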
@@ -518,7 +523,7 b' def after_update(mapper, connection, target):'
518 523
519 524 def after_delete(mapper, connection, target):
520 525 if not hasattr(target, "_skip_ft_index"):
521 query = {"query": {"term": {"pg_id": target.id}}}
526 query = {"query": {"term": {"report_id": target.id}}}
522 527 Datastores.es.delete_by_query(
523 528 index=target.partition_id, doc_type="report", body=query, conflicts="proceed"
524 529 )
@@ -178,7 +178,7 b' class ReportGroup(Base, BaseModel):'
178 178 def es_doc(self):
179 179 return {
180 180 "_id": str(self.id),
181 "pg_id": str(self.id),
181 "group_id": str(self.id),
182 182 "resource_id": self.resource_id,
183 183 "error": self.error,
184 184 "fixed": self.fixed,
@@ -190,6 +190,10 b' class ReportGroup(Base, BaseModel):'
190 190 "summed_duration": self.summed_duration,
191 191 "first_timestamp": self.first_timestamp,
192 192 "last_timestamp": self.last_timestamp,
193 "type": "report_group",
194 "join_field": {
195 "name": "report_group"
196 },
193 197 }
194 198
195 199 def set_notification_info(self, notify_10=False, notify_100=False):
@@ -258,14 +262,14 b' def after_insert(mapper, connection, target):'
258 262 if not hasattr(target, "_skip_ft_index"):
259 263 data = target.es_doc()
260 264 data.pop("_id", None)
261 Datastores.es.index(target.partition_id, "report_group", data, id=target.id)
265 Datastores.es.index(target.partition_id, "report", data, id=target.id)
262 266
263 267
264 268 def after_update(mapper, connection, target):
265 269 if not hasattr(target, "_skip_ft_index"):
266 270 data = target.es_doc()
267 271 data.pop("_id", None)
268 Datastores.es.index(target.partition_id, "report_group", data, id=target.id)
272 Datastores.es.index(target.partition_id, "report", data, id=target.id)
269 273
270 274
271 275 def after_delete(mapper, connection, target):
@@ -274,10 +278,6 b' def after_delete(mapper, connection, target):'
274 278 Datastores.es.delete_by_query(
275 279 index=target.partition_id, doc_type="report", body=query, conflicts="proceed"
276 280 )
277 query = {"query": {"term": {"pg_id": target.id}}}
278 Datastores.es.delete_by_query(
279 index=target.partition_id, doc_type="report_group", body=query, conflicts="proceed"
280 )
281 281
282 282
283 283 sa.event.listen(ReportGroup, "after_insert", after_insert)
@@ -48,12 +48,13 b' class ReportStat(Base, BaseModel):'
48 48 return {
49 49 "resource_id": self.resource_id,
50 50 "timestamp": self.start_interval,
51 "pg_id": str(self.id),
51 "report_stat_id": str(self.id),
52 52 "permanent": True,
53 53 "request_id": None,
54 54 "log_level": "ERROR",
55 55 "message": None,
56 56 "namespace": "appenlight.error",
57 "group_id": str(self.group_id),
57 58 "tags": {
58 59 "duration": {"values": self.duration, "numeric_values": self.duration},
59 60 "occurences": {
@@ -76,4 +77,5 b' class ReportStat(Base, BaseModel):'
76 77 "server_name",
77 78 "view_name",
78 79 ],
80 "type": "report_stat",
79 81 }
@@ -190,7 +190,7 b' class LogService(BaseService):'
190 190 [], item_count=item_count, items_per_page=items_per_page, **filter_settings
191 191 )
192 192 ordered_ids = tuple(
193 item["_source"]["pg_id"] for item in results.get("hits", [])
193 item["_source"]["log_id"] for item in results.get("hits", [])
194 194 )
195 195
196 196 sorted_instance_list = []
@@ -97,7 +97,7 b' class ReportGroupService(BaseService):'
97 97 es_query["query"]["bool"]["filter"].extend(tags)
98 98
99 99 result = Datastores.es.search(
100 body=es_query, index=index_names, doc_type="log", size=0
100 body=es_query, index=index_names, doc_type="report", size=0
101 101 )
102 102 series = []
103 103 for bucket in result["aggregations"]["parent_agg"]["buckets"]:
@@ -143,7 +143,7 b' class ReportGroupService(BaseService):'
143 143 "top_groups": {
144 144 "terms": {
145 145 "size": 5000,
146 "field": "_parent#report_group",
146 "field": "join_field#report_group",
147 147 "order": {"newest": "desc"},
148 148 },
149 149 "aggs": {
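
Note: with a `join` field, child documents expose their parent id under the generated `join_field#<parent>` sub-field, which is why the aggregation moves off `_parent#report_group`. The resulting aggregation body on its own:

    # Terms aggregation grouping report docs by their parent group id;
    # "join_field#report_group" is the sub-field ES 6 generates for
    # children of the report_group relation. "newest" refers to a
    # sibling sub-aggregation defined elsewhere in this query.
    aggs = {
        "top_groups": {
            "terms": {
                "size": 5000,
                "field": "join_field#report_group",
                "order": {"newest": "desc"},
            }
        }
    }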
@@ -315,7 +315,7 b' class ReportGroupService(BaseService):'
315 315 ordered_ids = []
316 316 if results:
317 317 for item in results["top_groups"]["buckets"]:
318 pg_id = item["top_reports_hits"]["hits"]["hits"][0]["_source"]["pg_id"]
318 pg_id = item["top_reports_hits"]["hits"]["hits"][0]["_source"]["report_id"]
319 319 ordered_ids.append(pg_id)
320 320 log.info(filter_settings)
321 321 paginator = paginate.Page(
@@ -340,7 +340,7 b' class RequestMetricService(BaseService):'
340 340 for hit in bucket["top_calls_hits"]["hits"]["hits"]:
341 341 details[bucket["key"]].append(
342 342 {
343 "report_id": hit["_source"]["pg_id"],
343 "report_id": hit["_source"]["request_metric_id"],
344 344 "group_id": hit["_source"]["group_id"],
345 345 }
346 346 )
@@ -88,7 +88,7 b' class SlowCall(Base, BaseModel):'
88 88 doc = {
89 89 "resource_id": self.resource_id,
90 90 "timestamp": self.timestamp,
91 "pg_id": str(self.id),
91 "slow_call_id": str(self.id),
92 92 "permanent": False,
93 93 "request_id": None,
94 94 "log_level": "UNKNOWN",
@@ -17,6 +17,7 b''
17 17 import argparse
18 18 import datetime
19 19 import logging
20 import copy
20 21
21 22 import sqlalchemy as sa
22 23 import elasticsearch.exceptions
@@ -34,7 +35,6 b' from appenlight.models.log import Log'
34 35 from appenlight.models.slow_call import SlowCall
35 36 from appenlight.models.metric import Metric
36 37
37
38 38 log = logging.getLogger(__name__)
39 39
40 40 tables = {
@@ -128,7 +128,20 b' def main():'
128 128
129 129 def update_template():
130 130 try:
131 Datastores.es.indices.delete_template("rcae")
131 Datastores.es.indices.delete_template("rcae_reports")
132 except elasticsearch.exceptions.NotFoundError as e:
133 log.error(e)
134
135 try:
136 Datastores.es.indices.delete_template("rcae_logs")
137 except elasticsearch.exceptions.NotFoundError as e:
138 log.error(e)
139 try:
140 Datastores.es.indices.delete_template("rcae_slow_calls")
141 except elasticsearch.exceptions.NotFoundError as e:
142 log.error(e)
143 try:
144 Datastores.es.indices.delete_template("rcae_metrics")
132 145 except elasticsearch.exceptions.NotFoundError as e:
133 146 log.error(e)
134 147 log.info("updating elasticsearch template")
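
Note: the four near-identical try/except blocks could be folded into a loop; a behavior-equivalent sketch, assuming the module-level `Datastores` and `log` objects from this file:

    import elasticsearch.exceptions

    # Equivalent to the four blocks above: drop each template if present,
    # logging (but tolerating) the ones that do not exist yet.
    for name in ("rcae_reports", "rcae_logs", "rcae_slow_calls", "rcae_metrics"):
        try:
            Datastores.es.indices.delete_template(name)
        except elasticsearch.exceptions.NotFoundError as e:
            log.error(e)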
@@ -153,37 +166,69 b' def update_template():'
153 166 }
154 167 ]
155 168
156 template_schema = {
157 "template": "rcae_*",
169 shared_analysis = {
170 "analyzer": {
171 "url_path": {
172 "type": "custom",
173 "char_filter": [],
174 "tokenizer": "path_hierarchy",
175 "filter": [],
176 },
177 "tag_value": {
178 "type": "custom",
179 "char_filter": [],
180 "tokenizer": "keyword",
181 "filter": ["lowercase"],
182 },
183 }
184 }
185
186 shared_log_mapping = {
187 "_all": {"enabled": False},
188 "dynamic_templates": tag_templates,
189 "properties": {
190 "pg_id": {"type": "keyword", "index": True},
191 "delete_hash": {"type": "keyword", "index": True},
192 "resource_id": {"type": "integer"},
193 "timestamp": {"type": "date"},
194 "permanent": {"type": "boolean"},
195 "request_id": {"type": "keyword", "index": True},
196 "log_level": {"type": "text", "analyzer": "simple"},
197 "message": {"type": "text", "analyzer": "simple"},
198 "namespace": {
199 "type": "text",
200 "fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
201 },
202 "tags": {"type": "object"},
203 "tag_list": {"type": "text", "analyzer": "tag_value",
204 "fields": {
205 "keyword": {
206 "type": "keyword",
207 "ignore_above": 256
208 }
209 }},
210 },
211 }
212
213 report_schema = {
214 "template": "rcae_r_*",
158 215 "settings": {
159 216 "index": {
160 217 "refresh_interval": "5s",
161 218 "translog": {"sync_interval": "5s", "durability": "async"},
219 "mapping": {"single_type": True}
162 220 },
163 221 "number_of_shards": 5,
164 "analysis": {
165 "analyzer": {
166 "url_path": {
167 "type": "custom",
168 "char_filter": [],
169 "tokenizer": "path_hierarchy",
170 "filter": [],
171 },
172 "tag_value": {
173 "type": "custom",
174 "char_filter": [],
175 "tokenizer": "keyword",
176 "filter": ["lowercase"],
177 },
178 }
179 },
222 "analysis": shared_analysis,
180 223 },
181 224 "mappings": {
182 "report_group": {
225 "report": {
183 226 "_all": {"enabled": False},
184 227 "dynamic_templates": tag_templates,
185 228 "properties": {
186 "pg_id": {"type": "keyword", "index": True},
229 "type": {"type": "keyword", "index": True},
230 # report group
231 "group_id": {"type": "keyword", "index": True},
187 232 "resource_id": {"type": "integer"},
188 233 "priority": {"type": "integer"},
189 234 "error": {"type": "text", "analyzer": "simple"},
@@ -195,20 +240,13 b' def update_template():'
195 240 "average_duration": {"type": "float"},
196 241 "summed_duration": {"type": "float"},
197 242 "public": {"type": "boolean"},
198 },
199 },
200 "report": {
201 "_all": {"enabled": False},
202 "dynamic_templates": tag_templates,
203 "properties": {
204 "pg_id": {"type": "keyword", "index": True},
205 "resource_id": {"type": "integer"},
206 "group_id": {"type": "keyword"},
243 # report
244
245 "report_id": {"type": "keyword", "index": True},
207 246 "http_status": {"type": "integer"},
208 247 "ip": {"type": "keyword", "index": True},
209 248 "url_domain": {"type": "text", "analyzer": "simple"},
210 249 "url_path": {"type": "text", "analyzer": "url_path"},
211 "error": {"type": "text", "analyzer": "simple"},
212 250 "report_type": {"type": "integer"},
213 251 "start_time": {"type": "date"},
214 252 "request_id": {"type": "keyword", "index": True},
@@ -223,45 +261,126 b' def update_template():'
223 261 }
224 262 }},
225 263 "extra": {"type": "object"},
226 },
227 "_parent": {"type": "report_group"},
228 },
229 "log": {
230 "_all": {"enabled": False},
231 "dynamic_templates": tag_templates,
232 "properties": {
233 "pg_id": {"type": "keyword", "index": True},
234 "delete_hash": {"type": "keyword", "index": True},
235 "resource_id": {"type": "integer"},
264
265 # report stats
266
267 "report_stat_id": {"type": "keyword", "index": True},
236 268 "timestamp": {"type": "date"},
237 269 "permanent": {"type": "boolean"},
238 "request_id": {"type": "keyword", "index": True},
239 270 "log_level": {"type": "text", "analyzer": "simple"},
240 271 "message": {"type": "text", "analyzer": "simple"},
241 272 "namespace": {
242 273 "type": "text",
243 274 "fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
244 275 },
245 "tags": {"type": "object"},
246 "tag_list": {"type": "text", "analyzer": "tag_value",
247 "fields": {
248 "keyword": {
249 "type": "keyword",
250 "ignore_above": 256
251 }
252 }},
276
277 "join_field": {
278 "type": "join",
279 "relations": {
280 "report_group": ["report", "report_stat"]
281 }
282 }
283
253 284 },
285 }
286 }
287 }
288
289 Datastores.es.indices.put_template("rcae_reports", body=report_schema)
290
291 logs_mapping = copy.deepcopy(shared_log_mapping)
292 logs_mapping["properties"]["log_id"] = logs_mapping["properties"]["pg_id"]
293 del logs_mapping["properties"]["pg_id"]
294
295 log_template = {
296 "template": "rcae_l_*",
297 "settings": {
298 "index": {
299 "refresh_interval": "5s",
300 "translog": {"sync_interval": "5s", "durability": "async"},
301 "mapping": {"single_type": True}
254 302 },
303 "number_of_shards": 5,
304 "analysis": shared_analysis,
305 },
306 "mappings": {
307 "log": logs_mapping,
255 308 },
256 309 }
257 310
258 Datastores.es.indices.put_template("rcae", body=template_schema)
311 Datastores.es.indices.put_template("rcae_logs", body=log_template)
312
313 slow_call_mapping = copy.deepcopy(shared_log_mapping)
314 slow_call_mapping["properties"]["slow_call_id"] = slow_call_mapping["properties"]["pg_id"]
315 del slow_call_mapping["properties"]["pg_id"]
316
317 slow_call_template = {
318 "template": "rcae_sc_*",
319 "settings": {
320 "index": {
321 "refresh_interval": "5s",
322 "translog": {"sync_interval": "5s", "durability": "async"},
323 "mapping": {"single_type": True}
324 },
325 "number_of_shards": 5,
326 "analysis": shared_analysis,
327 },
328 "mappings": {
329 "log": slow_call_mapping,
330 },
331 }
332
333 Datastores.es.indices.put_template("rcae_slow_calls", body=slow_call_template)
334
335 metric_mapping = copy.deepcopy(shared_log_mapping)
336 metric_mapping["properties"]["metric_id"] = metric_mapping["properties"]["pg_id"]
337 del metric_mapping["properties"]["pg_id"]
338
339 metrics_template = {
340 "template": "rcae_m_*",
341 "settings": {
342 "index": {
343 "refresh_interval": "5s",
344 "translog": {"sync_interval": "5s", "durability": "async"},
345 "mapping": {"single_type": True}
346 },
347 "number_of_shards": 5,
348 "analysis": shared_analysis,
349 },
350 "mappings": {
351 "log": metric_mapping,
352 },
353 }
354
355 Datastores.es.indices.put_template("rcae_metrics", body=metrics_template)
356
357 uptime_metric_mapping = copy.deepcopy(shared_log_mapping)
358 uptime_metric_mapping["properties"]["uptime_id"] = uptime_metric_mapping["properties"]["pg_id"]
359 del uptime_metric_mapping["properties"]["pg_id"]
360
361 uptime_metrics_template = {
362 "template": "rcae_uptime_ce_*",
363 "settings": {
364 "index": {
365 "refresh_interval": "5s",
366 "translog": {"sync_interval": "5s", "durability": "async"},
367 "mapping": {"single_type": True}
368 },
369 "number_of_shards": 5,
370 "analysis": shared_analysis,
371 },
372 "mappings": {
373 "log": shared_log_mapping,
374 },
375 }
376
377 Datastores.es.indices.put_template("rcae_uptime_metrics", body=uptime_metrics_template)
259 378
260 379
261 380 def reindex_reports():
262 381 reports_groups_tables = detect_tables("reports_groups_p_")
263 382 try:
264 Datastores.es.indices.delete("rcae_r*")
383 Datastores.es.indices.delete("rcae_r_*")
265 384 except elasticsearch.exceptions.NotFoundError as e:
266 385 log.error(e)
267 386
@@ -285,7 +404,7 b' def reindex_reports():'
285 404 name = partition_table.name
286 405 log.info("round {}, {}".format(i, name))
287 406 for k, v in es_docs.items():
288 to_update = {"_index": k, "_type": "report_group"}
407 to_update = {"_index": k, "_type": "report"}
289 408 [i.update(to_update) for i in v]
290 409 elasticsearch.helpers.bulk(Datastores.es, v)
291 410
@@ -343,7 +462,7 b' def reindex_reports():'
343 462 name = partition_table.name
344 463 log.info("round {}, {}".format(i, name))
345 464 for k, v in es_docs.items():
346 to_update = {"_index": k, "_type": "log"}
465 to_update = {"_index": k, "_type": "report"}
347 466 [i.update(to_update) for i in v]
348 467 elasticsearch.helpers.bulk(Datastores.es, v)
349 468
@@ -352,7 +471,7 b' def reindex_reports():'
352 471
353 472 def reindex_logs():
354 473 try:
355 Datastores.es.indices.delete("rcae_l*")
474 Datastores.es.indices.delete("rcae_l_*")
356 475 except elasticsearch.exceptions.NotFoundError as e:
357 476 log.error(e)
358 477
@@ -388,7 +507,7 b' def reindex_logs():'
388 507
389 508 def reindex_metrics():
390 509 try:
391 Datastores.es.indices.delete("rcae_m*")
510 Datastores.es.indices.delete("rcae_m_*")
392 511 except elasticsearch.exceptions.NotFoundError as e:
393 512 log.error(e)
394 513
@@ -422,7 +541,7 b' def reindex_metrics():'
422 541
423 542 def reindex_slow_calls():
424 543 try:
425 Datastores.es.indices.delete("rcae_sc*")
544 Datastores.es.indices.delete("rcae_sc_*")
426 545 except elasticsearch.exceptions.NotFoundError as e:
427 546 log.error(e)
428 547
@@ -31,7 +31,7 b' class ReportStatService(BaseService):'
31 31 "aggs": {
32 32 "reports": {
33 33 "aggs": {
34 "sub_agg": {"value_count": {"field": "tags.group_id.values"}}
34 "sub_agg": {"value_count": {"field": "tags.group_id.values.keyword"}}
35 35 },
36 36 "filter": {
37 37 "bool": {
@@ -36,7 +36,7 b' pygments==2.3.1'
36 36 lxml==4.3.2
37 37 paginate==0.5.6
38 38 paginate-sqlalchemy==0.3.0
39 elasticsearch>=2.0.0,<3.0.0
39 elasticsearch>=5.0.0,<6.0.0
40 40 mock==1.0.1
41 41 itsdangerous==1.1.0
42 42 camplight==0.9.6
@@ -325,10 +325,9 b' def add_logs(resource_id, request_params, dataset, **kwargs):'
325 325 query = {"query": {"terms": {"delete_hash": batch}}}
326 326
327 327 try:
328 Datastores.es.transport.perform_request(
329 "DELETE",
330 "/{}/{}/_query".format(es_index, "log"),
331 body=query,
328 Datastores.es.delete_by_query(
329 index=es_index, doc_type="log",
330 body=query, conflicts="proceed"
332 331 )
333 332 except elasticsearch.exceptions.NotFoundError as exc:
334 333 msg = "skipping index {}".format(es_index)
@@ -703,6 +702,6 b' def logs_cleanup(resource_id, filter_settings):'
703 702 )
704 703 query.delete(synchronize_session=False)
705 704 request.tm.commit()
706 Datastores.es.transport.perform_request(
707 "DELETE", "/{}/{}/_query".format("rcae_l_*", "log"), body=es_query
705 Datastores.es.delete_by_query(
706 index="rcae_l_*", doc_type="log", body=es_query, conflicts="proceed"
708 707 )
@@ -519,8 +519,8 b' def after_update(mapper, connection, target):'
519 519 def after_delete(mapper, connection, target):
520 520 if not hasattr(target, "_skip_ft_index"):
521 521 query = {"query": {"term": {"pg_id": target.id}}}
522 Datastores.es.transport.perform_request(
523 "DELETE", "/{}/{}/_query".format(target.partition_id, "report"), body=query
522 Datastores.es.delete_by_query(
523 index=target.partition_id, doc_type="report", body=query, conflicts="proceed"
524 524 )
525 525
526 526
@@ -271,14 +271,12 b' def after_update(mapper, connection, target):'
271 271 def after_delete(mapper, connection, target):
272 272 query = {"query": {"term": {"group_id": target.id}}}
273 273 # delete by query
274 Datastores.es.transport.perform_request(
275 "DELETE", "/{}/{}/_query".format(target.partition_id, "report"), body=query
274 Datastores.es.delete_by_query(
275 index=target.partition_id, doc_type="report", body=query, conflicts="proceed"
276 276 )
277 277 query = {"query": {"term": {"pg_id": target.id}}}
278 Datastores.es.transport.perform_request(
279 "DELETE",
280 "/{}/{}/_query".format(target.partition_id, "report_group"),
281 body=query,
278 Datastores.es.delete_by_query(
279 index=target.partition_id, doc_type="report_group", body=query, conflicts="proceed"
282 280 )
283 281
284 282
@@ -64,13 +64,13 b' class ReportGroupService(BaseService):'
64 64 "groups": {
65 65 "aggs": {
66 66 "sub_agg": {
67 "value_count": {"field": "tags.group_id.values"}
67 "value_count": {"field": "tags.group_id.values.keyword"}
68 68 }
69 69 },
70 70 "filter": {"exists": {"field": "tags.group_id.values"}},
71 71 }
72 72 },
73 "terms": {"field": "tags.group_id.values", "size": limit},
73 "terms": {"field": "tags.group_id.values.keyword", "size": limit},
74 74 }
75 75 },
76 76 "query": {
@@ -143,7 +143,7 b' class ReportGroupService(BaseService):'
143 143 "top_groups": {
144 144 "terms": {
145 145 "size": 5000,
146 "field": "_parent",
146 "field": "_parent#report_group",
147 147 "order": {"newest": "desc"},
148 148 },
149 149 "aggs": {
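In ES 5 the parent link is stored in doc values under `_parent#<parent_type>`, so a terms aggregation over parent ids has to name the parent type explicitly; the bare `_parent` field is no longer aggregatable. A minimal sketch of the changed aggregation:

    top_groups_agg = {
        "terms": {
            # child type "report" joined to parent type "report_group"
            "field": "_parent#report_group",
            "size": 5000,
            "order": {"newest": "desc"},
        }
    }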
@@ -445,10 +445,12 b' class ReportGroupService(BaseService):'
445 445 "aggs": {
446 446 "types": {
447 447 "aggs": {
448 "sub_agg": {"terms": {"field": "tags.type.values"}}
448 "sub_agg": {"terms": {"field": "tags.type.values.keyword"}}
449 449 },
450 450 "filter": {
451 "and": [{"exists": {"field": "tags.type.values"}}]
451 "bool": {
452 "filter": [{"exists": {"field": "tags.type.values"}}]
453 }
452 454 },
453 455 }
454 456 },
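ES 5 removed the `and`/`or`/`not` filters; the equivalent is a `bool` query whose `filter` clauses must all match and run in non-scoring filter context. The same rewrite repeats throughout the service queries below, including the follow-up hunk that appends clauses via the new path. A sketch (the group id value is made up):

    # ES 2.x (removed in 5.x):
    #   {"and": [clause_a, clause_b]}
    # ES 5.x equivalent - non-scoring, all clauses must match:
    types_filter = {
        "bool": {
            "filter": [
                {"exists": {"field": "tags.type.values"}},
            ]
        }
    }
    # appending an extra clause now goes through ["bool"]["filter"]:
    types_filter["bool"]["filter"].append(
        {"terms": {"tags.group_id.values": ["some-group-id"]}}
    )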
@@ -485,7 +487,7 b' class ReportGroupService(BaseService):'
485 487 }
486 488 if group_id:
487 489 parent_agg = es_query["aggs"]["parent_agg"]
488 filters = parent_agg["aggs"]["types"]["filter"]["and"]
490 filters = parent_agg["aggs"]["types"]["filter"]["bool"]["filter"]
489 491 filters.append({"terms": {"tags.group_id.values": [group_id]}})
490 492
491 493 index_names = es_index_name_limiter(
@@ -34,10 +34,12 b' class ReportStatService(BaseService):'
34 34 "sub_agg": {"value_count": {"field": "tags.group_id.values"}}
35 35 },
36 36 "filter": {
37 "and": [
38 {"terms": {"resource_id": [resource_id]}},
39 {"exists": {"field": "tags.group_id.values"}},
40 ]
37 "bool": {
38 "filter": [
39 {"terms": {"resource_id": [resource_id]}},
40 {"exists": {"field": "tags.group_id.values"}},
41 ]
42 }
41 43 },
42 44 }
43 45 },
@@ -235,6 +235,8 b' class RequestMetricService(BaseService):'
235 235 script_text = "doc['tags.main.numeric_values'].value / {}".format(
236 236 total_time_spent
237 237 )
238 if total_time_spent == 0:
239 script_text = '0'
238 240
239 241 if index_names and filter_settings["resource"]:
240 242 es_query = {
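The added guard avoids a divide-by-zero inside the aggregation script when no time was recorded: computing the constant in Python and shipping a literal `"0"` script is simpler than branching inside the script itself. A sketch of the resulting logic (where `total_time_spent` comes from is an assumption; in the service it is derived from an earlier query):

    total_time_spent = 0  # e.g. summed from a prior aggregation (assumption)
    if total_time_spent == 0:
        script_text = "0"  # constant script: no traffic means a 0% share
    else:
        script_text = "doc['tags.main.numeric_values'].value / {}".format(
            total_time_spent
        )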
@@ -255,7 +257,6 b' class RequestMetricService(BaseService):'
255 257 "aggs": {
256 258 "sub_agg": {
257 259 "sum": {
258 "lang": "expression",
259 260 "script": script_text,
260 261 }
261 262 }
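Dropping `"lang": "expression"` lets the sum aggregation fall back to ES 5's default scripting language, Painless, which understands the `doc['field'].value` syntax used here; pinning the old Lucene expressions language is no longer needed. Minimal shape of the resulting agg (the script body is illustrative):

    script_text = "doc['tags.main.numeric_values'].value / 100.0"  # example
    sub_agg = {
        "sum": {
            # no "lang" key: ES 5 scripts default to Painless
            "script": script_text,
        }
    }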
@@ -276,7 +277,7 b' class RequestMetricService(BaseService):'
276 277 },
277 278 },
278 279 "terms": {
279 "field": "tags.view_name.values",
280 "field": "tags.view_name.values.keyword",
280 281 "order": {"percentage>sub_agg": "desc"},
281 282 "size": 15,
282 283 },
@@ -317,7 +318,7 b' class RequestMetricService(BaseService):'
317 318 query = {
318 319 "aggs": {
319 320 "top_reports": {
320 "terms": {"field": "tags.view_name.values", "size": len(series)},
321 "terms": {"field": "tags.view_name.values.keyword", "size": len(series)},
321 322 "aggs": {
322 323 "top_calls_hits": {
323 324 "top_hits": {"sort": {"start_time": "desc"}, "size": 5}
@@ -390,18 +391,20 b' class RequestMetricService(BaseService):'
390 391 }
391 392 },
392 393 "filter": {
393 "and": [
394 {
395 "range": {
396 "tags.main.numeric_values": {"gte": "4"}
397 }
398 },
399 {
400 "exists": {
401 "field": "tags.requests.numeric_values"
402 }
403 },
404 ]
394 "bool": {
395 "filter": [
396 {
397 "range": {
398 "tags.main.numeric_values": {"gte": "4"}
399 }
400 },
401 {
402 "exists": {
403 "field": "tags.requests.numeric_values"
404 }
405 },
406 ]
407 }
405 408 },
406 409 },
407 410 "main": {
@@ -431,7 +434,7 b' class RequestMetricService(BaseService):'
431 434 }
432 435 },
433 436 "filter": {
434 "and": [
437 "bool": {"filter": [
435 438 {
436 439 "range": {
437 440 "tags.main.numeric_values": {"gte": "1"}
@@ -447,11 +450,11 b' class RequestMetricService(BaseService):'
447 450 "field": "tags.requests.numeric_values"
448 451 }
449 452 },
450 ]
453 ]}
451 454 },
452 455 },
453 456 },
454 "terms": {"field": "tags.server_name.values", "size": 999999},
457 "terms": {"field": "tags.server_name.values.keyword", "size": 999999},
455 458 }
456 459 },
457 460 "query": {
@@ -517,18 +520,20 b' class RequestMetricService(BaseService):'
517 520 }
518 521 },
519 522 "filter": {
520 "and": [
521 {"terms": {"tags.type.values": [report_type]}},
522 {
523 "exists": {
524 "field": "tags.occurences.numeric_values"
525 }
526 },
527 ]
523 "bool": {
524 "filter": [
525 {"terms": {"tags.type.values": [report_type]}},
526 {
527 "exists": {
528 "field": "tags.occurences.numeric_values"
529 }
530 },
531 ]
532 }
528 533 },
529 534 }
530 535 },
531 "terms": {"field": "tags.server_name.values", "size": 999999},
536 "terms": {"field": "tags.server_name.values.keyword", "size": 999999},
532 537 }
533 538 },
534 539 "query": {
@@ -586,10 +591,10 b' class RequestMetricService(BaseService):'
586 591 server_stats = list(stats.values())
587 592 for stat in server_stats:
588 593 stat["satisfying_requests"] = (
589 stat["requests"]
590 - stat["errors"]
591 - stat["frustrating_requests"]
592 - stat["tolerated_requests"]
594 stat["requests"]
595 - stat["errors"]
596 - stat["frustrating_requests"]
597 - stat["tolerated_requests"]
593 598 )
594 599 if stat["satisfying_requests"] < 0:
595 600 stat["satisfying_requests"] = 0
@@ -599,7 +604,7 b' class RequestMetricService(BaseService):'
599 604 stat["response_time"] / stat["requests"], 3
600 605 )
601 606 qual_requests = (
602 stat["satisfying_requests"] + stat["tolerated_requests"] / 2.0
607 stat["satisfying_requests"] + stat["tolerated_requests"] / 2.0
603 608 )
604 609 stat["apdex"] = round((qual_requests / stat["requests"]) * 100, 2)
605 610 stat["rpm"] = round(stat["requests"] / stat["total_minutes"], 2)
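The two hunks above only re-indent the Apdex arithmetic, but the formula is worth spelling out: satisfied requests plus half of the tolerating ones, over total samples, scaled here to a percentage. A worked sketch with made-up numbers:

    requests = 100
    errors = 5
    frustrating_requests = 10  # far above the response-time threshold
    tolerated_requests = 20    # above the threshold but still acceptable

    satisfying_requests = max(
        requests - errors - frustrating_requests - tolerated_requests, 0
    )  # 65
    qual_requests = satisfying_requests + tolerated_requests / 2.0  # 75.0
    apdex = round(qual_requests / requests * 100, 2)  # 75.0 (classic Apdex x 100)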
@@ -50,7 +50,7 b' class SlowCallService(BaseService):'
50 50 "aggs": {
51 51 "sub_agg": {
52 52 "value_count": {
53 "field": "tags.statement_hash.values"
53 "field": "tags.statement_hash.values.keyword"
54 54 }
55 55 }
56 56 },
@@ -60,7 +60,7 b' class SlowCallService(BaseService):'
60 60 },
61 61 },
62 62 "terms": {
63 "field": "tags.statement_hash.values",
63 "field": "tags.statement_hash.values.keyword",
64 64 "order": {"duration>sub_agg": "desc"},
65 65 "size": 15,
66 66 },
@@ -98,7 +98,7 b' class SlowCallService(BaseService):'
98 98 calls_query = {
99 99 "aggs": {
100 100 "top_calls": {
101 "terms": {"field": "tags.statement_hash.values", "size": 15},
101 "terms": {"field": "tags.statement_hash.values.keyword", "size": 15},
102 102 "aggs": {
103 103 "top_calls_hits": {
104 104 "top_hits": {"sort": {"timestamp": "desc"}, "size": 5}
@@ -139,7 +139,13 b' def update_template():'
139 139 "mapping": {
140 140 "type": "object",
141 141 "properties": {
142 "values": {"type": "string", "analyzer": "tag_value"},
142 "values": {"type": "text", "analyzer": "tag_value",
143 "fields": {
144 "keyword": {
145 "type": "keyword",
146 "ignore_above": 256
147 }
148 }},
143 149 "numeric_values": {"type": "float"},
144 150 },
145 151 },
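The dynamic template now indexes every matching `tags.*.values` field twice: as analyzed `text` (the `tag_value` analyzer, for matching inside tag strings) and as an exact `keyword` sub-field, with `ignore_above: 256` skipping oversized values rather than rejecting the document. What one indexed tag exposes under that mapping, as a sketch (index name and value are made up, `es` is an `elasticsearch.Elasticsearch` client):

    doc = {"tags": {"view_name": {"values": "users:list"}}}
    es.index(index="rcae_r_2017_3", doc_type="report", body=doc)
    # searchable, analyzed:       tags.view_name.values
    # aggregatable, exact value:  tags.view_name.values.keyword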
@@ -177,10 +183,10 b' def update_template():'
177 183 "_all": {"enabled": False},
178 184 "dynamic_templates": tag_templates,
179 185 "properties": {
180 "pg_id": {"type": "string", "index": "not_analyzed"},
186 "pg_id": {"type": "keyword", "index": True},
181 187 "resource_id": {"type": "integer"},
182 188 "priority": {"type": "integer"},
183 "error": {"type": "string", "analyzer": "simple"},
189 "error": {"type": "text", "analyzer": "simple"},
184 190 "read": {"type": "boolean"},
185 191 "occurences": {"type": "integer"},
186 192 "fixed": {"type": "boolean"},
@@ -195,21 +201,27 b' def update_template():'
195 201 "_all": {"enabled": False},
196 202 "dynamic_templates": tag_templates,
197 203 "properties": {
198 "pg_id": {"type": "string", "index": "not_analyzed"},
204 "pg_id": {"type": "keyword", "index": True},
199 205 "resource_id": {"type": "integer"},
200 "group_id": {"type": "string"},
206 "group_id": {"type": "keyword"},
201 207 "http_status": {"type": "integer"},
202 "ip": {"type": "string", "index": "not_analyzed"},
203 "url_domain": {"type": "string", "analyzer": "simple"},
204 "url_path": {"type": "string", "analyzer": "url_path"},
205 "error": {"type": "string", "analyzer": "simple"},
208 "ip": {"type": "keyword", "index": True},
209 "url_domain": {"type": "text", "analyzer": "simple"},
210 "url_path": {"type": "text", "analyzer": "url_path"},
211 "error": {"type": "text", "analyzer": "simple"},
206 212 "report_type": {"type": "integer"},
207 213 "start_time": {"type": "date"},
208 "request_id": {"type": "string", "index": "not_analyzed"},
214 "request_id": {"type": "keyword", "index": True},
209 215 "end_time": {"type": "date"},
210 216 "duration": {"type": "float"},
211 217 "tags": {"type": "object"},
212 "tag_list": {"type": "string", "analyzer": "tag_value"},
218 "tag_list": {"type": "text", "analyzer": "tag_value",
219 "fields": {
220 "keyword": {
221 "type": "keyword",
222 "ignore_above": 256
223 }
224 }},
213 225 "extra": {"type": "object"},
214 226 },
215 227 "_parent": {"type": "report_group"},
@@ -218,17 +230,26 b' def update_template():'
218 230 "_all": {"enabled": False},
219 231 "dynamic_templates": tag_templates,
220 232 "properties": {
221 "pg_id": {"type": "string", "index": "not_analyzed"},
222 "delete_hash": {"type": "string", "index": "not_analyzed"},
233 "pg_id": {"type": "keyword", "index": True},
234 "delete_hash": {"type": "keyword", "index": True},
223 235 "resource_id": {"type": "integer"},
224 236 "timestamp": {"type": "date"},
225 237 "permanent": {"type": "boolean"},
226 "request_id": {"type": "string", "index": "not_analyzed"},
227 "log_level": {"type": "string", "analyzer": "simple"},
228 "message": {"type": "string", "analyzer": "simple"},
229 "namespace": {"type": "string", "index": "not_analyzed"},
238 "request_id": {"type": "keyword", "index": True},
239 "log_level": {"type": "text", "analyzer": "simple"},
240 "message": {"type": "text", "analyzer": "simple"},
241 "namespace": {
242 "type": "text",
243 "fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
244 },
230 245 "tags": {"type": "object"},
231 "tag_list": {"type": "string", "analyzer": "tag_value"},
246 "tag_list": {"type": "text", "analyzer": "tag_value",
247 "fields": {
248 "keyword": {
249 "type": "keyword",
250 "ignore_above": 256
251 }
252 }},
232 253 },
233 254 },
234 255 },
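The mapping hunks above all follow the standard ES 2 → ES 5 translation: `{"type": "string", "index": "not_analyzed"}` becomes `keyword`, analyzed strings become `text` with the same analyzer, and fields needed for both search and aggregation gain a `keyword` sub-field. The mechanical rule, as a sketch (not part of the diff):

    def es2_to_es5_field(mapping):
        # translate a single ES 2.x field mapping to its ES 5.x equivalent
        if mapping.get("type") != "string":
            return mapping  # integer/float/date/boolean/object are unchanged
        if mapping.get("index") == "not_analyzed":
            # the diff also spells out "index": True, the ES 5 default
            return {"type": "keyword"}
        translated = {"type": "text"}
        if "analyzer" in mapping:
            translated["analyzer"] = mapping["analyzer"]
        return translated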
@@ -171,13 +171,13 b' def common_tags(request):'
171 171 if namespaces:
172 172 filter_part.append({"terms": {"namespace": namespaces}})
173 173
174 query["aggs"] = {"sub_agg": {"terms": {"field": "tag_list", "size": 50}}}
174 query["aggs"] = {"sub_agg": {"terms": {"field": "tag_list.keyword", "size": 50}}}
175 175 # tags
176 176 index_names = es_index_name_limiter(ixtypes=[config.get("datasource", "logs")])
177 177 result = Datastores.es.search(body=query, index=index_names, doc_type="log", size=0)
178 178 tag_buckets = result["aggregations"]["sub_agg"].get("buckets", [])
179 179 # namespaces
180 query["aggs"] = {"sub_agg": {"terms": {"field": "namespace", "size": 50}}}
180 query["aggs"] = {"sub_agg": {"terms": {"field": "namespace.keyword", "size": 50}}}
181 181 result = Datastores.es.search(body=query, index=index_names, doc_type="log", size=0)
182 182 namespaces_buckets = result["aggregations"]["sub_agg"].get("buckets", [])
183 183 return {
@@ -181,7 +181,7 b' class Report(Base, BaseModel):'
181 181 request_data = data.get("request", {})
182 182
183 183 self.request = request_data
184 self.request_stats = data.get("request_stats", {})
184 self.request_stats = data.get("request_stats") or {}
185 185 traceback = data.get("traceback")
186 186 if not traceback:
187 187 traceback = data.get("frameinfo")
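The last hunk is subtle: `data.get("request_stats", {})` still returns `None` when the key is present but explicitly null in the payload, while `data.get("request_stats") or {}` normalizes both the missing-key and the null case to an empty dict. A quick demonstration:

    data = {"request_stats": None}  # key present but null in the JSON payload
    assert data.get("request_stats", {}) is None    # default ignored: key exists
    assert (data.get("request_stats") or {}) == {}  # falsy values normalized too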