Show More
@@ -1,265 +1,267 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 |
|
2 | |||
3 | # Copyright (C) 2010-2016 RhodeCode GmbH |
|
3 | # Copyright (C) 2010-2016 RhodeCode GmbH | |
4 | # |
|
4 | # | |
5 | # This program is free software: you can redistribute it and/or modify |
|
5 | # This program is free software: you can redistribute it and/or modify | |
6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
6 | # it under the terms of the GNU Affero General Public License, version 3 | |
7 | # (only), as published by the Free Software Foundation. |
|
7 | # (only), as published by the Free Software Foundation. | |
8 | # |
|
8 | # | |
9 | # This program is distributed in the hope that it will be useful, |
|
9 | # This program is distributed in the hope that it will be useful, | |
10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | # GNU General Public License for more details. |
|
12 | # GNU General Public License for more details. | |
13 | # |
|
13 | # | |
14 | # You should have received a copy of the GNU Affero General Public License |
|
14 | # You should have received a copy of the GNU Affero General Public License | |
15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
16 | # |
|
16 | # | |
17 | # This program is dual-licensed. If you wish to learn more about the |
|
17 | # This program is dual-licensed. If you wish to learn more about the | |
18 | # AppEnlight Enterprise Edition, including its added features, Support |
|
18 | # AppEnlight Enterprise Edition, including its added features, Support | |
19 | # services, and proprietary license terms, please see |
|
19 | # services, and proprietary license terms, please see | |
20 | # https://rhodecode.com/licenses/ |
|
20 | # https://rhodecode.com/licenses/ | |
21 |
|
21 | |||
22 | import logging |
|
22 | import logging | |
23 | import sqlalchemy as sa |
|
23 | import sqlalchemy as sa | |
24 |
|
24 | |||
25 | from datetime import datetime |
|
25 | from datetime import datetime | |
26 |
|
26 | |||
27 | from pyramid.threadlocal import get_current_request |
|
27 | from pyramid.threadlocal import get_current_request | |
28 | from sqlalchemy.dialects.postgresql import JSON |
|
28 | from sqlalchemy.dialects.postgresql import JSON | |
29 | from ziggurat_foundations.models.base import BaseModel |
|
29 | from ziggurat_foundations.models.base import BaseModel | |
30 |
|
30 | |||
31 | from appenlight.models import Base, get_db_session, Datastores |
|
31 | from appenlight.models import Base, get_db_session, Datastores | |
32 | from appenlight.lib.enums import ReportType |
|
32 | from appenlight.lib.enums import ReportType | |
33 | from appenlight.lib.rule import Rule |
|
33 | from appenlight.lib.rule import Rule | |
34 | from appenlight.lib.redis_keys import REDIS_KEYS |
|
34 | from appenlight.lib.redis_keys import REDIS_KEYS | |
35 | from appenlight.models.report import REPORT_TYPE_MATRIX |
|
35 | from appenlight.models.report import REPORT_TYPE_MATRIX | |
36 |
|
36 | |||
37 | log = logging.getLogger(__name__) |
|
37 | log = logging.getLogger(__name__) | |
38 |
|
38 | |||
39 |
|
39 | |||
class ReportGroup(Base, BaseModel):
    """
    Aggregates individual reports of the same kind for one application.

    A group tracks occurrence counters, duration aggregates and
    notification/postprocessing state, and is mirrored into Elasticsearch
    (see the ``after_insert``/``after_update``/``after_delete`` listeners
    registered at module level).
    """
    __tablename__ = 'reports_groups'
    __table_args__ = {'implicit_returning': False}

    id = sa.Column(sa.BigInteger(), nullable=False, primary_key=True)
    # owning application; rows follow the application on update/delete
    resource_id = sa.Column(sa.Integer(),
                            sa.ForeignKey('applications.resource_id',
                                          onupdate='CASCADE',
                                          ondelete='CASCADE'),
                            nullable=False,
                            index=True)
    # 1..10 scale, clamped in run_postprocessing(); 5 is the neutral default
    priority = sa.Column(sa.Integer, nullable=False, index=True, default=5,
                         server_default='5')
    first_timestamp = sa.Column(sa.DateTime(), default=datetime.utcnow,
                                server_default=sa.func.now())
    last_timestamp = sa.Column(sa.DateTime(), default=datetime.utcnow,
                               server_default=sa.func.now())
    error = sa.Column(sa.UnicodeText(), index=True)
    grouping_hash = sa.Column(sa.String(40), default='')
    # pkeys of postprocess actions already applied - see run_postprocessing()
    triggered_postprocesses_ids = sa.Column(JSON(), nullable=False,
                                            default=list)
    report_type = sa.Column(sa.Integer, default=1)
    total_reports = sa.Column(sa.Integer, default=1)
    # id of the newest Report in this group (see last_report_ref)
    last_report = sa.Column(sa.Integer)
    # NOTE: historical misspelling of "occurrences" - kept because the DB
    # column and the ES document field (es_doc) both use this exact name
    occurences = sa.Column(sa.Integer, default=1)
    average_duration = sa.Column(sa.Float, default=0)
    summed_duration = sa.Column(sa.Float, default=0)
    read = sa.Column(sa.Boolean(), index=True, default=False)
    fixed = sa.Column(sa.Boolean(), index=True, default=False)
    notified = sa.Column(sa.Boolean(), index=True, default=False)
    public = sa.Column(sa.Boolean(), index=True, default=False)

    reports = sa.orm.relationship('Report',
                                  lazy='dynamic',
                                  backref='report_group',
                                  cascade="all, delete-orphan",
                                  passive_deletes=True,
                                  passive_updates=True, )

    comments = sa.orm.relationship('ReportComment',
                                   lazy='dynamic',
                                   backref='report',
                                   cascade="all, delete-orphan",
                                   passive_deletes=True,
                                   passive_updates=True,
                                   order_by="ReportComment.comment_id")

    assigned_users = sa.orm.relationship('User',
                                         backref=sa.orm.backref(
                                             'assigned_reports_relation',
                                             lazy='dynamic',
                                             order_by=sa.desc(
                                                 "reports_groups.id")
                                         ),
                                         passive_deletes=True,
                                         passive_updates=True,
                                         secondary='reports_assignments',
                                         order_by="User.user_name")

    stats = sa.orm.relationship('ReportStat',
                                lazy='dynamic',
                                backref='report',
                                passive_deletes=True,
                                passive_updates=True, )

    # convenience handle on the newest report (joined via the plain
    # integer column ``last_report``, not a real FK)
    last_report_ref = sa.orm.relationship('Report',
                                          uselist=False,
                                          primaryjoin="ReportGroup.last_report "
                                                      "== Report.id",
                                          foreign_keys="Report.id",
                                          cascade="all, delete-orphan",
                                          passive_deletes=True,
                                          passive_updates=True, )

    def __repr__(self):
        return '<ReportGroup id:{}>'.format(self.id)

    def get_report(self, report_id=None, public=False):
        """
        Gets report with specific id or latest report if id was not specified
        """
        from .report import Report

        if not report_id:
            return self.last_report_ref
        else:
            return self.reports.filter(Report.id == report_id).first()

    def get_public_url(self, request, _app_url=None):
        """Return the absolute UI URL for this report group."""
        url = request.route_url('/', _app_url=_app_url)
        return (url + 'ui/report/%s') % self.id

    def run_postprocessing(self, report):
        """
        Alters report group priority based on postprocessing configuration
        """
        request = get_current_request()
        get_db_session(None, self).flush()
        for action in self.application.postprocess_conf:
            get_db_session(None, self).flush()
            rule_obj = Rule(action.rule, REPORT_TYPE_MATRIX)
            report_dict = report.get_dict(request)
            # if was not processed yet
            if (rule_obj.match(report_dict) and
                    action.pkey not in self.triggered_postprocesses_ids):
                action.postprocess(self)
                # this way sqla can track mutation of list
                self.triggered_postprocesses_ids = \
                    self.triggered_postprocesses_ids + [action.pkey]

        get_db_session(None, self).flush()
        # do not go out of bounds
        if self.priority < 1:
            self.priority = 1
        if self.priority > 10:
            self.priority = 10

    def get_dict(self, request):
        """
        Return a JSON-friendly dict of the group, enriched with fields
        pulled from its latest report and owning application.
        """
        instance_dict = super(ReportGroup, self).get_dict()
        # fetch the latest report once instead of re-resolving the
        # relationship for every single field below
        report = self.get_report()
        instance_dict['server_name'] = report.tags.get('server_name')
        instance_dict['view_name'] = report.tags.get('view_name')
        instance_dict['resource_name'] = self.application.resource_name
        instance_dict['report_type'] = report.report_type
        instance_dict['url_path'] = report.url_path
        instance_dict['front_url'] = report.get_public_url(request)
        # internal bookkeeping - not part of the public representation
        del instance_dict['triggered_postprocesses_ids']
        return instance_dict

    def es_doc(self):
        """Return the Elasticsearch document representing this group."""
        return {
            '_id': str(self.id),
            'pg_id': str(self.id),
            'resource_id': self.resource_id,
            'error': self.error,
            'fixed': self.fixed,
            'public': self.public,
            'read': self.read,
            'priority': self.priority,
            'occurences': self.occurences,
            'average_duration': self.average_duration,
            'summed_duration': self.summed_duration,
            'first_timestamp': self.first_timestamp,
            'last_timestamp': self.last_timestamp
        }

    def set_notification_info(self, notify_10=False, notify_100=False):
        """
        Update redis notification maps for notification job
        """
        current_time = datetime.utcnow().replace(second=0, microsecond=0)
        # global app counter
        key = REDIS_KEYS['counters']['reports_per_type'].format(
            self.report_type, current_time)
        Datastores.redis.incr(key)
        Datastores.redis.expire(key, 3600 * 24)
        # detailed app notification for alerts and notifications
        Datastores.redis.sadd(REDIS_KEYS['apps_that_had_reports'],
                              self.resource_id)
        Datastores.redis.sadd(REDIS_KEYS['apps_that_had_reports_alerting'],
                              self.resource_id)
        # only notify for exceptions here
        if self.report_type == ReportType.error:
            Datastores.redis.sadd(
                REDIS_KEYS['apps_that_had_reports'],
                self.resource_id)
            Datastores.redis.sadd(
                REDIS_KEYS['apps_that_had_error_reports_alerting'],
                self.resource_id)
        key = REDIS_KEYS['counters']['report_group_occurences'].format(self.id)
        Datastores.redis.incr(key)
        Datastores.redis.expire(key, 3600 * 24)
        key = REDIS_KEYS['counters']['report_group_occurences_alerting'].format(self.id)
        Datastores.redis.incr(key)
        Datastores.redis.expire(key, 3600 * 24)

        # one-shot flags for the 10th/100th occurrence notifications,
        # valid for 24h
        if notify_10:
            key = REDIS_KEYS['counters'][
                'report_group_occurences_10th'].format(self.id)
            Datastores.redis.setex(key, 3600 * 24, 1)
        if notify_100:
            key = REDIS_KEYS['counters'][
                'report_group_occurences_100th'].format(self.id)
            Datastores.redis.setex(key, 3600 * 24, 1)

        key = REDIS_KEYS['reports_to_notify_per_type_per_app'].format(
            self.report_type, self.resource_id)
        Datastores.redis.sadd(key, self.id)
        Datastores.redis.expire(key, 3600 * 24)
        key = REDIS_KEYS['reports_to_notify_per_type_per_app_alerting'].format(
            self.report_type, self.resource_id)
        Datastores.redis.sadd(key, self.id)
        Datastores.redis.expire(key, 3600 * 24)

    @property
    def partition_id(self):
        # ES indices are partitioned per month of the first occurrence
        return 'rcae_r_%s' % self.first_timestamp.strftime('%Y_%m')
237 |
|
237 | |||
238 |
|
238 | |||
def after_insert(mapper, connection, target):
    """
    SQLAlchemy mapper listener: index a freshly inserted report group
    in Elasticsearch, unless the instance is flagged to skip indexing.
    """
    if hasattr(target, '_skip_ft_index'):
        return
    document = target.es_doc()
    # ES id is passed explicitly; drop the embedded one
    document.pop('_id', None)
    Datastores.es.index(target.partition_id, 'report_group',
                        document, id=target.id)
245 |
|
245 | |||
246 |
|
246 | |||
def after_update(mapper, connection, target):
    """
    SQLAlchemy mapper listener: re-index an updated report group in
    Elasticsearch, unless the instance is flagged to skip indexing.
    """
    if hasattr(target, '_skip_ft_index'):
        return
    payload = target.es_doc()
    # the document id is supplied via the ``id=`` argument instead
    payload.pop('_id', None)
    Datastores.es.index(target.partition_id, 'report_group',
                        payload, id=target.id)
253 |
|
253 | |||
254 |
|
254 | |||
def after_delete(mapper, connection, target):
    """
    SQLAlchemy mapper listener: purge this group's documents from
    Elasticsearch - both the child reports and the group document itself.
    """
    # TODO: routing seems unnecessary, need to test a bit more
    # (previously the 'report' delete passed
    #  query_params={'routing': str(target.id)})
    child_query = {'term': {'group_id': target.id}}
    Datastores.es.delete_by_query(target.partition_id, 'report', child_query)
    group_query = {'term': {'pg_id': target.id}}
    Datastores.es.delete_by_query(target.partition_id, 'report_group',
                                  group_query)
261 |
|
263 | |||
262 |
|
264 | |||
# keep the Elasticsearch mirror in sync with ORM lifecycle events
for _event_name, _listener in (('after_insert', after_insert),
                               ('after_update', after_update),
                               ('after_delete', after_delete)):
    sa.event.listen(ReportGroup, _event_name, _listener)
General Comments 0
You need to be logged in to leave comments.
Login now