@@ -1,80 +1,81 @@
|
1 | 1 | 1bd3e92b7e2e2d2024152b34bb88dff1db544a71 v4.0.0 |
|
2 | 2 | 170c5398320ea6cddd50955e88d408794c21d43a v4.0.1 |
|
3 | 3 | c3fe200198f5aa34cf2e4066df2881a9cefe3704 v4.1.0 |
|
4 | 4 | 7fd5c850745e2ea821fb4406af5f4bff9b0a7526 v4.1.1 |
|
5 | 5 | 41c87da28a179953df86061d817bc35533c66dd2 v4.1.2 |
|
6 | 6 | baaf9f5bcea3bae0ef12ae20c8b270482e62abb6 v4.2.0 |
|
7 | 7 | 32a70c7e56844a825f61df496ee5eaf8c3c4e189 v4.2.1 |
|
8 | 8 | fa695cdb411d294679ac081d595ac654e5613b03 v4.3.0 |
|
9 | 9 | 0e4dc11b58cad833c513fe17bac39e6850edf959 v4.3.1 |
|
10 | 10 | 8a876f48f5cb1d018b837db28ff928500cb32cfb v4.4.0 |
|
11 | 11 | 8dd86b410b1aac086ffdfc524ef300f896af5047 v4.4.1 |
|
12 | 12 | d2514226abc8d3b4f6fb57765f47d1b6fb360a05 v4.4.2 |
|
13 | 13 | 27d783325930af6dad2741476c0d0b1b7c8415c2 v4.5.0 |
|
14 | 14 | 7f2016f352abcbdba4a19d4039c386e9629449da v4.5.1 |
|
15 | 15 | 416fec799314c70a5c780fb28b3357b08869333a v4.5.2 |
|
16 | 16 | 27c3b85fafc83143e6678fbc3da69e1615bcac55 v4.6.0 |
|
17 | 17 | 5ad13deb9118c2a5243d4032d4d9cc174e5872db v4.6.1 |
|
18 | 18 | 2be921e01fa24bb102696ada596f87464c3666f6 v4.7.0 |
|
19 | 19 | 7198bdec29c2872c974431d55200d0398354cdb1 v4.7.1 |
|
20 | 20 | bd1c8d230fe741c2dfd7100a0ef39fd0774fd581 v4.7.2 |
|
21 | 21 | 9731914f89765d9628dc4dddc84bc9402aa124c8 v4.8.0 |
|
22 | 22 | c5a2b7d0e4bbdebc4a62d7b624befe375207b659 v4.9.0 |
|
23 | 23 | d9aa3b27ac9f7e78359775c75fedf7bfece232f1 v4.9.1 |
|
24 | 24 | 4ba4d74981cec5d6b28b158f875a2540952c2f74 v4.10.0 |
|
25 | 25 | 0a6821cbd6b0b3c21503002f88800679fa35ab63 v4.10.1 |
|
26 | 26 | 434ad90ec8d621f4416074b84f6e9ce03964defb v4.10.2 |
|
27 | 27 | 68baee10e698da2724c6e0f698c03a6abb993bf2 v4.10.3 |
|
28 | 28 | 00821d3afd1dce3f4767cc353f84a17f7d5218a1 v4.10.4 |
|
29 | 29 | 22f6744ad8cc274311825f63f953e4dee2ea5cb9 v4.10.5 |
|
30 | 30 | 96eb24bea2f5f9258775245e3f09f6fa0a4dda01 v4.10.6 |
|
31 | 31 | 3121217a812c956d7dd5a5875821bd73e8002a32 v4.11.0 |
|
32 | 32 | fa98b454715ac5b912f39e84af54345909a2a805 v4.11.1 |
|
33 | 33 | 3982abcfdcc229a723cebe52d3a9bcff10bba08e v4.11.2 |
|
34 | 34 | 33195f145db9172f0a8f1487e09207178a6ab065 v4.11.3 |
|
35 | 35 | 194c74f33e32bbae6fc4d71ec5a999cff3c13605 v4.11.4 |
|
36 | 36 | 8fbd8b0c3ddc2fa4ac9e4ca16942a03eb593df2d v4.11.5 |
|
37 | 37 | f0609aa5d5d05a1ca2f97c3995542236131c9d8a v4.11.6 |
|
38 | 38 | b5b30547d90d2e088472a70c84878f429ffbf40d v4.12.0 |
|
39 | 39 | 9072253aa8894d20c00b4a43dc61c2168c1eff94 v4.12.1 |
|
40 | 40 | 6a517543ea9ef9987d74371bd2a315eb0b232dc9 v4.12.2 |
|
41 | 41 | 7fc0731b024c3114be87865eda7ab621cc957e32 v4.12.3 |
|
42 | 42 | 6d531c0b068c6eda62dddceedc9f845ecb6feb6f v4.12.4 |
|
43 | 43 | 3d6bf2d81b1564830eb5e83396110d2a9a93eb1e v4.13.0 |
|
44 | 44 | 5468fc89e708bd90e413cd0d54350017abbdbc0e v4.13.1 |
|
45 | 45 | 610d621550521c314ee97b3d43473ac0bcf06fb8 v4.13.2 |
|
46 | 46 | 7dc62c090881fb5d03268141e71e0940d7c3295d v4.13.3 |
|
47 | 47 | 9151328c1c46b72ba6f00d7640d9141e75aa1ca2 v4.14.0 |
|
48 | 48 | a47eeac5dfa41fa6779d90452affba4091c3ade8 v4.14.1 |
|
49 | 49 | 4b34ce0d2c3c10510626b3b65044939bb7a2cddf v4.15.0 |
|
50 | 50 | 14502561d22e6b70613674cd675ae9a604b7989f v4.15.1 |
|
51 | 51 | 4aaa40b605b01af78a9f6882eca561c54b525ef0 v4.15.2 |
|
52 | 52 | 797744642eca86640ed20bef2cd77445780abaec v4.16.0 |
|
53 | 53 | 6c3452c7c25ed35ff269690929e11960ed6ad7d3 v4.16.1 |
|
54 | 54 | 5d8057df561c4b6b81b6401aed7d2f911e6e77f7 v4.16.2 |
|
55 | 55 | 13acfc008896ef4c62546bab5074e8f6f89b4fa7 v4.17.0 |
|
56 | 56 | 45b9b610976f483877142fe75321808ce9ebac59 v4.17.1 |
|
57 | 57 | ad5bd0c4bd322fdbd04bb825a3d027e08f7a3901 v4.17.2 |
|
58 | 58 | 037f5794b55a6236d68f6485a485372dde6566e0 v4.17.3 |
|
59 | 59 | 83bc3100cfd6094c1d04f475ddb299b7dc3d0b33 v4.17.4 |
|
60 | 60 | e3de8c95baf8cc9109ca56aee8193a2cb6a54c8a v4.17.4 |
|
61 | 61 | f37a3126570477543507f0bc9d245ce75546181a v4.18.0 |
|
62 | 62 | 71d8791463e87b64c1a18475de330ee600d37561 v4.18.1 |
|
63 | 63 | 4bd6b75dac1d25c64885d4d49385e5533f21c525 v4.18.2 |
|
64 | 64 | 12ed92fe57f2e9fc7b71dc0b65e26c2da5c7085f v4.18.3 |
|
65 | 65 | ddef396a6567117de531d67d44c739cbbfc3eebb v4.19.0 |
|
66 | 66 | c0c65acd73914bf4368222d510afe1161ab8c07c v4.19.1 |
|
67 | 67 | 7ac623a4a2405917e2af660d645ded662011e40d v4.19.2 |
|
68 | 68 | ef7ffda65eeb90c3ba88590a6cb816ef9b0bc232 v4.19.3 |
|
69 | 69 | 3e635489bb7961df93b01e42454ad1a8730ae968 v4.20.0 |
|
70 | 70 | 7e2eb896a02ca7cd2cd9f0f853ef3dac3f0039e3 v4.20.1 |
|
71 | 71 | 8bb5fece08ab65986225b184e46f53d2a71729cb v4.21.0 |
|
72 | 72 | 90734aac31ee4563bbe665a43ff73190cc762275 v4.22.0 |
|
73 | 73 | a9655707f7cf4146affc51c12fe5ed8e02898a57 v4.23.0 |
|
74 | 74 | 56310d93b33b97535908ef9c7b0985b89bb7fad2 v4.23.1 |
|
75 | 75 | 7637c38528fa38c1eabc1fde6a869c20995a0da7 v4.23.2 |
|
76 | 76 | 6aeb4ac3ef7f0ac699c914740dad3688c9495e83 v4.24.0 |
|
77 | 77 | 6eaf953da06e468a4c4e5239d3d0e700bda6b163 v4.24.1 |
|
78 | 78 | f8161cbc2d94a935d3c395a0e758d9a094287169 v4.25.0 |
|
79 | 79 | 77fe47b5b39338e71b2c040de2c0359b529b6251 v4.25.1 |
|
80 | 80 | 27475bd8a718b9a00a37a8563c4927120865ad85 v4.25.2 |
|
81 | b4ba10dcb4ab67d02b8c5cff32a3827f6c4fdedb v4.26.0 |
@@ -1,33 +1,34 @@
|
1 | 1 | [DEFAULT] |
|
2 | 2 | done = false |
|
3 | 3 | |
|
4 | 4 | [task:bump_version] |
|
5 | 5 | done = true |
|
6 | 6 | |
|
7 | 7 | [task:rc_tools_pinned] |
|
8 | 8 | done = true |
|
9 | 9 | |
|
10 | 10 | [task:fixes_on_stable] |
|
11 | 11 | done = true |
|
12 | 12 | |
|
13 | 13 | [task:pip2nix_generated] |
|
14 | 14 | done = true |
|
15 | 15 | |
|
16 | 16 | [task:changelog_updated] |
|
17 | 17 | done = true |
|
18 | 18 | |
|
19 | 19 | [task:generate_api_docs] |
|
20 | 20 | done = true |
|
21 | 21 | |
|
22 | [task:updated_translation] | |
|
23 | done = true | |
|
24 | ||
|
22 | 25 | [release] |
|
23 | 26 | state = prepared |
|
24 | version = 4.2 | |
|
25 | ||
|
26 | [task:updated_translation] | |
|
27 | version = 4.26.0 | |
|
27 | 28 | |
|
28 | 29 | [task:generate_js_routes] |
|
29 | 30 | |
|
30 | 31 | [task:updated_trial_license] |
|
31 | 32 | |
|
32 | 33 | [task:generate_oss_licenses] |
|
33 | 34 |
@@ -1,104 +1,118 @@
|
1 | 1 | .. _repo-admin-tasks: |
|
2 | 2 | |
|
3 | 3 | Common Admin Tasks for Repositories |
|
4 | 4 | ----------------------------------- |
|
5 | 5 | |
|
6 | 6 | |
|
7 | 7 | Manually Force Delete Repository |
|
8 | 8 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ |
|
9 | 9 | |
|
10 | 10 | In case of attached forks or pull-requests, repositories should be archived. |
|
11 | 11 | Here is how to force delete a repository and remove all dependent objects: |
|
12 | 12 | |
|
13 | 13 | |
|
14 | 14 | .. code-block:: bash |
|
15 | 15 | :dedent: 1 |
|
16 | 16 | |
|
17 | 17 | # starts the ishell interactive prompt |
|
18 | 18 | $ rccontrol ishell enterprise-1 |
|
19 | 19 | |
|
20 | 20 | .. code-block:: python |
|
21 | 21 | :dedent: 1 |
|
22 | 22 | |
|
23 | 23 | In [4]: from rhodecode.model.repo import RepoModel |
|
24 | 24 | In [3]: repo = Repository.get_by_repo_name('test_repos/repo_with_prs') |
|
25 | 25 | In [5]: RepoModel().delete(repo, forks='detach', pull_requests='delete') |
|
26 | 26 | In [6]: Session().commit() |
|
27 | 27 | |
|
28 | 28 | |
|
29 | 29 | Below is a fully automated example to force delete repositories reading from a |
|
30 | 30 | file where each line is a repository name. This can be executed via a simple CLI command |
|
31 | 31 | without entering the interactive shell. |
|
32 | 32 | |
|
33 | 33 | Save the below content as a file named `repo_delete_task.py` |
|
34 | 34 | |
|
35 | 35 | |
|
36 | 36 | .. code-block:: python |
|
37 | 37 | :dedent: 1 |
|
38 | 38 | |
|
39 | 39 | from rhodecode.model.db import * |
|
40 | 40 | from rhodecode.model.repo import RepoModel |
|
41 | 41 | with open('delete_repos.txt', 'rb') as f: |
|
42 | 42 | # read all lines from file |
|
43 | 43 | repos = f.readlines() |
|
44 | 44 | for repo_name in repos: |
|
45 | 45 | repo_name = repo_name.strip() # cleanup the name just in case |
|
46 | 46 | repo = Repository.get_by_repo_name(repo_name) |
|
47 | 47 | if not repo: |
|
48 | 48 | raise Exception('Repo with name {} not found'.format(repo_name)) |
|
49 | 49 | RepoModel().delete(repo, forks='detach', pull_requests='delete') |
|
50 | 50 | Session().commit() |
|
51 | 51 | print('Removed repository {}'.format(repo_name)) |
|
52 | 52 | |
|
53 | 53 | |
|
54 | 54 | The code above will read the names of repositories from a file called `delete_repos.txt`. |
|
55 | 55 | Each line should represent a single name, e.g. `repo_name_1` or `repo_group/repo_name_2` |
|
56 | 56 | |
|
57 | 57 | Run this line from CLI to execute the code from the `repo_delete_task.py` file and |
|
58 | 58 | exit the ishell after the execution:: |
|
59 | 59 | |
|
60 | 60 | echo "%run repo_delete_task.py" | rccontrol ishell enterprise-1 |
|
61 | 61 | |
|
62 | 62 | |
|
63 | 63 | |
|
64 | 64 | |
|
65 | 65 | Bulk edit permissions for all repositories or groups |
|
66 | 66 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ |
|
67 | 67 | |
|
68 | 68 | In case permissions should be applied in bulk, here are two ways to apply |
|
69 | 69 | the permissions onto *all* repositories and/or repository groups. |
|
70 | 70 | |
|
71 | 71 | 1) Start by running the interactive ishell interface |
|
72 | 72 | |
|
73 | 73 | .. code-block:: bash |
|
74 | 74 | :dedent: 1 |
|
75 | 75 | |
|
76 | 76 | # starts the ishell interactive prompt |
|
77 | 77 | $ rccontrol ishell enterprise-1 |
|
78 | 78 | |
|
79 | 79 | |
|
80 | 80 | 2a) Add a user called 'admin' to all repositories with write permission. |
|
81 | 81 | Permissions can also be `repository.read`, `repository.admin`, `repository.none` |
|
82 | 82 | |
|
83 | 83 | .. code-block:: python |
|
84 | 84 | :dedent: 1 |
|
85 | 85 | |
|
86 | 86 | In [1]: from rhodecode.model.repo import RepoModel |
|
87 | 87 | In [2]: user = User.get_by_username('admin') |
|
88 | 88 | In [3]: permission_name = 'repository.write' |
|
89 | 89 | In [4]: for repo in Repository.get_all(): |
|
90 | 90 | ...: RepoModel().grant_user_permission(repo, user, permission_name) |
|
91 | 91 | ...: Session().commit() |
|
92 | 92 | |
|
93 | 93 | 2b) Add a user called 'admin' to all repository groups with write permission. |
|
94 | 94 | Permissions can also be `group.read`, `group.admin`, `group.none` |
|
95 | 95 | |
|
96 | 96 | .. code-block:: python |
|
97 | 97 | :dedent: 1 |
|
98 | 98 | |
|
99 | 99 | In [1]: from rhodecode.model.repo import RepoModel |
|
100 | 100 | In [2]: user = User.get_by_username('admin') |
|
101 | 101 | In [3]: permission_name = 'group.write' |
|
102 | 102 | In [4]: for repo_group in RepoGroup.get_all(): |
|
103 | 103 | ...: RepoGroupModel().grant_user_permission(repo_group, user, permission_name) |
|
104 |
|
104 | ...: Session().commit() | |
|
105 | ||
|
106 | ||
|
107 | Delete a problematic pull request | |
|
108 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | |
|
109 | ||
|
110 | .. code-block:: python | |
|
111 | :dedent: 1 | |
|
112 | ||
|
113 | In [1]: from rhodecode.model.pull_request import PullRequestModel | |
|
114 | In [2]: pullrequest_id = 123 | |
|
115 | In [3]: pr = PullRequest.get(pullrequest_id) | |
|
116 | In [4]: super_admin = User.get_first_super_admin() | |
|
117 | In [5]: PullRequestModel().delete(pr, super_admin) | |
|
118 | In [6]: Session().commit() |
@@ -1,56 +1,64 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2016-2020 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | from rhodecode.apps._base import ADMIN_PREFIX |
|
22 | 22 | |
|
23 | 23 | |
|
24 | 24 | def admin_routes(config): |
|
25 | 25 | from rhodecode.apps.ops.views import OpsView |
|
26 | 26 | |
|
27 | 27 | config.add_route( |
|
28 | 28 | name='ops_ping', |
|
29 | 29 | pattern='/ping') |
|
30 | 30 | config.add_view( |
|
31 | 31 | OpsView, |
|
32 | 32 | attr='ops_ping', |
|
33 | 33 | route_name='ops_ping', request_method='GET', |
|
34 | 34 | renderer='json_ext') |
|
35 | 35 | |
|
36 | 36 | config.add_route( |
|
37 | 37 | name='ops_error_test', |
|
38 | 38 | pattern='/error') |
|
39 | 39 | config.add_view( |
|
40 | 40 | OpsView, |
|
41 | 41 | attr='ops_error_test', |
|
42 | 42 | route_name='ops_error_test', request_method='GET', |
|
43 | 43 | renderer='json_ext') |
|
44 | 44 | |
|
45 | 45 | config.add_route( |
|
46 | 46 | name='ops_redirect_test', |
|
47 | 47 | pattern='/redirect') |
|
48 | 48 | config.add_view( |
|
49 | 49 | OpsView, |
|
50 | 50 | attr='ops_redirect_test', |
|
51 | 51 | route_name='ops_redirect_test', request_method='GET', |
|
52 | 52 | renderer='json_ext') |
|
53 | 53 | |
|
54 | config.add_route( | |
|
55 | name='ops_healthcheck', | |
|
56 | pattern='/status') | |
|
57 | config.add_view( | |
|
58 | OpsView, | |
|
59 | attr='ops_healthcheck', | |
|
60 | route_name='ops_healthcheck', request_method='GET', | |
|
61 | renderer='json_ext') | |
|
54 | 62 | |
|
55 | 63 | def includeme(config): |
|
56 | 64 | config.include(admin_routes, route_prefix=ADMIN_PREFIX + '/ops') |
@@ -1,74 +1,97 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2016-2020 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import time |
|
22 | 22 | import logging |
|
23 | 23 | |
|
24 | 24 | |
|
25 | 25 | from pyramid.httpexceptions import HTTPFound |
|
26 | 26 | |
|
27 | 27 | from rhodecode.apps._base import BaseAppView |
|
28 | 28 | from rhodecode.lib import helpers as h |
|
29 | from rhodecode.lib.auth import LoginRequired | |
|
30 | from rhodecode.model.db import UserApiKeys | |
|
29 | 31 | |
|
30 | 32 | log = logging.getLogger(__name__) |
|
31 | 33 | |
|
32 | 34 | |
|
33 | 35 | class OpsView(BaseAppView): |
|
34 | 36 | |
|
35 | 37 | def load_default_context(self): |
|
36 | 38 | c = self._get_local_tmpl_context() |
|
37 | 39 | c.user = c.auth_user.get_instance() |
|
38 | 40 | |
|
39 | 41 | return c |
|
40 | 42 | |
|
41 | 43 | def ops_ping(self): |
|
42 | 44 | data = { |
|
43 | 45 | 'instance': self.request.registry.settings.get('instance_id'), |
|
44 | 46 | } |
|
45 | 47 | if getattr(self.request, 'user'): |
|
46 | 48 | caller_name = 'anonymous' |
|
47 | 49 | if self.request.user.user_id: |
|
48 | 50 | caller_name = self.request.user.username |
|
49 | 51 | |
|
50 | 52 | data.update({ |
|
51 | 53 | 'caller_ip': self.request.user.ip_addr, |
|
52 | 54 | 'caller_name': caller_name, |
|
53 | 55 | }) |
|
54 | 56 | return {'ok': data} |
|
55 | 57 | |
|
56 | 58 | def ops_error_test(self): |
|
57 | 59 | """ |
|
58 | 60 | Test exception handling and emails on errors |
|
59 | 61 | """ |
|
60 | 62 | |
|
61 | 63 | class TestException(Exception): |
|
62 | 64 | pass |
|
63 | 65 | # add timeout so we add some sort of rate limiter |
|
64 | 66 | time.sleep(2) |
|
65 | 67 | msg = ('RhodeCode Enterprise test exception. ' |
|
66 | 68 | 'Client:{}. Generation time: {}.'.format(self.request.user, time.time())) |
|
67 | 69 | raise TestException(msg) |
|
68 | 70 | |
|
69 | 71 | def ops_redirect_test(self): |
|
70 | 72 | """ |
|
71 | 73 | Test redirect handling |
|
72 | 74 | """ |
|
73 | 75 | redirect_to = self.request.GET.get('to') or h.route_path('home') |
|
74 | 76 | raise HTTPFound(redirect_to) |
|
77 | ||
|
78 | @LoginRequired(auth_token_access=[UserApiKeys.ROLE_HTTP]) | |
|
79 | def ops_healthcheck(self): | |
|
80 | from rhodecode.lib.system_info import load_system_info | |
|
81 | ||
|
82 | vcsserver_info = load_system_info('vcs_server') | |
|
83 | if vcsserver_info: | |
|
84 | vcsserver_info = vcsserver_info['human_value'] | |
|
85 | ||
|
86 | db_info = load_system_info('database_info') | |
|
87 | if db_info: | |
|
88 | db_info = db_info['human_value'] | |
|
89 | ||
|
90 | health_spec = { | |
|
91 | 'caller_ip': self.request.user.ip_addr, | |
|
92 | 'vcsserver': vcsserver_info, | |
|
93 | 'db': db_info, | |
|
94 | } | |
|
95 | ||
|
96 | return {'healthcheck': health_spec} | |
|
97 |
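The `ops_healthcheck` view added above pairs with the new `/status` route registered under the admin ops prefix, and access is token-gated through `UserApiKeys.ROLE_HTTP`. Below is a minimal monitoring sketch; the instance URL and the `auth_token` query parameter value are placeholders and not part of this changeset.

    # Hedged sketch: poll the new healthcheck endpoint and print the payload.
    # Python 2 stdlib to match the codebase; adjust URL and token to your setup.
    import json
    import urllib2

    url = ('https://rhodecode.example.com/_admin/ops/status'
           '?auth_token=SECRET_TOKEN')
    response = urllib2.urlopen(url, timeout=10)
    health = json.load(response).get('healthcheck', {})
    print('vcsserver: %s, db: %s' % (health.get('vcsserver'), health.get('db')))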
@@ -1,69 +1,72 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2020 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import socket |
|
22 | 22 | import logging |
|
23 | 23 | |
|
24 | 24 | import rhodecode |
|
25 | 25 | from zope.cachedescriptors.property import Lazy as LazyProperty |
|
26 | 26 | from rhodecode.lib.celerylib.loader import ( |
|
27 | 27 | celery_app, RequestContextTask, get_logger) |
|
28 | 28 | |
|
29 | 29 | async_task = celery_app.task |
|
30 | 30 | |
|
31 | 31 | |
|
32 | 32 | log = logging.getLogger(__name__) |
|
33 | 33 | |
|
34 | 34 | |
|
35 | 35 | class ResultWrapper(object): |
|
36 | 36 | def __init__(self, task): |
|
37 | 37 | self.task = task |
|
38 | 38 | |
|
39 | 39 | @LazyProperty |
|
40 | 40 | def result(self): |
|
41 | 41 | return self.task |
|
42 | 42 | |
|
43 | 43 | |
|
44 | 44 | def run_task(task, *args, **kwargs): |
|
45 | 45 | log.debug('Got task `%s` for execution', task) |
|
46 | if task is None: | |
|
47 | raise ValueError('Got non-existing task for execution') | |
|
48 | ||
|
46 | 49 | if rhodecode.CELERY_ENABLED: |
|
47 | 50 | celery_is_up = False |
|
48 | 51 | try: |
|
49 | 52 | t = task.apply_async(args=args, kwargs=kwargs) |
|
50 | 53 | celery_is_up = True |
|
51 | 54 | log.debug('executing task %s:%s in async mode', t.task_id, task) |
|
52 | 55 | return t |
|
53 | 56 | |
|
54 | 57 | except socket.error as e: |
|
55 | 58 | if isinstance(e, IOError) and e.errno == 111: |
|
56 | 59 | log.error('Unable to connect to celeryd `%s`. Sync execution', e) |
|
57 | 60 | else: |
|
58 | 61 | log.exception("Exception while connecting to celeryd.") |
|
59 | 62 | except KeyError as e: |
|
60 | 63 | log.error('Unable to connect to celeryd `%s`. Sync execution', e) |
|
61 | 64 | except Exception as e: |
|
62 | 65 | log.exception( |
|
63 | 66 | "Exception while trying to run task asynchronous. " |
|
64 | 67 | "Fallback to sync execution.") |
|
65 | 68 | |
|
66 | 69 | else: |
|
67 | 70 | log.debug('executing task %s:%s in sync mode', 'TASK', task) |
|
68 | 71 | |
|
69 | 72 | return ResultWrapper(task(*args, **kwargs)) |
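The new guard at the top of `run_task` makes a missing task fail fast with `ValueError` instead of surfacing later as an opaque error inside `apply_async` or the synchronous call. A small illustrative sketch follows; the lookup-by-name pattern and the `tasks` module path are assumptions for illustration only.

    # Resolving a task dynamically can yield None; run_task now rejects that
    # up front with a clear error instead of failing deep inside celery.
    from rhodecode.lib.celerylib import run_task
    from rhodecode.lib.celerylib import tasks

    task = getattr(tasks, 'send_email', None)  # None if the name is misspelled
    run_task(task, ['admin@example.com'], 'Test subject', 'Test body')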
@@ -1,407 +1,410 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2012-2020 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | """ |
|
22 | 22 | RhodeCode task modules, containing all tasks that are supposed to be run |
|
23 | 23 | by the celery daemon |
|
24 | 24 | """ |
|
25 | 25 | |
|
26 | 26 | import os |
|
27 | 27 | import time |
|
28 | 28 | |
|
29 | 29 | from pyramid import compat |
|
30 | 30 | from pyramid_mailer.mailer import Mailer |
|
31 | 31 | from pyramid_mailer.message import Message |
|
32 | 32 | from email.utils import formatdate |
|
33 | 33 | |
|
34 | 34 | import rhodecode |
|
35 | 35 | from rhodecode.lib import audit_logger |
|
36 | 36 | from rhodecode.lib.celerylib import get_logger, async_task, RequestContextTask, run_task |
|
37 | 37 | from rhodecode.lib import hooks_base |
|
38 | 38 | from rhodecode.lib.utils2 import safe_int, str2bool, aslist |
|
39 | 39 | from rhodecode.model.db import ( |
|
40 | 40 | Session, IntegrityError, true, Repository, RepoGroup, User) |
|
41 | 41 | from rhodecode.model.permission import PermissionModel |
|
42 | 42 | |
|
43 | 43 | |
|
44 | 44 | @async_task(ignore_result=True, base=RequestContextTask) |
|
45 | 45 | def send_email(recipients, subject, body='', html_body='', email_config=None, |
|
46 | 46 | extra_headers=None): |
|
47 | 47 | """ |
|
48 | 48 | Sends an email with defined parameters from the .ini files. |
|
49 | 49 | |
|
50 | 50 | :param recipients: list of recipients, if this is empty the defined email |
|
51 | 51 | address from field 'email_to' is used instead |
|
52 | 52 | :param subject: subject of the mail |
|
53 | 53 | :param body: body of the mail |
|
54 | 54 | :param html_body: html version of body |
|
55 | 55 | :param email_config: specify custom configuration for mailer |
|
56 | 56 | :param extra_headers: specify custom headers |
|
57 | 57 | """ |
|
58 | 58 | log = get_logger(send_email) |
|
59 | 59 | |
|
60 | 60 | email_config = email_config or rhodecode.CONFIG |
|
61 | 61 | |
|
62 | 62 | mail_server = email_config.get('smtp_server') or None |
|
63 | 63 | if mail_server is None: |
|
64 | 64 | log.error("SMTP server information missing. Sending email failed. " |
|
65 | 65 | "Make sure that `smtp_server` variable is configured " |
|
66 | 66 | "inside the .ini file") |
|
67 | 67 | return False |
|
68 | 68 | |
|
69 | 69 | subject = "%s %s" % (email_config.get('email_prefix', ''), subject) |
|
70 | 70 | |
|
71 | 71 | if recipients: |
|
72 | 72 | if isinstance(recipients, compat.string_types): |
|
73 | 73 | recipients = recipients.split(',') |
|
74 | 74 | else: |
|
75 | 75 | # if recipients are not defined we send to email_config + all admins |
|
76 | 76 | admins = [] |
|
77 | 77 | for u in User.query().filter(User.admin == true()).all(): |
|
78 | 78 | if u.email: |
|
79 | 79 | admins.append(u.email) |
|
80 | 80 | recipients = [] |
|
81 | 81 | config_email = email_config.get('email_to') |
|
82 | 82 | if config_email: |
|
83 | 83 | recipients += [config_email] |
|
84 | 84 | recipients += admins |
|
85 | 85 | |
|
86 | 86 | # translate our LEGACY config into the one that pyramid_mailer supports |
|
87 | 87 | email_conf = dict( |
|
88 | 88 | host=mail_server, |
|
89 | 89 | port=email_config.get('smtp_port', 25), |
|
90 | 90 | username=email_config.get('smtp_username'), |
|
91 | 91 | password=email_config.get('smtp_password'), |
|
92 | 92 | |
|
93 | 93 | tls=str2bool(email_config.get('smtp_use_tls')), |
|
94 | 94 | ssl=str2bool(email_config.get('smtp_use_ssl')), |
|
95 | 95 | |
|
96 | 96 | # SSL key file |
|
97 | 97 | # keyfile='', |
|
98 | 98 | |
|
99 | 99 | # SSL certificate file |
|
100 | 100 | # certfile='', |
|
101 | 101 | |
|
102 | 102 | # Location of maildir |
|
103 | 103 | # queue_path='', |
|
104 | 104 | |
|
105 | 105 | default_sender=email_config.get('app_email_from', 'RhodeCode-noreply@rhodecode.com'), |
|
106 | 106 | |
|
107 | 107 | debug=str2bool(email_config.get('smtp_debug')), |
|
108 | 108 | # /usr/sbin/sendmail Sendmail executable |
|
109 | 109 | # sendmail_app='', |
|
110 | 110 | |
|
111 | 111 | # {sendmail_app} -t -i -f {sender} Template for sendmail execution |
|
112 | 112 | # sendmail_template='', |
|
113 | 113 | ) |
|
114 | 114 | |
|
115 | 115 | if extra_headers is None: |
|
116 | 116 | extra_headers = {} |
|
117 | 117 | |
|
118 | 118 | extra_headers.setdefault('Date', formatdate(time.time())) |
|
119 | 119 | |
|
120 | 120 | if 'thread_ids' in extra_headers: |
|
121 | 121 | thread_ids = extra_headers.pop('thread_ids') |
|
122 | 122 | extra_headers['References'] = ' '.join('<{}>'.format(t) for t in thread_ids) |
|
123 | 123 | |
|
124 | 124 | try: |
|
125 | 125 | mailer = Mailer(**email_conf) |
|
126 | 126 | |
|
127 | 127 | message = Message(subject=subject, |
|
128 | 128 | sender=email_conf['default_sender'], |
|
129 | 129 | recipients=recipients, |
|
130 | 130 | body=body, html=html_body, |
|
131 | 131 | extra_headers=extra_headers) |
|
132 | 132 | mailer.send_immediately(message) |
|
133 | 133 | |
|
134 | 134 | except Exception: |
|
135 | 135 | log.exception('Mail sending failed') |
|
136 | 136 | return False |
|
137 | 137 | return True |
|
138 | 138 | |
|
139 | 139 | |
|
140 | 140 | @async_task(ignore_result=True, base=RequestContextTask) |
|
141 | 141 | def create_repo(form_data, cur_user): |
|
142 | 142 | from rhodecode.model.repo import RepoModel |
|
143 | 143 | from rhodecode.model.user import UserModel |
|
144 | 144 | from rhodecode.model.scm import ScmModel |
|
145 | 145 | from rhodecode.model.settings import SettingsModel |
|
146 | 146 | |
|
147 | 147 | log = get_logger(create_repo) |
|
148 | 148 | |
|
149 | 149 | cur_user = UserModel()._get_user(cur_user) |
|
150 | 150 | owner = cur_user |
|
151 | 151 | |
|
152 | 152 | repo_name = form_data['repo_name'] |
|
153 | 153 | repo_name_full = form_data['repo_name_full'] |
|
154 | 154 | repo_type = form_data['repo_type'] |
|
155 | 155 | description = form_data['repo_description'] |
|
156 | 156 | private = form_data['repo_private'] |
|
157 | 157 | clone_uri = form_data.get('clone_uri') |
|
158 | 158 | repo_group = safe_int(form_data['repo_group']) |
|
159 | 159 | copy_fork_permissions = form_data.get('copy_permissions') |
|
160 | 160 | copy_group_permissions = form_data.get('repo_copy_permissions') |
|
161 | 161 | fork_of = form_data.get('fork_parent_id') |
|
162 | 162 | state = form_data.get('repo_state', Repository.STATE_PENDING) |
|
163 | 163 | |
|
164 | 164 | # repo creation defaults, private and repo_type are filled in form |
|
165 | 165 | defs = SettingsModel().get_default_repo_settings(strip_prefix=True) |
|
166 | 166 | enable_statistics = form_data.get( |
|
167 | 167 | 'enable_statistics', defs.get('repo_enable_statistics')) |
|
168 | 168 | enable_locking = form_data.get( |
|
169 | 169 | 'enable_locking', defs.get('repo_enable_locking')) |
|
170 | 170 | enable_downloads = form_data.get( |
|
171 | 171 | 'enable_downloads', defs.get('repo_enable_downloads')) |
|
172 | 172 | |
|
173 | 173 | # set landing rev based on default branches for SCM |
|
174 | 174 | landing_ref, _label = ScmModel.backend_landing_ref(repo_type) |
|
175 | 175 | |
|
176 | 176 | try: |
|
177 | 177 | RepoModel()._create_repo( |
|
178 | 178 | repo_name=repo_name_full, |
|
179 | 179 | repo_type=repo_type, |
|
180 | 180 | description=description, |
|
181 | 181 | owner=owner, |
|
182 | 182 | private=private, |
|
183 | 183 | clone_uri=clone_uri, |
|
184 | 184 | repo_group=repo_group, |
|
185 | 185 | landing_rev=landing_ref, |
|
186 | 186 | fork_of=fork_of, |
|
187 | 187 | copy_fork_permissions=copy_fork_permissions, |
|
188 | 188 | copy_group_permissions=copy_group_permissions, |
|
189 | 189 | enable_statistics=enable_statistics, |
|
190 | 190 | enable_locking=enable_locking, |
|
191 | 191 | enable_downloads=enable_downloads, |
|
192 | 192 | state=state |
|
193 | 193 | ) |
|
194 | 194 | Session().commit() |
|
195 | 195 | |
|
196 | 196 | # now create this repo on Filesystem |
|
197 | 197 | RepoModel()._create_filesystem_repo( |
|
198 | 198 | repo_name=repo_name, |
|
199 | 199 | repo_type=repo_type, |
|
200 | 200 | repo_group=RepoModel()._get_repo_group(repo_group), |
|
201 | 201 | clone_uri=clone_uri, |
|
202 | 202 | ) |
|
203 | 203 | repo = Repository.get_by_repo_name(repo_name_full) |
|
204 | 204 | hooks_base.create_repository(created_by=owner.username, **repo.get_dict()) |
|
205 | 205 | |
|
206 | 206 | # update repo commit caches initially |
|
207 | 207 | repo.update_commit_cache() |
|
208 | 208 | |
|
209 | 209 | # set new created state |
|
210 | 210 | repo.set_state(Repository.STATE_CREATED) |
|
211 | 211 | repo_id = repo.repo_id |
|
212 | 212 | repo_data = repo.get_api_data() |
|
213 | 213 | |
|
214 | 214 | audit_logger.store( |
|
215 | 215 | 'repo.create', action_data={'data': repo_data}, |
|
216 | 216 | user=cur_user, |
|
217 | 217 | repo=audit_logger.RepoWrap(repo_name=repo_name, repo_id=repo_id)) |
|
218 | 218 | |
|
219 | 219 | Session().commit() |
|
220 | 220 | |
|
221 | 221 | PermissionModel().trigger_permission_flush() |
|
222 | 222 | |
|
223 | 223 | except Exception as e: |
|
224 | 224 | log.warning('Exception occurred when creating repository, ' |
|
225 | 225 | 'doing cleanup...', exc_info=True) |
|
226 | 226 | if isinstance(e, IntegrityError): |
|
227 | 227 | Session().rollback() |
|
228 | 228 | |
|
229 | 229 | # rollback things manually ! |
|
230 | 230 | repo = Repository.get_by_repo_name(repo_name_full) |
|
231 | 231 | if repo: |
|
232 | 232 | Repository.delete(repo.repo_id) |
|
233 | 233 | Session().commit() |
|
234 | 234 | RepoModel()._delete_filesystem_repo(repo) |
|
235 | 235 | log.info('Cleanup of repo %s finished', repo_name_full) |
|
236 | 236 | raise |
|
237 | 237 | |
|
238 | 238 | return True |
|
239 | 239 | |
|
240 | 240 | |
|
241 | 241 | @async_task(ignore_result=True, base=RequestContextTask) |
|
242 | 242 | def create_repo_fork(form_data, cur_user): |
|
243 | 243 | """ |
|
244 | 244 | Creates a fork of repository using internal VCS methods |
|
245 | 245 | """ |
|
246 | 246 | from rhodecode.model.repo import RepoModel |
|
247 | 247 | from rhodecode.model.user import UserModel |
|
248 | 248 | |
|
249 | 249 | log = get_logger(create_repo_fork) |
|
250 | 250 | |
|
251 | 251 | cur_user = UserModel()._get_user(cur_user) |
|
252 | 252 | owner = cur_user |
|
253 | 253 | |
|
254 | 254 | repo_name = form_data['repo_name'] # fork in this case |
|
255 | 255 | repo_name_full = form_data['repo_name_full'] |
|
256 | 256 | repo_type = form_data['repo_type'] |
|
257 | 257 | description = form_data['description'] |
|
258 | 258 | private = form_data['private'] |
|
259 | 259 | clone_uri = form_data.get('clone_uri') |
|
260 | 260 | repo_group = safe_int(form_data['repo_group']) |
|
261 | 261 | landing_ref = form_data['landing_rev'] |
|
262 | 262 | copy_fork_permissions = form_data.get('copy_permissions') |
|
263 | 263 | fork_id = safe_int(form_data.get('fork_parent_id')) |
|
264 | 264 | |
|
265 | 265 | try: |
|
266 | 266 | fork_of = RepoModel()._get_repo(fork_id) |
|
267 | 267 | RepoModel()._create_repo( |
|
268 | 268 | repo_name=repo_name_full, |
|
269 | 269 | repo_type=repo_type, |
|
270 | 270 | description=description, |
|
271 | 271 | owner=owner, |
|
272 | 272 | private=private, |
|
273 | 273 | clone_uri=clone_uri, |
|
274 | 274 | repo_group=repo_group, |
|
275 | 275 | landing_rev=landing_ref, |
|
276 | 276 | fork_of=fork_of, |
|
277 | 277 | copy_fork_permissions=copy_fork_permissions |
|
278 | 278 | ) |
|
279 | 279 | |
|
280 | 280 | Session().commit() |
|
281 | 281 | |
|
282 | 282 | base_path = Repository.base_path() |
|
283 | 283 | source_repo_path = os.path.join(base_path, fork_of.repo_name) |
|
284 | 284 | |
|
285 | 285 | # now create this repo on Filesystem |
|
286 | 286 | RepoModel()._create_filesystem_repo( |
|
287 | 287 | repo_name=repo_name, |
|
288 | 288 | repo_type=repo_type, |
|
289 | 289 | repo_group=RepoModel()._get_repo_group(repo_group), |
|
290 | 290 | clone_uri=source_repo_path, |
|
291 | 291 | ) |
|
292 | 292 | repo = Repository.get_by_repo_name(repo_name_full) |
|
293 | 293 | hooks_base.create_repository(created_by=owner.username, **repo.get_dict()) |
|
294 | 294 | |
|
295 | 295 | # update repo commit caches initially |
|
296 | 296 | config = repo._config |
|
297 | 297 | config.set('extensions', 'largefiles', '') |
|
298 | 298 | repo.update_commit_cache(config=config) |
|
299 | 299 | |
|
300 | 300 | # set new created state |
|
301 | 301 | repo.set_state(Repository.STATE_CREATED) |
|
302 | 302 | |
|
303 | 303 | repo_id = repo.repo_id |
|
304 | 304 | repo_data = repo.get_api_data() |
|
305 | 305 | audit_logger.store( |
|
306 | 306 | 'repo.fork', action_data={'data': repo_data}, |
|
307 | 307 | user=cur_user, |
|
308 | 308 | repo=audit_logger.RepoWrap(repo_name=repo_name, repo_id=repo_id)) |
|
309 | 309 | |
|
310 | 310 | Session().commit() |
|
311 | 311 | except Exception as e: |
|
312 | 312 | log.warning('Exception occurred when forking repository, ' |
|
313 | 313 | 'doing cleanup...', exc_info=True) |
|
314 | 314 | if isinstance(e, IntegrityError): |
|
315 | 315 | Session().rollback() |
|
316 | 316 | |
|
317 | 317 | # rollback things manually ! |
|
318 | 318 | repo = Repository.get_by_repo_name(repo_name_full) |
|
319 | 319 | if repo: |
|
320 | 320 | Repository.delete(repo.repo_id) |
|
321 | 321 | Session().commit() |
|
322 | 322 | RepoModel()._delete_filesystem_repo(repo) |
|
323 | 323 | log.info('Cleanup of repo %s finished', repo_name_full) |
|
324 | 324 | raise |
|
325 | 325 | |
|
326 | 326 | return True |
|
327 | 327 | |
|
328 | 328 | |
|
329 | 329 | @async_task(ignore_result=True) |
|
330 | 330 | def repo_maintenance(repoid): |
|
331 | 331 | from rhodecode.lib import repo_maintenance as repo_maintenance_lib |
|
332 | 332 | log = get_logger(repo_maintenance) |
|
333 | 333 | repo = Repository.get_by_id_or_repo_name(repoid) |
|
334 | 334 | if repo: |
|
335 | 335 | maintenance = repo_maintenance_lib.RepoMaintenance() |
|
336 | 336 | tasks = maintenance.get_tasks_for_repo(repo) |
|
337 | 337 | log.debug('Executing %s tasks on repo `%s`', tasks, repoid) |
|
338 | 338 | executed_types = maintenance.execute(repo) |
|
339 | 339 | log.debug('Got execution results %s', executed_types) |
|
340 | 340 | else: |
|
341 | 341 | log.debug('Repo `%s` not found or without a clone_url', repoid) |
|
342 | 342 | |
|
343 | 343 | |
|
344 | 344 | @async_task(ignore_result=True) |
|
345 | 345 | def check_for_update(send_email_notification=True, email_recipients=None): |
|
346 | 346 | from rhodecode.model.update import UpdateModel |
|
347 | 347 | from rhodecode.model.notification import EmailNotificationModel |
|
348 | 348 | |
|
349 | 349 | log = get_logger(check_for_update) |
|
350 | 350 | update_url = UpdateModel().get_update_url() |
|
351 | 351 | cur_ver = rhodecode.__version__ |
|
352 | 352 | |
|
353 | 353 | try: |
|
354 | 354 | data = UpdateModel().get_update_data(update_url) |
|
355 | 355 | |
|
356 | 356 | current_ver = UpdateModel().get_stored_version(fallback=cur_ver) |
|
357 | 357 | latest_ver = data['versions'][0]['version'] |
|
358 | 358 | UpdateModel().store_version(latest_ver) |
|
359 | 359 | |
|
360 | 360 | if send_email_notification: |
|
361 | 361 | log.debug('Send email notification is enabled. ' |
|
362 | 362 | 'Current RhodeCode version: %s, latest known: %s', current_ver, latest_ver) |
|
363 | 363 | if UpdateModel().is_outdated(current_ver, latest_ver): |
|
364 | 364 | |
|
365 | 365 | email_kwargs = { |
|
366 | 366 | 'current_ver': current_ver, |
|
367 | 367 | 'latest_ver': latest_ver, |
|
368 | 368 | } |
|
369 | 369 | |
|
370 | 370 | (subject, email_body, email_body_plaintext) = EmailNotificationModel().render_email( |
|
371 | 371 | EmailNotificationModel.TYPE_UPDATE_AVAILABLE, **email_kwargs) |
|
372 | 372 | |
|
373 | 373 | email_recipients = aslist(email_recipients, sep=',') or \ |
|
374 | 374 | [user.email for user in User.get_all_super_admins()] |
|
375 | 375 | run_task(send_email, email_recipients, subject, |
|
376 | 376 | email_body_plaintext, email_body) |
|
377 | 377 | |
|
378 | 378 | except Exception: |
|
379 | 379 | pass |
|
380 | 380 | |
|
381 | 381 | |
|
382 | 382 | @async_task(ignore_result=False) |
|
383 | 383 | def beat_check(*args, **kwargs): |
|
384 | 384 | log = get_logger(beat_check) |
|
385 | 385 | log.info('%r: Got args: %r and kwargs %r', beat_check, args, kwargs) |
|
386 | 386 | return time.time() |
|
387 | 387 | |
|
388 | 388 | |
|
389 | @async_task(ignore_result=True) | |
|
390 | def sync_last_update(*args, **kwargs): | |
|
391 | ||
|
389 | def sync_last_update_for_objects(*args, **kwargs): | |
|
392 | 390 | skip_repos = kwargs.get('skip_repos') |
|
393 | 391 | if not skip_repos: |
|
394 | 392 | repos = Repository.query() \ |
|
395 | 393 | .order_by(Repository.group_id.asc()) |
|
396 | 394 | |
|
397 | 395 | for repo in repos: |
|
398 | 396 | repo.update_commit_cache() |
|
399 | 397 | |
|
400 | 398 | skip_groups = kwargs.get('skip_groups') |
|
401 | 399 | if not skip_groups: |
|
402 | 400 | repo_groups = RepoGroup.query() \ |
|
403 | 401 | .filter(RepoGroup.group_parent_id == None) |
|
404 | 402 | |
|
405 | 403 | for root_gr in repo_groups: |
|
406 | 404 | for repo_gr in reversed(root_gr.recursive_groups()): |
|
407 | 405 | repo_gr.update_commit_cache() |
|
406 | ||
|
407 | ||
|
408 | @async_task(ignore_result=True) | |
|
409 | def sync_last_update(*args, **kwargs): | |
|
410 | sync_last_update_for_objects(*args, **kwargs) |
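Splitting the body out into `sync_last_update_for_objects` keeps `sync_last_update` as a thin celery entry point while letting the same commit-cache refresh run synchronously, for example from an `rccontrol ishell` session. A hedged usage sketch, assuming the tasks module path:

    # Refresh repository and repo-group commit caches directly, without celery;
    # the keyword flags mirror the kwargs read inside the function.
    from rhodecode.lib.celerylib.tasks import sync_last_update_for_objects

    sync_last_update_for_objects(skip_repos=False, skip_groups=False)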
@@ -1,283 +1,284 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2020 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import gzip |
|
22 | 22 | import shutil |
|
23 | 23 | import logging |
|
24 | 24 | import tempfile |
|
25 | 25 | import urlparse |
|
26 | 26 | |
|
27 | 27 | from webob.exc import HTTPNotFound |
|
28 | 28 | |
|
29 | 29 | import rhodecode |
|
30 | 30 | from rhodecode.lib.middleware.appenlight import wrap_in_appenlight_if_enabled |
|
31 | 31 | from rhodecode.lib.middleware.simplegit import SimpleGit, GIT_PROTO_PAT |
|
32 | 32 | from rhodecode.lib.middleware.simplehg import SimpleHg |
|
33 | 33 | from rhodecode.lib.middleware.simplesvn import SimpleSvn |
|
34 | 34 | from rhodecode.model.settings import VcsSettingsModel |
|
35 | 35 | |
|
36 | 36 | log = logging.getLogger(__name__) |
|
37 | 37 | |
|
38 | 38 | VCS_TYPE_KEY = '_rc_vcs_type' |
|
39 | 39 | VCS_TYPE_SKIP = '_rc_vcs_skip' |
|
40 | 40 | |
|
41 | 41 | |
|
42 | 42 | def is_git(environ): |
|
43 | 43 | """ |
|
44 | 44 | Returns True if requests should be handled by GIT wsgi middleware |
|
45 | 45 | """ |
|
46 | 46 | is_git_path = GIT_PROTO_PAT.match(environ['PATH_INFO']) |
|
47 | 47 | log.debug( |
|
48 | 48 | 'request path: `%s` detected as GIT PROTOCOL %s', environ['PATH_INFO'], |
|
49 | 49 | is_git_path is not None) |
|
50 | 50 | |
|
51 | 51 | return is_git_path |
|
52 | 52 | |
|
53 | 53 | |
|
54 | 54 | def is_hg(environ): |
|
55 | 55 | """ |
|
56 | 56 | Returns True if requests target is mercurial server - header |
|
57 | 57 | ``HTTP_ACCEPT`` of such request would start with ``application/mercurial``. |
|
58 | 58 | """ |
|
59 | 59 | is_hg_path = False |
|
60 | 60 | |
|
61 | 61 | http_accept = environ.get('HTTP_ACCEPT') |
|
62 | 62 | |
|
63 | 63 | if http_accept and http_accept.startswith('application/mercurial'): |
|
64 | 64 | query = urlparse.parse_qs(environ['QUERY_STRING']) |
|
65 | 65 | if 'cmd' in query: |
|
66 | 66 | is_hg_path = True |
|
67 | 67 | |
|
68 | 68 | log.debug( |
|
69 | 69 | 'request path: `%s` detected as HG PROTOCOL %s', environ['PATH_INFO'], |
|
70 | 70 | is_hg_path) |
|
71 | 71 | |
|
72 | 72 | return is_hg_path |
|
73 | 73 | |
|
74 | 74 | |
|
75 | 75 | def is_svn(environ): |
|
76 | 76 | """ |
|
77 | 77 | Returns True if requests target is Subversion server |
|
78 | 78 | """ |
|
79 | 79 | |
|
80 | 80 | http_dav = environ.get('HTTP_DAV', '') |
|
81 | 81 | magic_path_segment = rhodecode.CONFIG.get( |
|
82 | 82 | 'rhodecode_subversion_magic_path', '/!svn') |
|
83 | 83 | is_svn_path = ( |
|
84 | 84 | 'subversion' in http_dav or |
|
85 | 85 | magic_path_segment in environ['PATH_INFO'] |
|
86 | 86 | or environ['REQUEST_METHOD'] in ['PROPFIND', 'PROPPATCH'] |
|
87 | 87 | ) |
|
88 | 88 | log.debug( |
|
89 | 89 | 'request path: `%s` detected as SVN PROTOCOL %s', environ['PATH_INFO'], |
|
90 | 90 | is_svn_path) |
|
91 | 91 | |
|
92 | 92 | return is_svn_path |
|
93 | 93 | |
|
94 | 94 | |
|
95 | 95 | class GunzipMiddleware(object): |
|
96 | 96 | """ |
|
97 | 97 | WSGI middleware that unzips gzip-encoded requests before |
|
98 | 98 | passing on to the underlying application. |
|
99 | 99 | """ |
|
100 | 100 | |
|
101 | 101 | def __init__(self, application): |
|
102 | 102 | self.app = application |
|
103 | 103 | |
|
104 | 104 | def __call__(self, environ, start_response): |
|
105 | 105 | accepts_encoding_header = environ.get('HTTP_CONTENT_ENCODING', b'') |
|
106 | 106 | |
|
107 | 107 | if b'gzip' in accepts_encoding_header: |
|
108 | 108 | log.debug('gzip detected, now running gunzip wrapper') |
|
109 | 109 | wsgi_input = environ['wsgi.input'] |
|
110 | 110 | |
|
111 | 111 | if not hasattr(environ['wsgi.input'], 'seek'): |
|
112 | 112 | # The gzip implementation in the standard library of Python 2.x |
|
113 | 113 | # requires the '.seek()' and '.tell()' methods to be available |
|
114 | 114 | # on the input stream. Read the data into a temporary file to |
|
115 | 115 | # work around this limitation. |
|
116 | 116 | |
|
117 | 117 | wsgi_input = tempfile.SpooledTemporaryFile(64 * 1024 * 1024) |
|
118 | 118 | shutil.copyfileobj(environ['wsgi.input'], wsgi_input) |
|
119 | 119 | wsgi_input.seek(0) |
|
120 | 120 | |
|
121 | 121 | environ['wsgi.input'] = gzip.GzipFile(fileobj=wsgi_input, mode='r') |
|
122 | 122 | # since we "Ungzipped" the content we say now it's no longer gzip |
|
123 | 123 | # content encoding |
|
124 | 124 | del environ['HTTP_CONTENT_ENCODING'] |
|
125 | 125 | |
|
126 | 126 | # content length has changes ? or i'm not sure |
|
127 | 127 | if 'CONTENT_LENGTH' in environ: |
|
128 | 128 | del environ['CONTENT_LENGTH'] |
|
129 | 129 | else: |
|
130 | 130 | log.debug('content not gzipped, gzipMiddleware passing ' |
|
131 | 131 | 'request further') |
|
132 | 132 | return self.app(environ, start_response) |
|
133 | 133 | |
|
134 | 134 | |
|
135 | 135 | def is_vcs_call(environ): |
|
136 | 136 | if VCS_TYPE_KEY in environ: |
|
137 | 137 | raw_type = environ[VCS_TYPE_KEY] |
|
138 | 138 | return raw_type and raw_type != VCS_TYPE_SKIP |
|
139 | 139 | return False |
|
140 | 140 | |
|
141 | 141 | |
|
142 | 142 | def get_path_elem(route_path): |
|
143 | 143 | if not route_path: |
|
144 | 144 | return None |
|
145 | 145 | |
|
146 | 146 | cleaned_route_path = route_path.lstrip('/') |
|
147 | 147 | if cleaned_route_path: |
|
148 | 148 | cleaned_route_path_elems = cleaned_route_path.split('/') |
|
149 | 149 | if cleaned_route_path_elems: |
|
150 | 150 | return cleaned_route_path_elems[0] |
|
151 | 151 | return None |
|
152 | 152 | |
|
153 | 153 | |
|
154 | 154 | def detect_vcs_request(environ, backends): |
|
155 | 155 | checks = { |
|
156 | 156 | 'hg': (is_hg, SimpleHg), |
|
157 | 157 | 'git': (is_git, SimpleGit), |
|
158 | 158 | 'svn': (is_svn, SimpleSvn), |
|
159 | 159 | } |
|
160 | 160 | handler = None |
|
161 | 161 | # List of path views first chunk we don't do any checks |
|
162 | 162 | white_list = [ |
|
163 | 163 | # e.g /_file_store/download |
|
164 | 164 | '_file_store', |
|
165 | 165 | |
|
166 | 166 | # static files no detection |
|
167 | 167 | '_static', |
|
168 | 168 | |
|
169 | # skip ops ping | |
|
169 | # skip ops ping, status | |
|
170 | 170 | '_admin/ops/ping', |
|
171 | '_admin/ops/status', | |
|
171 | 172 | |
|
172 | 173 | # full channelstream connect should be VCS skipped |
|
173 | 174 | '_admin/channelstream/connect', |
|
174 | 175 | ] |
|
175 | 176 | |
|
176 | 177 | path_info = environ['PATH_INFO'] |
|
177 | 178 | |
|
178 | 179 | path_elem = get_path_elem(path_info) |
|
179 | 180 | |
|
180 | 181 | if path_elem in white_list: |
|
181 | 182 | log.debug('path `%s` in whitelist, skipping...', path_info) |
|
182 | 183 | return handler |
|
183 | 184 | |
|
184 | 185 | path_url = path_info.lstrip('/') |
|
185 | 186 | if path_url in white_list: |
|
186 | 187 | log.debug('full url path `%s` in whitelist, skipping...', path_url) |
|
187 | 188 | return handler |
|
188 | 189 | |
|
189 | 190 | if VCS_TYPE_KEY in environ: |
|
190 | 191 | raw_type = environ[VCS_TYPE_KEY] |
|
191 | 192 | if raw_type == VCS_TYPE_SKIP: |
|
192 | 193 | log.debug('got `skip` marker for vcs detection, skipping...') |
|
193 | 194 | return handler |
|
194 | 195 | |
|
195 | 196 | _check, handler = checks.get(raw_type) or [None, None] |
|
196 | 197 | if handler: |
|
197 | 198 | log.debug('got handler:%s from environ', handler) |
|
198 | 199 | |
|
199 | 200 | if not handler: |
|
200 | 201 | log.debug('request start: checking if request for `%s` is of VCS type in order: %s', path_elem, backends) |
|
201 | 202 | for vcs_type in backends: |
|
202 | 203 | vcs_check, _handler = checks[vcs_type] |
|
203 | 204 | if vcs_check(environ): |
|
204 | 205 | log.debug('vcs handler found %s', _handler) |
|
205 | 206 | handler = _handler |
|
206 | 207 | break |
|
207 | 208 | |
|
208 | 209 | return handler |
|
209 | 210 | |
|
210 | 211 | |
|
211 | 212 | class VCSMiddleware(object): |
|
212 | 213 | |
|
213 | 214 | def __init__(self, app, registry, config, appenlight_client): |
|
214 | 215 | self.application = app |
|
215 | 216 | self.registry = registry |
|
216 | 217 | self.config = config |
|
217 | 218 | self.appenlight_client = appenlight_client |
|
218 | 219 | self.use_gzip = True |
|
219 | 220 | # order in which we check the middlewares, based on vcs.backends config |
|
220 | 221 | self.check_middlewares = config['vcs.backends'] |
|
221 | 222 | |
|
222 | 223 | def vcs_config(self, repo_name=None): |
|
223 | 224 | """ |
|
224 | 225 | returns serialized VcsSettings |
|
225 | 226 | """ |
|
226 | 227 | try: |
|
227 | 228 | return VcsSettingsModel( |
|
228 | 229 | repo=repo_name).get_ui_settings_as_config_obj() |
|
229 | 230 | except Exception: |
|
230 | 231 | pass |
|
231 | 232 | |
|
232 | 233 | def wrap_in_gzip_if_enabled(self, app, config): |
|
233 | 234 | if self.use_gzip: |
|
234 | 235 | app = GunzipMiddleware(app) |
|
235 | 236 | return app |
|
236 | 237 | |
|
237 | 238 | def _get_handler_app(self, environ): |
|
238 | 239 | app = None |
|
239 | 240 | log.debug('VCSMiddleware: detecting vcs type.') |
|
240 | 241 | handler = detect_vcs_request(environ, self.check_middlewares) |
|
241 | 242 | if handler: |
|
242 | 243 | app = handler(self.config, self.registry) |
|
243 | 244 | |
|
244 | 245 | return app |
|
245 | 246 | |
|
246 | 247 | def __call__(self, environ, start_response): |
|
247 | 248 | # check if we handle one of interesting protocols, optionally extract |
|
248 | 249 | # specific vcsSettings and allow changes of how things are wrapped |
|
249 | 250 | vcs_handler = self._get_handler_app(environ) |
|
250 | 251 | if vcs_handler: |
|
251 | 252 | # translate the _REPO_ID into real repo NAME for usage |
|
252 | 253 | # in middleware |
|
253 | 254 | environ['PATH_INFO'] = vcs_handler._get_by_id(environ['PATH_INFO']) |
|
254 | 255 | |
|
255 | 256 | # Set acl, url and vcs repo names. |
|
256 | 257 | vcs_handler.set_repo_names(environ) |
|
257 | 258 | |
|
258 | 259 | # register repo config back to the handler |
|
259 | 260 | vcs_conf = self.vcs_config(vcs_handler.acl_repo_name) |
|
260 | 261 | # maybe damaged/non existent settings. We still want to |
|
261 | 262 | # pass that point to validate on is_valid_and_existing_repo |
|
262 | 263 | # and return proper HTTP Code back to client |
|
263 | 264 | if vcs_conf: |
|
264 | 265 | vcs_handler.repo_vcs_config = vcs_conf |
|
265 | 266 | |
|
266 | 267 | # check for type, presence in database and on filesystem |
|
267 | 268 | if not vcs_handler.is_valid_and_existing_repo( |
|
268 | 269 | vcs_handler.acl_repo_name, |
|
269 | 270 | vcs_handler.base_path, |
|
270 | 271 | vcs_handler.SCM): |
|
271 | 272 | return HTTPNotFound()(environ, start_response) |
|
272 | 273 | |
|
273 | 274 | environ['REPO_NAME'] = vcs_handler.url_repo_name |
|
274 | 275 | |
|
275 | 276 | # Wrap handler in middlewares if they are enabled. |
|
276 | 277 | vcs_handler = self.wrap_in_gzip_if_enabled( |
|
277 | 278 | vcs_handler, self.config) |
|
278 | 279 | vcs_handler, _ = wrap_in_appenlight_if_enabled( |
|
279 | 280 | vcs_handler, self.config, self.appenlight_client) |
|
280 | 281 | |
|
281 | 282 | return vcs_handler(environ, start_response) |
|
282 | 283 | |
|
283 | 284 | return self.application(environ, start_response) |
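The whitelist addition mirrors the new ops route: requests to `_admin/ops/status` must never be treated as hg/git/svn traffic, otherwise the healthcheck would be swallowed by the VCS middlewares. A rough check of that behaviour, with the import path assumed:

    # With '_admin/ops/status' whitelisted, detect_vcs_request() short-circuits
    # on the full path match and returns no handler, so VCSMiddleware falls
    # through to the regular application.
    from rhodecode.lib.middleware.vcs import detect_vcs_request

    environ = {'PATH_INFO': '/_admin/ops/status', 'QUERY_STRING': '',
               'REQUEST_METHOD': 'GET'}
    assert detect_vcs_request(environ, backends=['hg', 'git', 'svn']) is None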
@@ -1,354 +1,363 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2015-2020 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import time |
|
22 | 22 | import errno |
|
23 | 23 | import logging |
|
24 | 24 | |
|
25 | 25 | import msgpack |
|
26 | 26 | import gevent |
|
27 | 27 | import redis |
|
28 | 28 | |
|
29 | 29 | from dogpile.cache.api import CachedValue |
|
30 | 30 | from dogpile.cache.backends import memory as memory_backend |
|
31 | 31 | from dogpile.cache.backends import file as file_backend |
|
32 | 32 | from dogpile.cache.backends import redis as redis_backend |
|
33 | 33 | from dogpile.cache.backends.file import NO_VALUE, compat, FileLock |
|
34 | 34 | from dogpile.cache.util import memoized_property |
|
35 | 35 | |
|
36 | 36 | from pyramid.settings import asbool |
|
37 | 37 | |
|
38 | 38 | from rhodecode.lib.memory_lru_dict import LRUDict, LRUDictDebug |
|
39 | 39 | |
|
40 | 40 | |
|
41 | 41 | _default_max_size = 1024 |
|
42 | 42 | |
|
43 | 43 | log = logging.getLogger(__name__) |
|
44 | 44 | |
|
45 | 45 | |
|
46 | 46 | class LRUMemoryBackend(memory_backend.MemoryBackend): |
|
47 | 47 | key_prefix = 'lru_mem_backend' |
|
48 | 48 | pickle_values = False |
|
49 | 49 | |
|
50 | 50 | def __init__(self, arguments): |
|
51 | 51 | max_size = arguments.pop('max_size', _default_max_size) |
|
52 | 52 | |
|
53 | 53 | LRUDictClass = LRUDict |
|
54 | 54 | if arguments.pop('log_key_count', None): |
|
55 | 55 | LRUDictClass = LRUDictDebug |
|
56 | 56 | |
|
57 | 57 | arguments['cache_dict'] = LRUDictClass(max_size) |
|
58 | 58 | super(LRUMemoryBackend, self).__init__(arguments) |
|
59 | 59 | |
|
60 | 60 | def delete(self, key): |
|
61 | 61 | try: |
|
62 | 62 | del self._cache[key] |
|
63 | 63 | except KeyError: |
|
64 | 64 | # we don't care if key isn't there at deletion |
|
65 | 65 | pass |
|
66 | 66 | |
|
67 | 67 | def delete_multi(self, keys): |
|
68 | 68 | for key in keys: |
|
69 | 69 | self.delete(key) |
|
70 | 70 | |
|
71 | 71 | |
|
72 | 72 | class PickleSerializer(object): |
|
73 | 73 | |
|
74 | 74 | def _dumps(self, value, safe=False): |
|
75 | 75 | try: |
|
76 | 76 | return compat.pickle.dumps(value) |
|
77 | 77 | except Exception: |
|
78 | 78 | if safe: |
|
79 | 79 | return NO_VALUE |
|
80 | 80 | else: |
|
81 | 81 | raise |
|
82 | 82 | |
|
83 | 83 | def _loads(self, value, safe=True): |
|
84 | 84 | try: |
|
85 | 85 | return compat.pickle.loads(value) |
|
86 | 86 | except Exception: |
|
87 | 87 | if safe: |
|
88 | 88 | return NO_VALUE |
|
89 | 89 | else: |
|
90 | 90 | raise |
|
91 | 91 | |
|
92 | 92 | |
|
93 | 93 | class MsgPackSerializer(object): |
|
94 | 94 | |
|
95 | 95 | def _dumps(self, value, safe=False): |
|
96 | 96 | try: |
|
97 | 97 | return msgpack.packb(value) |
|
98 | 98 | except Exception: |
|
99 | 99 | if safe: |
|
100 | 100 | return NO_VALUE |
|
101 | 101 | else: |
|
102 | 102 | raise |
|
103 | 103 | |
|
104 | 104 | def _loads(self, value, safe=True): |
|
105 | 105 | """ |
|
106 | 106 | pickle maintained the `CachedValue` wrapper of the tuple |
|
107 | 107 | msgpack does not, so it must be added back in. |
|
108 | 108 | """ |
|
109 | 109 | try: |
|
110 | 110 | value = msgpack.unpackb(value, use_list=False) |
|
111 | 111 | return CachedValue(*value) |
|
112 | 112 | except Exception: |
|
113 | 113 | if safe: |
|
114 | 114 | return NO_VALUE |
|
115 | 115 | else: |
|
116 | 116 | raise |
|
117 | 117 | |
|
118 | 118 | |
|
119 | 119 | import fcntl |
|
120 | 120 | flock_org = fcntl.flock |
|
121 | 121 | |
|
122 | 122 | |
|
123 | 123 | class CustomLockFactory(FileLock): |
|
124 | 124 | |
|
125 | 125 | @memoized_property |
|
126 | 126 | def _module(self): |
|
127 | 127 | |
|
128 | 128 | def gevent_flock(fd, operation): |
|
129 | 129 | """ |
|
130 | 130 | Gevent compatible flock |
|
131 | 131 | """ |
|
132 | 132 | # set non-blocking, this will cause an exception if we cannot acquire a lock |
|
133 | 133 | operation |= fcntl.LOCK_NB |
|
134 | 134 | start_lock_time = time.time() |
|
135 | 135 | timeout = 60 * 15 # 15min |
|
136 | 136 | while True: |
|
137 | 137 | try: |
|
138 | 138 | flock_org(fd, operation) |
|
139 | 139 | # lock has been acquired |
|
140 | 140 | break |
|
141 | 141 | except (OSError, IOError) as e: |
|
142 | 142 | # raise on errors other than "Resource temporarily unavailable"
|
143 | 143 | if e.errno != errno.EAGAIN: |
|
144 | 144 | raise |
|
145 | 145 | elif (time.time() - start_lock_time) > timeout: |
|
146 | 146 | # waited too much time on a lock, better to fail than loop forever
|
147 | 147 | log.error('Failed to acquire lock on `%s` after waiting %ss', |
|
148 | 148 | self.filename, timeout) |
|
149 | 149 | raise |
|
150 | 150 | wait_timeout = 0.03 |
|
151 | 151 | log.debug('Failed to acquire lock on `%s`, retry in %ss', |
|
152 | 152 | self.filename, wait_timeout) |
|
153 | 153 | gevent.sleep(wait_timeout) |
|
154 | 154 | |
|
155 | 155 | fcntl.flock = gevent_flock |
|
156 | 156 | return fcntl |
|
157 | 157 | |
|
158 | 158 | |
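CustomLockFactory patches fcntl.flock with a non-blocking variant that polls, so a gevent worker waiting on the dbm file lock keeps yielding instead of blocking the whole process. A stripped-down, stand-alone sketch of the same retry loop, assuming a POSIX system (time.sleep stands in for gevent.sleep, and the timeout values are arbitrary):

    import errno
    import fcntl
    import time

    def retrying_flock(fd, operation, timeout=5.0, poll=0.03):
        # request the lock without blocking; poll until acquired or timed out
        operation |= fcntl.LOCK_NB
        deadline = time.time() + timeout
        while True:
            try:
                fcntl.flock(fd, operation)
                return
            except (OSError, IOError) as e:
                if e.errno != errno.EAGAIN:
                    raise
                if time.time() > deadline:
                    raise
                time.sleep(poll)

    with open('/tmp/example.lock', 'w') as f:
        retrying_flock(f.fileno(), fcntl.LOCK_EX)   # acquire
        fcntl.flock(f.fileno(), fcntl.LOCK_UN)      # release
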
|
159 | 159 | class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend): |
|
160 | 160 | key_prefix = 'file_backend' |
|
161 | 161 | |
|
162 | 162 | def __init__(self, arguments): |
|
163 | 163 | arguments['lock_factory'] = CustomLockFactory |
|
164 | 164 | db_file = arguments.get('filename') |
|
165 | 165 | |
|
166 | 166 | log.debug('initializing %s DB in %s', self.__class__.__name__, db_file)
|
167 | 167 | try: |
|
168 | 168 | super(FileNamespaceBackend, self).__init__(arguments) |
|
169 | 169 | except Exception: |
|
170 | 170 | log.error('Failed to initialize db at: %s', db_file) |
|
171 | 171 | raise |
|
172 | 172 | |
|
173 | 173 | def __repr__(self): |
|
174 | 174 | return '{} `{}`'.format(self.__class__, self.filename) |
|
175 | 175 | |
|
176 | 176 | def list_keys(self, prefix=''): |
|
177 | 177 | prefix = '{}:{}'.format(self.key_prefix, prefix) |
|
178 | 178 | |
|
179 | 179 | def cond(v): |
|
180 | 180 | if not prefix: |
|
181 | 181 | return True |
|
182 | 182 | |
|
183 | 183 | if v.startswith(prefix): |
|
184 | 184 | return True |
|
185 | 185 | return False |
|
186 | 186 | |
|
187 | 187 | with self._dbm_file(True) as dbm: |
|
188 | 188 | try: |
|
189 | 189 | return filter(cond, dbm.keys()) |
|
190 | 190 | except Exception: |
|
191 | 191 | log.error('Failed to fetch DBM keys from DB: %s', self.get_store()) |
|
192 | 192 | raise |
|
193 | 193 | |
|
194 | 194 | def get_store(self): |
|
195 | 195 | return self.filename |
|
196 | 196 | |
|
197 | 197 | def _dbm_get(self, key): |
|
198 | 198 | with self._dbm_file(False) as dbm: |
|
199 | 199 | if hasattr(dbm, 'get'): |
|
200 | 200 | value = dbm.get(key, NO_VALUE) |
|
201 | 201 | else: |
|
202 | 202 | # gdbm objects lack a .get method |
|
203 | 203 | try: |
|
204 | 204 | value = dbm[key] |
|
205 | 205 | except KeyError: |
|
206 | 206 | value = NO_VALUE |
|
207 | 207 | if value is not NO_VALUE: |
|
208 | 208 | value = self._loads(value) |
|
209 | 209 | return value |
|
210 | 210 | |
|
211 | 211 | def get(self, key): |
|
212 | 212 | try: |
|
213 | 213 | return self._dbm_get(key) |
|
214 | 214 | except Exception: |
|
215 | 215 | log.error('Failed to fetch DBM key %s from DB: %s', key, self.get_store()) |
|
216 | 216 | raise |
|
217 | 217 | |
|
218 | 218 | def set(self, key, value): |
|
219 | 219 | with self._dbm_file(True) as dbm: |
|
220 | 220 | dbm[key] = self._dumps(value) |
|
221 | 221 | |
|
222 | 222 | def set_multi(self, mapping): |
|
223 | 223 | with self._dbm_file(True) as dbm: |
|
224 | 224 | for key, value in mapping.items(): |
|
225 | 225 | dbm[key] = self._dumps(value) |
|
226 | 226 | |
|
227 | 227 | |
|
228 | 228 | class BaseRedisBackend(redis_backend.RedisBackend): |
|
229 | 229 | key_prefix = '' |
|
230 | 230 | |
|
231 | 231 | def __init__(self, arguments): |
|
232 | 232 | super(BaseRedisBackend, self).__init__(arguments) |
|
233 | 233 | self._lock_timeout = self.lock_timeout |
|
234 | 234 | self._lock_auto_renewal = asbool(arguments.pop("lock_auto_renewal", True)) |
|
235 | 235 | |
|
236 | 236 | if self._lock_auto_renewal and not self._lock_timeout: |
|
237 | 237 | # set default timeout for auto_renewal |
|
238 | 238 | self._lock_timeout = 30 |
|
239 | 239 | |
|
240 | 240 | def _create_client(self): |
|
241 | 241 | args = {} |
|
242 | 242 | |
|
243 | 243 | if self.url is not None: |
|
244 | 244 | args.update(url=self.url) |
|
245 | 245 | |
|
246 | 246 | else: |
|
247 | 247 | args.update( |
|
248 | 248 | host=self.host, password=self.password, |
|
249 | 249 | port=self.port, db=self.db |
|
250 | 250 | ) |
|
251 | 251 | |
|
252 | 252 | connection_pool = redis.ConnectionPool(**args) |
|
253 | 253 | |
|
254 | 254 | return redis.StrictRedis(connection_pool=connection_pool) |
|
255 | 255 | |
|
256 | 256 | def list_keys(self, prefix=''): |
|
257 | 257 | prefix = '{}:{}*'.format(self.key_prefix, prefix) |
|
258 | 258 | return self.client.keys(prefix) |
|
259 | 259 | |
|
260 | 260 | def get_store(self): |
|
261 | 261 | return self.client.connection_pool |
|
262 | 262 | |
|
263 | 263 | def get(self, key): |
|
264 | 264 | value = self.client.get(key) |
|
265 | 265 | if value is None: |
|
266 | 266 | return NO_VALUE |
|
267 | 267 | return self._loads(value) |
|
268 | 268 | |
|
269 | 269 | def get_multi(self, keys): |
|
270 | 270 | if not keys: |
|
271 | 271 | return [] |
|
272 | 272 | values = self.client.mget(keys) |
|
273 | 273 | loads = self._loads |
|
274 | 274 | return [ |
|
275 | 275 | loads(v) if v is not None else NO_VALUE |
|
276 | 276 | for v in values] |
|
277 | 277 | |
|
278 | 278 | def set(self, key, value): |
|
279 | 279 | if self.redis_expiration_time: |
|
280 | 280 | self.client.setex(key, self.redis_expiration_time, |
|
281 | 281 | self._dumps(value)) |
|
282 | 282 | else: |
|
283 | 283 | self.client.set(key, self._dumps(value)) |
|
284 | 284 | |
|
285 | 285 | def set_multi(self, mapping): |
|
286 | 286 | dumps = self._dumps |
|
287 | 287 | mapping = dict( |
|
288 | 288 | (k, dumps(v)) |
|
289 | 289 | for k, v in mapping.items() |
|
290 | 290 | ) |
|
291 | 291 | |
|
292 | 292 | if not self.redis_expiration_time: |
|
293 | 293 | self.client.mset(mapping) |
|
294 | 294 | else: |
|
295 | 295 | pipe = self.client.pipeline() |
|
296 | 296 | for key, value in mapping.items(): |
|
297 | 297 | pipe.setex(key, self.redis_expiration_time, value) |
|
298 | 298 | pipe.execute() |
|
299 | 299 | |
|
300 | 300 | def get_mutex(self, key): |
|
301 | 301 | if self.distributed_lock: |
|
302 | 302 | lock_key = redis_backend.u('_lock_{0}').format(key) |
|
303 | log.debug('Trying to acquire Redis lock for key %s', lock_key) | |
|
304 | 303 | return get_mutex_lock(self.client, lock_key, self._lock_timeout, |
|
305 | 304 | auto_renewal=self._lock_auto_renewal) |
|
306 | 305 | else: |
|
307 | 306 | return None |
|
308 | 307 | |
|
309 | 308 | |
|
310 | 309 | class RedisPickleBackend(PickleSerializer, BaseRedisBackend): |
|
311 | 310 | key_prefix = 'redis_pickle_backend' |
|
312 | 311 | pass |
|
313 | 312 | |
|
314 | 313 | |
|
315 | 314 | class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend): |
|
316 | 315 | key_prefix = 'redis_msgpack_backend' |
|
317 | 316 | pass |
|
318 | 317 | |
|
319 | 318 | |
|
320 | 319 | def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False): |
|
321 | 320 | import redis_lock |
|
322 | 321 | |
|
323 | 322 | class _RedisLockWrapper(object): |
|
324 | 323 | """LockWrapper for redis_lock""" |
|
325 | 324 | |
|
326 | 325 | @classmethod |
|
327 | 326 | def get_lock(cls): |
|
328 | 327 | return redis_lock.Lock( |
|
329 | 328 | redis_client=client, |
|
330 | 329 | name=lock_key, |
|
331 | 330 | expire=lock_timeout, |
|
332 | 331 | auto_renewal=auto_renewal, |
|
333 | 332 | strict=True, |
|
334 | 333 | ) |
|
335 | 334 | |
|
335 | def __repr__(self): | |
|
336 | return "{}:{}".format(self.__class__.__name__, lock_key) | |
|
337 | ||
|
338 | def __str__(self): | |
|
339 | return "{}:{}".format(self.__class__.__name__, lock_key) | |
|
340 | ||
|
336 | 341 | def __init__(self): |
|
337 | 342 | self.lock = self.get_lock() |
|
343 | self.lock_key = lock_key | |
|
338 | 344 | |
|
339 | 345 | def acquire(self, wait=True): |
|
346 | log.debug('Trying to acquire Redis lock for key %s', self.lock_key) | |
|
340 | 347 | try: |
|
341 | return self.lock.acquire(wait) | 
|
348 | acquired = self.lock.acquire(wait) | |
|
349 | log.debug('Got lock for key %s, %s', self.lock_key, acquired) | |
|
350 | return acquired | |
|
342 | 351 | except redis_lock.AlreadyAcquired: |
|
343 | 352 | return False |
|
344 | 353 | except redis_lock.AlreadyStarted: |
|
345 | 354 | # refresh thread exists, but it also means we acquired the lock |
|
346 | 355 | return True |
|
347 | 356 | |
|
348 | 357 | def release(self): |
|
349 | 358 | try: |
|
350 | 359 | self.lock.release() |
|
351 | 360 | except redis_lock.NotAcquired: |
|
352 | 361 | pass |
|
353 | 362 | |
|
354 | 363 | return _RedisLockWrapper() |
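dogpile's get_or_create() only needs the object returned by get_mutex() to expose acquire()/release(), which is what _RedisLockWrapper provides on top of python-redis-lock. A hedged sketch of driving the wrapper directly, assuming a Redis server on localhost and the redis / python-redis-lock packages (the key name is made up):

    import redis

    client = redis.StrictRedis()  # assumes redis://localhost:6379/0
    mutex = get_mutex_lock(client, '_lock_example_key',
                           lock_timeout=30, auto_renewal=True)

    if mutex.acquire(wait=True):
        try:
            pass  # compute and store the cached value here
        finally:
            mutex.release()
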
@@ -1,418 +1,422 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2015-2020 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | import os |
|
21 | 21 | import time |
|
22 | 22 | import logging |
|
23 | 23 | import functools |
|
24 | 24 | import threading |
|
25 | 25 | |
|
26 | 26 | from dogpile.cache import CacheRegion |
|
27 | 27 | from dogpile.cache.util import compat |
|
28 | 28 | |
|
29 | 29 | import rhodecode |
|
30 | 30 | from rhodecode.lib.utils import safe_str, sha1 |
|
31 | 31 | from rhodecode.lib.utils2 import safe_unicode, str2bool |
|
32 | 32 | from rhodecode.model.db import Session, CacheKey, IntegrityError |
|
33 | 33 | |
|
34 | 34 | from rhodecode.lib.rc_cache import cache_key_meta |
|
35 | 35 | from rhodecode.lib.rc_cache import region_meta |
|
36 | 36 | |
|
37 | 37 | log = logging.getLogger(__name__) |
|
38 | 38 | |
|
39 | 39 | |
|
40 | 40 | def isCython(func): |
|
41 | 41 | """ |
|
42 | 42 | Private helper that checks if a function is a cython function. |
|
43 | 43 | """ |
|
44 | 44 | return func.__class__.__name__ == 'cython_function_or_method' |
|
45 | 45 | |
|
46 | 46 | |
|
47 | 47 | class RhodeCodeCacheRegion(CacheRegion): |
|
48 | 48 | |
|
49 | 49 | def conditional_cache_on_arguments( |
|
50 | 50 | self, namespace=None, |
|
51 | 51 | expiration_time=None, |
|
52 | 52 | should_cache_fn=None, |
|
53 | 53 | to_str=compat.string_type, |
|
54 | 54 | function_key_generator=None, |
|
55 | 55 | condition=True): |
|
56 | 56 | """ |
|
57 | 57 | Custom conditional decorator, that will not touch any dogpile internals if |
|
58 | 58 | condition isn't meet. This works a bit different than should_cache_fn |
|
59 | 59 | And it's faster in cases we don't ever want to compute cached values |
|
60 | 60 | """ |
|
61 | 61 | expiration_time_is_callable = compat.callable(expiration_time) |
|
62 | 62 | |
|
63 | 63 | if function_key_generator is None: |
|
64 | 64 | function_key_generator = self.function_key_generator |
|
65 | 65 | |
|
66 | 66 | # workaround for py2 and cython problems, this block should be removed |
|
67 | 67 | # once we've migrated to py3 |
|
68 | 68 | if 'cython' == 'cython': |
|
69 | 69 | def decorator(fn): |
|
70 | 70 | if to_str is compat.string_type: |
|
71 | 71 | # backwards compatible |
|
72 | 72 | key_generator = function_key_generator(namespace, fn) |
|
73 | 73 | else: |
|
74 | 74 | key_generator = function_key_generator(namespace, fn, to_str=to_str) |
|
75 | 75 | |
|
76 | 76 | @functools.wraps(fn) |
|
77 | 77 | def decorate(*arg, **kw): |
|
78 | 78 | key = key_generator(*arg, **kw) |
|
79 | 79 | |
|
80 | 80 | @functools.wraps(fn) |
|
81 | 81 | def creator(): |
|
82 | 82 | return fn(*arg, **kw) |
|
83 | 83 | |
|
84 | 84 | if not condition: |
|
85 | 85 | return creator() |
|
86 | 86 | |
|
87 | 87 | timeout = expiration_time() if expiration_time_is_callable \ |
|
88 | 88 | else expiration_time |
|
89 | 89 | |
|
90 | 90 | return self.get_or_create(key, creator, timeout, should_cache_fn) |
|
91 | 91 | |
|
92 | 92 | def invalidate(*arg, **kw): |
|
93 | 93 | key = key_generator(*arg, **kw) |
|
94 | 94 | self.delete(key) |
|
95 | 95 | |
|
96 | 96 | def set_(value, *arg, **kw): |
|
97 | 97 | key = key_generator(*arg, **kw) |
|
98 | 98 | self.set(key, value) |
|
99 | 99 | |
|
100 | 100 | def get(*arg, **kw): |
|
101 | 101 | key = key_generator(*arg, **kw) |
|
102 | 102 | return self.get(key) |
|
103 | 103 | |
|
104 | 104 | def refresh(*arg, **kw): |
|
105 | 105 | key = key_generator(*arg, **kw) |
|
106 | 106 | value = fn(*arg, **kw) |
|
107 | 107 | self.set(key, value) |
|
108 | 108 | return value |
|
109 | 109 | |
|
110 | 110 | decorate.set = set_ |
|
111 | 111 | decorate.invalidate = invalidate |
|
112 | 112 | decorate.refresh = refresh |
|
113 | 113 | decorate.get = get |
|
114 | 114 | decorate.original = fn |
|
115 | 115 | decorate.key_generator = key_generator |
|
116 | 116 | decorate.__wrapped__ = fn |
|
117 | 117 | |
|
118 | 118 | return decorate |
|
119 | 119 | return decorator |
|
120 | 120 | |
|
121 | 121 | def get_or_create_for_user_func(key_generator, user_func, *arg, **kw): |
|
122 | 122 | |
|
123 | 123 | if not condition: |
|
124 | 124 | log.debug('Calling un-cached func:%s', user_func.func_name) |
|
125 | return user_func(*arg, **kw) | |
|
125 | start = time.time() | |
|
126 | result = user_func(*arg, **kw) | |
|
127 | total = time.time() - start | |
|
128 | log.debug('un-cached func:%s took %.4fs', user_func.func_name, total) | |
|
129 | return result | |
|
126 | 130 | |
|
127 | 131 | key = key_generator(*arg, **kw) |
|
128 | 132 | |
|
129 | 133 | timeout = expiration_time() if expiration_time_is_callable \ |
|
130 | 134 | else expiration_time |
|
131 | 135 | |
|
132 | 136 | log.debug('Calling cached fn:%s', user_func.func_name) |
|
133 | 137 | return self.get_or_create(key, user_func, timeout, should_cache_fn, (arg, kw)) |
|
134 | 138 | |
|
135 | 139 | def cache_decorator(user_func): |
|
136 | 140 | if to_str is compat.string_type: |
|
137 | 141 | # backwards compatible |
|
138 | 142 | key_generator = function_key_generator(namespace, user_func) |
|
139 | 143 | else: |
|
140 | 144 | key_generator = function_key_generator(namespace, user_func, to_str=to_str) |
|
141 | 145 | |
|
142 | 146 | def refresh(*arg, **kw): |
|
143 | 147 | """ |
|
144 | 148 | Like invalidate, but regenerates the value instead |
|
145 | 149 | """ |
|
146 | 150 | key = key_generator(*arg, **kw) |
|
147 | 151 | value = user_func(*arg, **kw) |
|
148 | 152 | self.set(key, value) |
|
149 | 153 | return value |
|
150 | 154 | |
|
151 | 155 | def invalidate(*arg, **kw): |
|
152 | 156 | key = key_generator(*arg, **kw) |
|
153 | 157 | self.delete(key) |
|
154 | 158 | |
|
155 | 159 | def set_(value, *arg, **kw): |
|
156 | 160 | key = key_generator(*arg, **kw) |
|
157 | 161 | self.set(key, value) |
|
158 | 162 | |
|
159 | 163 | def get(*arg, **kw): |
|
160 | 164 | key = key_generator(*arg, **kw) |
|
161 | 165 | return self.get(key) |
|
162 | 166 | |
|
163 | 167 | user_func.set = set_ |
|
164 | 168 | user_func.invalidate = invalidate |
|
165 | 169 | user_func.get = get |
|
166 | 170 | user_func.refresh = refresh |
|
167 | 171 | user_func.key_generator = key_generator |
|
168 | 172 | user_func.original = user_func |
|
169 | 173 | |
|
170 | 174 | # Use `decorate` to preserve the signature of :param:`user_func`. |
|
171 | 175 | return decorator.decorate(user_func, functools.partial( |
|
172 | 176 | get_or_create_for_user_func, key_generator)) |
|
173 | 177 | |
|
174 | 178 | return cache_decorator |
|
175 | 179 | |
|
176 | 180 | |
|
177 | 181 | def make_region(*arg, **kw): |
|
178 | 182 | return RhodeCodeCacheRegion(*arg, **kw) |
|
179 | 183 | |
|
180 | 184 | |
|
181 | 185 | def get_default_cache_settings(settings, prefixes=None): |
|
182 | 186 | prefixes = prefixes or [] |
|
183 | 187 | cache_settings = {} |
|
184 | 188 | for key in settings.keys(): |
|
185 | 189 | for prefix in prefixes: |
|
186 | 190 | if key.startswith(prefix): |
|
187 | 191 | name = key.split(prefix)[1].strip() |
|
188 | 192 | val = settings[key] |
|
189 | 193 | if isinstance(val, compat.string_types): |
|
190 | 194 | val = val.strip() |
|
191 | 195 | cache_settings[name] = val |
|
192 | 196 | return cache_settings |
|
193 | 197 | |
|
194 | 198 | |
|
195 | 199 | def compute_key_from_params(*args): |
|
196 | 200 | """ |
|
197 | 201 | Helper to compute key from given params to be used in cache manager |
|
198 | 202 | """ |
|
199 | 203 | return sha1("_".join(map(safe_str, args))) |
|
200 | 204 | |
|
201 | 205 | |
|
202 | 206 | def backend_key_generator(backend): |
|
203 | 207 | """ |
|
204 | 208 | Special wrapper that also sends over the backend to the key generator |
|
205 | 209 | """ |
|
206 | 210 | def wrapper(namespace, fn): |
|
207 | 211 | return key_generator(backend, namespace, fn) |
|
208 | 212 | return wrapper |
|
209 | 213 | |
|
210 | 214 | |
|
211 | 215 | def key_generator(backend, namespace, fn): |
|
212 | 216 | fname = fn.__name__ |
|
213 | 217 | |
|
214 | 218 | def generate_key(*args): |
|
215 | 219 | backend_prefix = getattr(backend, 'key_prefix', None) or 'backend_prefix' |
|
216 | 220 | namespace_pref = namespace or 'default_namespace' |
|
217 | 221 | arg_key = compute_key_from_params(*args) |
|
218 | 222 | final_key = "{}:{}:{}_{}".format(backend_prefix, namespace_pref, fname, arg_key) |
|
219 | 223 | |
|
220 | 224 | return final_key |
|
221 | 225 | |
|
222 | 226 | return generate_key |
|
223 | 227 | |
|
224 | 228 | |
|
225 | 229 | def get_or_create_region(region_name, region_namespace=None): |
|
226 | 230 | from rhodecode.lib.rc_cache.backends import FileNamespaceBackend |
|
227 | 231 | region_obj = region_meta.dogpile_cache_regions.get(region_name) |
|
228 | 232 | if not region_obj: |
|
229 | 233 | raise EnvironmentError( |
|
230 | 234 | 'Region `{}` not in configured: {}.'.format( |
|
231 | 235 | region_name, region_meta.dogpile_cache_regions.keys())) |
|
232 | 236 | |
|
233 | 237 | region_uid_name = '{}:{}'.format(region_name, region_namespace) |
|
234 | 238 | if isinstance(region_obj.actual_backend, FileNamespaceBackend): |
|
235 | 239 | region_exist = region_meta.dogpile_cache_regions.get(region_namespace) |
|
236 | 240 | if region_exist: |
|
237 | 241 | log.debug('Using already configured region: %s', region_namespace) |
|
238 | 242 | return region_exist |
|
239 | 243 | cache_dir = region_meta.dogpile_config_defaults['cache_dir'] |
|
240 | 244 | expiration_time = region_obj.expiration_time |
|
241 | 245 | |
|
242 | 246 | if not os.path.isdir(cache_dir): |
|
243 | 247 | os.makedirs(cache_dir) |
|
244 | 248 | new_region = make_region( |
|
245 | 249 | name=region_uid_name, |
|
246 | 250 | function_key_generator=backend_key_generator(region_obj.actual_backend) |
|
247 | 251 | ) |
|
248 | 252 | namespace_filename = os.path.join( |
|
249 | 253 | cache_dir, "{}.cache.dbm".format(region_namespace)) |
|
250 | 254 | # special type that allows 1db per namespace |
|
251 | 255 | new_region.configure( |
|
252 | 256 | backend='dogpile.cache.rc.file_namespace', |
|
253 | 257 | expiration_time=expiration_time, |
|
254 | 258 | arguments={"filename": namespace_filename} |
|
255 | 259 | ) |
|
256 | 260 | |
|
257 | 261 | # create and save in region caches |
|
258 | 262 | log.debug('configuring new region: %s', region_uid_name) |
|
259 | 263 | region_obj = region_meta.dogpile_cache_regions[region_namespace] = new_region |
|
260 | 264 | |
|
261 | 265 | return region_obj |
|
262 | 266 | |
|
263 | 267 | |
|
264 | 268 | def clear_cache_namespace(cache_region, cache_namespace_uid, invalidate=False): |
|
265 | 269 | region = get_or_create_region(cache_region, cache_namespace_uid) |
|
266 | 270 | cache_keys = region.backend.list_keys(prefix=cache_namespace_uid) |
|
267 | 271 | num_delete_keys = len(cache_keys) |
|
268 | 272 | if invalidate: |
|
269 | 273 | region.invalidate(hard=False) |
|
270 | 274 | else: |
|
271 | 275 | if num_delete_keys: |
|
272 | 276 | region.delete_multi(cache_keys) |
|
273 | 277 | return num_delete_keys |
|
274 | 278 | |
|
275 | 279 | |
|
276 | 280 | class ActiveRegionCache(object): |
|
277 | 281 | def __init__(self, context, cache_data): |
|
278 | 282 | self.context = context |
|
279 | 283 | self.cache_data = cache_data |
|
280 | 284 | |
|
281 | 285 | def should_invalidate(self): |
|
282 | 286 | return False |
|
283 | 287 | |
|
284 | 288 | |
|
285 | 289 | class FreshRegionCache(object): |
|
286 | 290 | def __init__(self, context, cache_data): |
|
287 | 291 | self.context = context |
|
288 | 292 | self.cache_data = cache_data |
|
289 | 293 | |
|
290 | 294 | def should_invalidate(self): |
|
291 | 295 | return True |
|
292 | 296 | |
|
293 | 297 | |
|
294 | 298 | class InvalidationContext(object): |
|
295 | 299 | """ |
|
296 | 300 | usage:: |
|
297 | 301 | |
|
298 | 302 | from rhodecode.lib import rc_cache |
|
299 | 303 | |
|
300 | 304 | cache_namespace_uid = CacheKey.SOME_NAMESPACE.format(1) |
|
301 | 305 | region = rc_cache.get_or_create_region('cache_perms', cache_namespace_uid) |
|
302 | 306 | |
|
303 | 307 | @region.conditional_cache_on_arguments(namespace=cache_namespace_uid, condition=True) |
|
304 | 308 | def heavy_compute(cache_name, param1, param2): |
|
305 | 309 | print('COMPUTE {}, {}, {}'.format(cache_name, param1, param2)) |
|
306 | 310 | |
|
307 | 311 | # invalidation namespace is shared namespace key for all process caches |
|
308 | 312 | # we use it to send a global signal |
|
309 | 313 | invalidation_namespace = 'repo_cache:1' |
|
310 | 314 | |
|
311 | 315 | inv_context_manager = rc_cache.InvalidationContext( |
|
312 | 316 | uid=cache_namespace_uid, invalidation_namespace=invalidation_namespace) |
|
313 | 317 | with inv_context_manager as invalidation_context: |
|
314 | 318 | args = ('one', 'two') |
|
315 | 319 | # re-compute and store cache if we get invalidate signal |
|
316 | 320 | if invalidation_context.should_invalidate(): |
|
317 | 321 | result = heavy_compute.refresh(*args) |
|
318 | 322 | else: |
|
319 | 323 | result = heavy_compute(*args) |
|
320 | 324 | |
|
321 | 325 | compute_time = inv_context_manager.compute_time |
|
322 | 326 | log.debug('result computed in %.4fs', compute_time) |
|
323 | 327 | |
|
324 | 328 | # To send global invalidation signal, simply run |
|
325 | 329 | CacheKey.set_invalidate(invalidation_namespace) |
|
326 | 330 | |
|
327 | 331 | """ |
|
328 | 332 | |
|
329 | 333 | def __repr__(self): |
|
330 | 334 | return '<InvalidationContext:{}[{}]>'.format( |
|
331 | 335 | safe_str(self.cache_key), safe_str(self.uid)) |
|
332 | 336 | |
|
333 | 337 | def __init__(self, uid, invalidation_namespace='', |
|
334 | 338 | raise_exception=False, thread_scoped=None): |
|
335 | 339 | self.uid = uid |
|
336 | 340 | self.invalidation_namespace = invalidation_namespace |
|
337 | 341 | self.raise_exception = raise_exception |
|
338 | 342 | self.proc_id = safe_unicode(rhodecode.CONFIG.get('instance_id') or 'DEFAULT') |
|
339 | 343 | self.thread_id = 'global' |
|
340 | 344 | |
|
341 | 345 | if thread_scoped is None: |
|
342 | 346 | # if we set "default" we can override this via .ini settings |
|
343 | 347 | thread_scoped = str2bool(rhodecode.CONFIG.get('cache_thread_scoped')) |
|
344 | 348 | |
|
345 | 349 | # Append the thread id to the cache key if this invalidation context |
|
346 | 350 | # should be scoped to the current thread. |
|
347 | 351 | if thread_scoped is True: |
|
348 | 352 | self.thread_id = threading.current_thread().ident |
|
349 | 353 | |
|
350 | 354 | self.cache_key = compute_key_from_params(uid) |
|
351 | 355 | self.cache_key = 'proc:{}|thread:{}|params:{}'.format( |
|
352 | 356 | self.proc_id, self.thread_id, self.cache_key) |
|
353 | 357 | self.compute_time = 0 |
|
354 | 358 | |
|
355 | 359 | def get_or_create_cache_obj(self, cache_type, invalidation_namespace=''): |
|
356 | 360 | invalidation_namespace = invalidation_namespace or self.invalidation_namespace |
|
357 | 361 | # fetch all cache keys for this namespace and convert them to a map to find if we |
|
358 | 362 | # have specific cache_key object registered. We do this because we want to have |
|
359 | 363 | # all consistent cache_state_uid for newly registered objects |
|
360 | 364 | cache_obj_map = CacheKey.get_namespace_map(invalidation_namespace) |
|
361 | 365 | cache_obj = cache_obj_map.get(self.cache_key) |
|
362 | 366 | log.debug('Fetched cache obj %s using %s cache key.', cache_obj, self.cache_key) |
|
363 | 367 | if not cache_obj: |
|
364 | 368 | new_cache_args = invalidation_namespace |
|
365 | 369 | first_cache_obj = next(cache_obj_map.itervalues()) if cache_obj_map else None |
|
366 | 370 | cache_state_uid = None |
|
367 | 371 | if first_cache_obj: |
|
368 | 372 | cache_state_uid = first_cache_obj.cache_state_uid |
|
369 | 373 | cache_obj = CacheKey(self.cache_key, cache_args=new_cache_args, |
|
370 | 374 | cache_state_uid=cache_state_uid) |
|
371 | 375 | cache_key_meta.cache_keys_by_pid.append(self.cache_key) |
|
372 | 376 | |
|
373 | 377 | return cache_obj |
|
374 | 378 | |
|
375 | 379 | def __enter__(self): |
|
376 | 380 | """ |
|
377 | 381 | Test if current object is valid, and return CacheRegion function |
|
378 | 382 | that does invalidation and calculation |
|
379 | 383 | """ |
|
380 | 384 | log.debug('Entering cache invalidation check context: %s', self.invalidation_namespace) |
|
381 | 385 | # register or get a new key based on uid |
|
382 | 386 | self.cache_obj = self.get_or_create_cache_obj(cache_type=self.uid) |
|
383 | 387 | cache_data = self.cache_obj.get_dict() |
|
384 | 388 | self._start_time = time.time() |
|
385 | 389 | if self.cache_obj.cache_active: |
|
386 | 390 | # means our cache obj is existing and marked as it's |
|
387 | 391 | # cache is not outdated, we return ActiveRegionCache |
|
388 | 392 | self.skip_cache_active_change = True |
|
389 | 393 | |
|
390 | 394 | return ActiveRegionCache(context=self, cache_data=cache_data) |
|
391 | 395 | |
|
392 | 396 | # the key is either not existing or set to False, we return |
|
393 | 397 | # the real invalidator which re-computes value. We additionally set |
|
394 | 398 | # the flag to actually update the Database objects |
|
395 | 399 | self.skip_cache_active_change = False |
|
396 | 400 | return FreshRegionCache(context=self, cache_data=cache_data) |
|
397 | 401 | |
|
398 | 402 | def __exit__(self, exc_type, exc_val, exc_tb): |
|
399 | 403 | # save compute time |
|
400 | 404 | self.compute_time = time.time() - self._start_time |
|
401 | 405 | |
|
402 | 406 | if self.skip_cache_active_change: |
|
403 | 407 | return |
|
404 | 408 | |
|
405 | 409 | try: |
|
406 | 410 | self.cache_obj.cache_active = True |
|
407 | 411 | Session().add(self.cache_obj) |
|
408 | 412 | Session().commit() |
|
409 | 413 | except IntegrityError: |
|
410 | 414 | # if we catch integrity error, it means we inserted this object |
|
411 | 415 | # assumption is that's really an edge race-condition case and |
|
412 | 416 | # it's safe is to skip it |
|
413 | 417 | Session().rollback() |
|
414 | 418 | except Exception: |
|
415 | 419 | log.exception('Failed to commit on cache key update') |
|
416 | 420 | Session().rollback() |
|
417 | 421 | if self.raise_exception: |
|
418 | 422 | raise |
@@ -1,797 +1,853 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2017-2020 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | |
|
22 | 22 | import os |
|
23 | 23 | import sys |
|
24 | 24 | import time |
|
25 | 25 | import platform |
|
26 | 26 | import collections |
|
27 | from functools import wraps | |
|
28 | ||
|
27 | 29 | import pkg_resources |
|
28 | 30 | import logging |
|
29 | 31 | import resource |
|
30 | 32 | |
|
31 | 33 | from pyramid.compat import configparser |
|
32 | 34 | |
|
33 | 35 | log = logging.getLogger(__name__) |
|
34 | 36 | |
|
35 | 37 | |
|
36 | 38 | psutil = None |
|
37 | 39 | |
|
38 | 40 | try: |
|
39 | 41 | # cygwin does not yet have psutil support.
|
40 | 42 | import psutil as psutil |
|
41 | 43 | except ImportError: |
|
42 | 44 | pass |
|
43 | 45 | |
|
44 | 46 | |
|
45 | 47 | _NA = 'NOT AVAILABLE' |
|
46 | 48 | |
|
47 | 49 | STATE_OK = 'ok' |
|
48 | 50 | STATE_ERR = 'error' |
|
49 | 51 | STATE_WARN = 'warning' |
|
50 | 52 | |
|
51 | 53 | STATE_OK_DEFAULT = {'message': '', 'type': STATE_OK} |
|
52 | 54 | |
|
53 | 55 | |
|
56 | registered_helpers = {} | |
|
57 | ||
|
58 | ||
|
59 | def register_sysinfo(func): | |
|
60 | """ | |
|
61 | @register_sysinfo | 
|
62 | def db_check(): | |
|
63 | pass | |
|
64 | ||
|
65 | db_check == registered_helpers['db_check'] | |
|
66 | """ | |
|
67 | global registered_helpers | |
|
68 | registered_helpers[func.__name__] = func | |
|
69 | ||
|
70 | @wraps(func) | |
|
71 | def _wrapper(*args, **kwargs): | |
|
72 | return func(*args, **kwargs) | |
|
73 | return _wrapper | |
|
74 | ||
|
75 | ||
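register_sysinfo stores the undecorated helper in the module-level registered_helpers dict, keyed by function name, and SysInfo later wraps such a helper and returns its __json__() form. A hedged sketch with a hypothetical helper (SysInfoRes and SysInfo are defined further down in this file):

    @register_sysinfo
    def example_check():
        return SysInfoRes(value={'ok': True})

    # the undecorated function is stored under its own name
    assert 'example_check' in registered_helpers

    result = SysInfo(registered_helpers['example_check'])()
    # -> {'value': {'ok': True}, 'state': {'message': '', 'type': 'ok'},
    #     'human_value': {'ok': True}}
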
|
54 | 76 | # HELPERS |
|
55 | 77 | def percentage(part, whole): |
|
56 | 78 | whole = float(whole) |
|
57 | 79 | if whole > 0: |
|
58 | 80 | return round(100 * float(part) / whole, 1) |
|
59 | 81 | return 0.0 |
|
60 | 82 | |
|
61 | 83 | |
|
62 | 84 | def get_storage_size(storage_path): |
|
63 | 85 | sizes = [] |
|
64 | 86 | for file_ in os.listdir(storage_path): |
|
65 | 87 | storage_file = os.path.join(storage_path, file_) |
|
66 | 88 | if os.path.isfile(storage_file): |
|
67 | 89 | try: |
|
68 | 90 | sizes.append(os.path.getsize(storage_file)) |
|
69 | 91 | except OSError: |
|
70 | 92 | log.exception('Failed to get size of storage file %s', storage_file) |
|
71 | 93 | pass |
|
72 | 94 | |
|
73 | 95 | return sum(sizes) |
|
74 | 96 | |
|
75 | 97 | |
|
76 | 98 | def get_resource(resource_type): |
|
77 | 99 | try: |
|
78 | 100 | return resource.getrlimit(resource_type) |
|
79 | 101 | except Exception: |
|
80 | 102 | return 'NOT_SUPPORTED' |
|
81 | 103 | |
|
82 | 104 | |
|
83 | 105 | def get_cert_path(ini_path): |
|
84 | 106 | default = '/etc/ssl/certs/ca-certificates.crt' |
|
85 | 107 | control_ca_bundle = os.path.join( |
|
86 | 108 | os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(ini_path)))), |
|
87 | 109 | '.rccontrol-profile/etc/ca-bundle.crt') |
|
88 | 110 | if os.path.isfile(control_ca_bundle): |
|
89 | 111 | default = control_ca_bundle |
|
90 | 112 | |
|
91 | 113 | return default |
|
92 | 114 | |
|
93 | 115 | |
|
94 | 116 | class SysInfoRes(object): |
|
95 | 117 | def __init__(self, value, state=None, human_value=None): |
|
96 | 118 | self.value = value |
|
97 | 119 | self.state = state or STATE_OK_DEFAULT |
|
98 | 120 | self.human_value = human_value or value |
|
99 | 121 | |
|
100 | 122 | def __json__(self): |
|
101 | 123 | return { |
|
102 | 124 | 'value': self.value, |
|
103 | 125 | 'state': self.state, |
|
104 | 126 | 'human_value': self.human_value, |
|
105 | 127 | } |
|
106 | 128 | |
|
107 | 129 | def get_value(self): |
|
108 | 130 | return self.__json__() |
|
109 | 131 | |
|
110 | 132 | def __str__(self): |
|
111 | 133 | return '<SysInfoRes({})>'.format(self.__json__()) |
|
112 | 134 | |
|
113 | 135 | |
|
114 | 136 | class SysInfo(object): |
|
115 | 137 | |
|
116 | 138 | def __init__(self, func_name, **kwargs): |
|
117 | 139 | self.func_name = func_name |
|
118 | 140 | self.value = _NA |
|
119 | 141 | self.state = None |
|
120 | 142 | self.kwargs = kwargs or {} |
|
121 | 143 | |
|
122 | 144 | def __call__(self): |
|
123 | 145 | computed = self.compute(**self.kwargs) |
|
124 | 146 | if not isinstance(computed, SysInfoRes): |
|
125 | 147 | raise ValueError( |
|
126 | 148 | 'computed value for {} is not instance of ' |
|
127 | 149 | '{}, got {} instead'.format( |
|
128 | 150 | self.func_name, SysInfoRes, type(computed))) |
|
129 | 151 | return computed.__json__() |
|
130 | 152 | |
|
131 | 153 | def __str__(self): |
|
132 | 154 | return '<SysInfo({})>'.format(self.func_name) |
|
133 | 155 | |
|
134 | 156 | def compute(self, **kwargs): |
|
135 | 157 | return self.func_name(**kwargs) |
|
136 | 158 | |
|
137 | 159 | |
|
138 | 160 | # SysInfo functions |
|
161 | @register_sysinfo | |
|
139 | 162 | def python_info(): |
|
140 | 163 | value = dict(version=' '.join(platform._sys_version()), |
|
141 | 164 | executable=sys.executable) |
|
142 | 165 | return SysInfoRes(value=value) |
|
143 | 166 | |
|
144 | 167 | |
|
168 | @register_sysinfo | |
|
145 | 169 | def py_modules(): |
|
146 | 170 | mods = dict([(p.project_name, {'version': p.version, 'location': p.location}) |
|
147 | 171 | for p in pkg_resources.working_set]) |
|
148 | 172 | |
|
149 | 173 | value = sorted(mods.items(), key=lambda k: k[0].lower()) |
|
150 | 174 | return SysInfoRes(value=value) |
|
151 | 175 | |
|
152 | 176 | |
|
177 | @register_sysinfo | |
|
153 | 178 | def platform_type(): |
|
154 | 179 | from rhodecode.lib.utils import safe_unicode, generate_platform_uuid |
|
155 | 180 | |
|
156 | 181 | value = dict( |
|
157 | 182 | name=safe_unicode(platform.platform()), |
|
158 | 183 | uuid=generate_platform_uuid() |
|
159 | 184 | ) |
|
160 | 185 | return SysInfoRes(value=value) |
|
161 | 186 | |
|
162 | 187 | |
|
188 | @register_sysinfo | |
|
163 | 189 | def locale_info(): |
|
164 | 190 | import locale |
|
165 | 191 | |
|
166 | 192 | value = dict( |
|
167 | 193 | locale_default=locale.getdefaultlocale(), |
|
168 | 194 | locale_lc_all=locale.getlocale(locale.LC_ALL), |
|
169 | 195 | lang_env=os.environ.get('LANG'), |
|
170 | 196 | lc_all_env=os.environ.get('LC_ALL'), |
|
171 | 197 | local_archive_env=os.environ.get('LOCALE_ARCHIVE'), |
|
172 | 198 | ) |
|
173 | 199 | human_value = 'LANG: {}, locale LC_ALL: {}, Default locales: {}'.format( |
|
174 | 200 | value['lang_env'], value['locale_lc_all'], value['locale_default']) |
|
175 | 201 | return SysInfoRes(value=value, human_value=human_value) |
|
176 | 202 | |
|
177 | 203 | |
|
204 | @register_sysinfo | |
|
178 | 205 | def ulimit_info(): |
|
179 | 206 | data = collections.OrderedDict([ |
|
180 | 207 | ('cpu time (seconds)', get_resource(resource.RLIMIT_CPU)), |
|
181 | 208 | ('file size', get_resource(resource.RLIMIT_FSIZE)), |
|
182 | 209 | ('stack size', get_resource(resource.RLIMIT_STACK)), |
|
183 | 210 | ('core file size', get_resource(resource.RLIMIT_CORE)), |
|
184 | 211 | ('address space size', get_resource(resource.RLIMIT_AS)), |
|
185 | 212 | ('locked in mem size', get_resource(resource.RLIMIT_MEMLOCK)), |
|
186 | 213 | ('heap size', get_resource(resource.RLIMIT_DATA)), |
|
187 | 214 | ('rss size', get_resource(resource.RLIMIT_RSS)), |
|
188 | 215 | ('number of processes', get_resource(resource.RLIMIT_NPROC)), |
|
189 | 216 | ('open files', get_resource(resource.RLIMIT_NOFILE)), |
|
190 | 217 | ]) |
|
191 | 218 | |
|
192 | 219 | text = ', '.join('{}:{}'.format(k, v) for k, v in data.items()) |
|
193 | 220 | |
|
194 | 221 | value = { |
|
195 | 222 | 'limits': data, |
|
196 | 223 | 'text': text, |
|
197 | 224 | } |
|
198 | 225 | return SysInfoRes(value=value) |
|
199 | 226 | |
|
200 | 227 | |
|
228 | @register_sysinfo | |
|
201 | 229 | def uptime(): |
|
202 | 230 | from rhodecode.lib.helpers import age, time_to_datetime |
|
203 | 231 | from rhodecode.translation import TranslationString |
|
204 | 232 | |
|
205 | 233 | value = dict(boot_time=0, uptime=0, text='') |
|
206 | 234 | state = STATE_OK_DEFAULT |
|
207 | 235 | if not psutil: |
|
208 | 236 | return SysInfoRes(value=value, state=state) |
|
209 | 237 | |
|
210 | 238 | boot_time = psutil.boot_time() |
|
211 | 239 | value['boot_time'] = boot_time |
|
212 | 240 | value['uptime'] = time.time() - boot_time |
|
213 | 241 | |
|
214 | 242 | date_or_age = age(time_to_datetime(boot_time)) |
|
215 | 243 | if isinstance(date_or_age, TranslationString): |
|
216 | 244 | date_or_age = date_or_age.interpolate() |
|
217 | 245 | |
|
218 | 246 | human_value = value.copy() |
|
219 | 247 | human_value['boot_time'] = time_to_datetime(boot_time) |
|
220 | 248 | human_value['uptime'] = age(time_to_datetime(boot_time), show_suffix=False) |
|
221 | 249 | |
|
222 | 250 | human_value['text'] = u'Server started {}'.format(date_or_age) |
|
223 | 251 | return SysInfoRes(value=value, human_value=human_value) |
|
224 | 252 | |
|
225 | 253 | |
|
254 | @register_sysinfo | |
|
226 | 255 | def memory(): |
|
227 | 256 | from rhodecode.lib.helpers import format_byte_size_binary |
|
228 | 257 | value = dict(available=0, used=0, used_real=0, cached=0, percent=0, |
|
229 | 258 | percent_used=0, free=0, inactive=0, active=0, shared=0, |
|
230 | 259 | total=0, buffers=0, text='') |
|
231 | 260 | |
|
232 | 261 | state = STATE_OK_DEFAULT |
|
233 | 262 | if not psutil: |
|
234 | 263 | return SysInfoRes(value=value, state=state) |
|
235 | 264 | |
|
236 | 265 | value.update(dict(psutil.virtual_memory()._asdict())) |
|
237 | 266 | value['used_real'] = value['total'] - value['available'] |
|
238 | 267 | value['percent_used'] = psutil._common.usage_percent( |
|
239 | 268 | value['used_real'], value['total'], 1) |
|
240 | 269 | |
|
241 | 270 | human_value = value.copy() |
|
242 | 271 | human_value['text'] = '%s/%s, %s%% used' % ( |
|
243 | 272 | format_byte_size_binary(value['used_real']), |
|
244 | 273 | format_byte_size_binary(value['total']), |
|
245 | 274 | value['percent_used'],) |
|
246 | 275 | |
|
247 | 276 | keys = value.keys()[::] |
|
248 | 277 | keys.pop(keys.index('percent')) |
|
249 | 278 | keys.pop(keys.index('percent_used')) |
|
250 | 279 | keys.pop(keys.index('text')) |
|
251 | 280 | for k in keys: |
|
252 | 281 | human_value[k] = format_byte_size_binary(value[k]) |
|
253 | 282 | |
|
254 | 283 | if state['type'] == STATE_OK and value['percent_used'] > 90: |
|
255 | 284 | msg = 'Critical: your available RAM memory is very low.' |
|
256 | 285 | state = {'message': msg, 'type': STATE_ERR} |
|
257 | 286 | |
|
258 | 287 | elif state['type'] == STATE_OK and value['percent_used'] > 70: |
|
259 | 288 | msg = 'Warning: your available RAM memory is running low.' |
|
260 | 289 | state = {'message': msg, 'type': STATE_WARN} |
|
261 | 290 | |
|
262 | 291 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
263 | 292 | |
|
264 | 293 | |
|
294 | @register_sysinfo | |
|
265 | 295 | def machine_load(): |
|
266 | 296 | value = {'1_min': _NA, '5_min': _NA, '15_min': _NA, 'text': ''} |
|
267 | 297 | state = STATE_OK_DEFAULT |
|
268 | 298 | if not psutil: |
|
269 | 299 | return SysInfoRes(value=value, state=state) |
|
270 | 300 | |
|
271 | 301 | # load averages |
|
272 | 302 | if hasattr(psutil.os, 'getloadavg'): |
|
273 | 303 | value.update(dict( |
|
274 | 304 | zip(['1_min', '5_min', '15_min'], psutil.os.getloadavg()))) |
|
275 | 305 | |
|
276 | 306 | human_value = value.copy() |
|
277 | 307 | human_value['text'] = '1min: {}, 5min: {}, 15min: {}'.format( |
|
278 | 308 | value['1_min'], value['5_min'], value['15_min']) |
|
279 | 309 | |
|
280 | 310 | if state['type'] == STATE_OK and value['15_min'] > 5: |
|
281 | 311 | msg = 'Warning: your machine load is very high.' |
|
282 | 312 | state = {'message': msg, 'type': STATE_WARN} |
|
283 | 313 | |
|
284 | 314 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
285 | 315 | |
|
286 | 316 | |
|
317 | @register_sysinfo | |
|
287 | 318 | def cpu(): |
|
288 | 319 | value = {'cpu': 0, 'cpu_count': 0, 'cpu_usage': []} |
|
289 | 320 | state = STATE_OK_DEFAULT |
|
290 | 321 | |
|
291 | 322 | if not psutil: |
|
292 | 323 | return SysInfoRes(value=value, state=state) |
|
293 | 324 | |
|
294 | 325 | value['cpu'] = psutil.cpu_percent(0.5) |
|
295 | 326 | value['cpu_usage'] = psutil.cpu_percent(0.5, percpu=True) |
|
296 | 327 | value['cpu_count'] = psutil.cpu_count() |
|
297 | 328 | |
|
298 | 329 | human_value = value.copy() |
|
299 | 330 | human_value['text'] = '{} cores at {} %'.format( |
|
300 | 331 | value['cpu_count'], value['cpu']) |
|
301 | 332 | |
|
302 | 333 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
303 | 334 | |
|
304 | 335 | |
|
336 | @register_sysinfo | |
|
305 | 337 | def storage(): |
|
306 | 338 | from rhodecode.lib.helpers import format_byte_size_binary |
|
307 | 339 | from rhodecode.model.settings import VcsSettingsModel |
|
308 | 340 | path = VcsSettingsModel().get_repos_location() |
|
309 | 341 | |
|
310 | 342 | value = dict(percent=0, used=0, total=0, path=path, text='') |
|
311 | 343 | state = STATE_OK_DEFAULT |
|
312 | 344 | if not psutil: |
|
313 | 345 | return SysInfoRes(value=value, state=state) |
|
314 | 346 | |
|
315 | 347 | try: |
|
316 | 348 | value.update(dict(psutil.disk_usage(path)._asdict())) |
|
317 | 349 | except Exception as e: |
|
318 | 350 | log.exception('Failed to fetch disk info') |
|
319 | 351 | state = {'message': str(e), 'type': STATE_ERR} |
|
320 | 352 | |
|
321 | 353 | human_value = value.copy() |
|
322 | 354 | human_value['used'] = format_byte_size_binary(value['used']) |
|
323 | 355 | human_value['total'] = format_byte_size_binary(value['total']) |
|
324 | 356 | human_value['text'] = "{}/{}, {}% used".format( |
|
325 | 357 | format_byte_size_binary(value['used']), |
|
326 | 358 | format_byte_size_binary(value['total']), |
|
327 | 359 | value['percent']) |
|
328 | 360 | |
|
329 | 361 | if state['type'] == STATE_OK and value['percent'] > 90: |
|
330 | 362 | msg = 'Critical: your disk space is very low.' |
|
331 | 363 | state = {'message': msg, 'type': STATE_ERR} |
|
332 | 364 | |
|
333 | 365 | elif state['type'] == STATE_OK and value['percent'] > 70: |
|
334 | 366 | msg = 'Warning: your disk space is running low.' |
|
335 | 367 | state = {'message': msg, 'type': STATE_WARN} |
|
336 | 368 | |
|
337 | 369 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
338 | 370 | |
|
339 | 371 | |
|
372 | @register_sysinfo | |
|
340 | 373 | def storage_inodes(): |
|
341 | 374 | from rhodecode.model.settings import VcsSettingsModel |
|
342 | 375 | path = VcsSettingsModel().get_repos_location() |
|
343 | 376 | |
|
344 | 377 | value = dict(percent=0, free=0, used=0, total=0, path=path, text='') |
|
345 | 378 | state = STATE_OK_DEFAULT |
|
346 | 379 | if not psutil: |
|
347 | 380 | return SysInfoRes(value=value, state=state) |
|
348 | 381 | |
|
349 | 382 | try: |
|
350 | 383 | i_stat = os.statvfs(path) |
|
351 | 384 | value['free'] = i_stat.f_ffree |
|
352 | 385 | value['used'] = i_stat.f_files-i_stat.f_favail |
|
353 | 386 | value['total'] = i_stat.f_files |
|
354 | 387 | value['percent'] = percentage(value['used'], value['total']) |
|
355 | 388 | except Exception as e: |
|
356 | 389 | log.exception('Failed to fetch disk inodes info') |
|
357 | 390 | state = {'message': str(e), 'type': STATE_ERR} |
|
358 | 391 | |
|
359 | 392 | human_value = value.copy() |
|
360 | 393 | human_value['text'] = "{}/{}, {}% used".format( |
|
361 | 394 | value['used'], value['total'], value['percent']) |
|
362 | 395 | |
|
363 | 396 | if state['type'] == STATE_OK and value['percent'] > 90: |
|
364 | 397 | msg = 'Critical: your disk free inodes are very low.' |
|
365 | 398 | state = {'message': msg, 'type': STATE_ERR} |
|
366 | 399 | |
|
367 | 400 | elif state['type'] == STATE_OK and value['percent'] > 70: |
|
368 | 401 | msg = 'Warning: your disk free inodes are running low.' |
|
369 | 402 | state = {'message': msg, 'type': STATE_WARN} |
|
370 | 403 | |
|
371 | 404 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
372 | 405 | |
|
373 | 406 | |
|
407 | @register_sysinfo | |
|
374 | 408 | def storage_archives(): |
|
375 | 409 | import rhodecode |
|
376 | 410 | from rhodecode.lib.utils import safe_str |
|
377 | 411 | from rhodecode.lib.helpers import format_byte_size_binary |
|
378 | 412 | |
|
379 | 413 | msg = 'Enable this by setting ' \ |
|
380 | 414 | 'archive_cache_dir=/path/to/cache option in the .ini file' |
|
381 | 415 | path = safe_str(rhodecode.CONFIG.get('archive_cache_dir', msg)) |
|
382 | 416 | |
|
383 | 417 | value = dict(percent=0, used=0, total=0, items=0, path=path, text='') |
|
384 | 418 | state = STATE_OK_DEFAULT |
|
385 | 419 | try: |
|
386 | 420 | items_count = 0 |
|
387 | 421 | used = 0 |
|
388 | 422 | for root, dirs, files in os.walk(path): |
|
389 | 423 | if root == path: |
|
390 | 424 | items_count = len(files) |
|
391 | 425 | |
|
392 | 426 | for f in files: |
|
393 | 427 | try: |
|
394 | 428 | used += os.path.getsize(os.path.join(root, f)) |
|
395 | 429 | except OSError: |
|
396 | 430 | pass |
|
397 | 431 | value.update({ |
|
398 | 432 | 'percent': 100, |
|
399 | 433 | 'used': used, |
|
400 | 434 | 'total': used, |
|
401 | 435 | 'items': items_count |
|
402 | 436 | }) |
|
403 | 437 | |
|
404 | 438 | except Exception as e: |
|
405 | 439 | log.exception('failed to fetch archive cache storage') |
|
406 | 440 | state = {'message': str(e), 'type': STATE_ERR} |
|
407 | 441 | |
|
408 | 442 | human_value = value.copy() |
|
409 | 443 | human_value['used'] = format_byte_size_binary(value['used']) |
|
410 | 444 | human_value['total'] = format_byte_size_binary(value['total']) |
|
411 | 445 | human_value['text'] = "{} ({} items)".format( |
|
412 | 446 | human_value['used'], value['items']) |
|
413 | 447 | |
|
414 | 448 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
415 | 449 | |
|
416 | 450 | |
|
451 | @register_sysinfo | |
|
417 | 452 | def storage_gist(): |
|
418 | 453 | from rhodecode.model.gist import GIST_STORE_LOC |
|
419 | 454 | from rhodecode.model.settings import VcsSettingsModel |
|
420 | 455 | from rhodecode.lib.utils import safe_str |
|
421 | 456 | from rhodecode.lib.helpers import format_byte_size_binary |
|
422 | 457 | path = safe_str(os.path.join( |
|
423 | 458 | VcsSettingsModel().get_repos_location(), GIST_STORE_LOC)) |
|
424 | 459 | |
|
425 | 460 | # gist storage |
|
426 | 461 | value = dict(percent=0, used=0, total=0, items=0, path=path, text='') |
|
427 | 462 | state = STATE_OK_DEFAULT |
|
428 | 463 | |
|
429 | 464 | try: |
|
430 | 465 | items_count = 0 |
|
431 | 466 | used = 0 |
|
432 | 467 | for root, dirs, files in os.walk(path): |
|
433 | 468 | if root == path: |
|
434 | 469 | items_count = len(dirs) |
|
435 | 470 | |
|
436 | 471 | for f in files: |
|
437 | 472 | try: |
|
438 | 473 | used += os.path.getsize(os.path.join(root, f)) |
|
439 | 474 | except OSError: |
|
440 | 475 | pass |
|
441 | 476 | value.update({ |
|
442 | 477 | 'percent': 100, |
|
443 | 478 | 'used': used, |
|
444 | 479 | 'total': used, |
|
445 | 480 | 'items': items_count |
|
446 | 481 | }) |
|
447 | 482 | except Exception as e: |
|
448 | 483 | log.exception('failed to fetch gist storage items') |
|
449 | 484 | state = {'message': str(e), 'type': STATE_ERR} |
|
450 | 485 | |
|
451 | 486 | human_value = value.copy() |
|
452 | 487 | human_value['used'] = format_byte_size_binary(value['used']) |
|
453 | 488 | human_value['total'] = format_byte_size_binary(value['total']) |
|
454 | 489 | human_value['text'] = "{} ({} items)".format( |
|
455 | 490 | human_value['used'], value['items']) |
|
456 | 491 | |
|
457 | 492 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
458 | 493 | |
|
459 | 494 | |
|
495 | @register_sysinfo | |
|
460 | 496 | def storage_temp(): |
|
461 | 497 | import tempfile |
|
462 | 498 | from rhodecode.lib.helpers import format_byte_size_binary |
|
463 | 499 | |
|
464 | 500 | path = tempfile.gettempdir() |
|
465 | 501 | value = dict(percent=0, used=0, total=0, items=0, path=path, text='') |
|
466 | 502 | state = STATE_OK_DEFAULT |
|
467 | 503 | |
|
468 | 504 | if not psutil: |
|
469 | 505 | return SysInfoRes(value=value, state=state) |
|
470 | 506 | |
|
471 | 507 | try: |
|
472 | 508 | value.update(dict(psutil.disk_usage(path)._asdict())) |
|
473 | 509 | except Exception as e: |
|
474 | 510 | log.exception('Failed to fetch temp dir info') |
|
475 | 511 | state = {'message': str(e), 'type': STATE_ERR} |
|
476 | 512 | |
|
477 | 513 | human_value = value.copy() |
|
478 | 514 | human_value['used'] = format_byte_size_binary(value['used']) |
|
479 | 515 | human_value['total'] = format_byte_size_binary(value['total']) |
|
480 | 516 | human_value['text'] = "{}/{}, {}% used".format( |
|
481 | 517 | format_byte_size_binary(value['used']), |
|
482 | 518 | format_byte_size_binary(value['total']), |
|
483 | 519 | value['percent']) |
|
484 | 520 | |
|
485 | 521 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
486 | 522 | |
|
487 | 523 | |
|
524 | @register_sysinfo | |
|
488 | 525 | def search_info(): |
|
489 | 526 | import rhodecode |
|
490 | 527 | from rhodecode.lib.index import searcher_from_config |
|
491 | 528 | |
|
492 | 529 | backend = rhodecode.CONFIG.get('search.module', '') |
|
493 | 530 | location = rhodecode.CONFIG.get('search.location', '') |
|
494 | 531 | |
|
495 | 532 | try: |
|
496 | 533 | searcher = searcher_from_config(rhodecode.CONFIG) |
|
497 | 534 | searcher = searcher.__class__.__name__ |
|
498 | 535 | except Exception: |
|
499 | 536 | searcher = None |
|
500 | 537 | |
|
501 | 538 | value = dict( |
|
502 | 539 | backend=backend, searcher=searcher, location=location, text='') |
|
503 | 540 | state = STATE_OK_DEFAULT |
|
504 | 541 | |
|
505 | 542 | human_value = value.copy() |
|
506 | 543 | human_value['text'] = "backend:`{}`".format(human_value['backend']) |
|
507 | 544 | |
|
508 | 545 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
509 | 546 | |
|
510 | 547 | |
|
548 | @register_sysinfo | |
|
511 | 549 | def git_info(): |
|
512 | 550 | from rhodecode.lib.vcs.backends import git |
|
513 | 551 | state = STATE_OK_DEFAULT |
|
514 | 552 | value = human_value = '' |
|
515 | 553 | try: |
|
516 | 554 | value = git.discover_git_version(raise_on_exc=True) |
|
517 | 555 | human_value = 'version reported from VCSServer: {}'.format(value) |
|
518 | 556 | except Exception as e: |
|
519 | 557 | state = {'message': str(e), 'type': STATE_ERR} |
|
520 | 558 | |
|
521 | 559 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
522 | 560 | |
|
523 | 561 | |
|
562 | @register_sysinfo | |
|
524 | 563 | def hg_info(): |
|
525 | 564 | from rhodecode.lib.vcs.backends import hg |
|
526 | 565 | state = STATE_OK_DEFAULT |
|
527 | 566 | value = human_value = '' |
|
528 | 567 | try: |
|
529 | 568 | value = hg.discover_hg_version(raise_on_exc=True) |
|
530 | 569 | human_value = 'version reported from VCSServer: {}'.format(value) |
|
531 | 570 | except Exception as e: |
|
532 | 571 | state = {'message': str(e), 'type': STATE_ERR} |
|
533 | 572 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
534 | 573 | |
|
535 | 574 | |
|
575 | @register_sysinfo | |
|
536 | 576 | def svn_info(): |
|
537 | 577 | from rhodecode.lib.vcs.backends import svn |
|
538 | 578 | state = STATE_OK_DEFAULT |
|
539 | 579 | value = human_value = '' |
|
540 | 580 | try: |
|
541 | 581 | value = svn.discover_svn_version(raise_on_exc=True) |
|
542 | 582 | human_value = 'version reported from VCSServer: {}'.format(value) |
|
543 | 583 | except Exception as e: |
|
544 | 584 | state = {'message': str(e), 'type': STATE_ERR} |
|
545 | 585 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
546 | 586 | |
|
547 | 587 | |
|
588 | @register_sysinfo | |
|
548 | 589 | def vcs_backends(): |
|
549 | 590 | import rhodecode |
|
550 | 591 | value = rhodecode.CONFIG.get('vcs.backends') |
|
551 | 592 | human_value = 'Enabled backends in order: {}'.format(','.join(value)) |
|
552 | 593 | return SysInfoRes(value=value, human_value=human_value) |
|
553 | 594 | |
|
554 | 595 | |
|
596 | @register_sysinfo | |
|
555 | 597 | def vcs_server(): |
|
556 | 598 | import rhodecode |
|
557 | 599 | from rhodecode.lib.vcs.backends import get_vcsserver_service_data |
|
558 | 600 | |
|
559 | 601 | server_url = rhodecode.CONFIG.get('vcs.server') |
|
560 | 602 | enabled = rhodecode.CONFIG.get('vcs.server.enable') |
|
561 | 603 | protocol = rhodecode.CONFIG.get('vcs.server.protocol') or 'http' |
|
562 | 604 | state = STATE_OK_DEFAULT |
|
563 | 605 | version = None |
|
564 | 606 | workers = 0 |
|
565 | 607 | |
|
566 | 608 | try: |
|
567 | 609 | data = get_vcsserver_service_data() |
|
568 | 610 | if data and 'version' in data: |
|
569 | 611 | version = data['version'] |
|
570 | 612 | |
|
571 | 613 | if data and 'config' in data: |
|
572 | 614 | conf = data['config'] |
|
573 | 615 | workers = conf.get('workers', 'NOT AVAILABLE') |
|
574 | 616 | |
|
575 | 617 | connection = 'connected' |
|
576 | 618 | except Exception as e: |
|
577 | 619 | connection = 'failed' |
|
578 | 620 | state = {'message': str(e), 'type': STATE_ERR} |
|
579 | 621 | |
|
580 | 622 | value = dict( |
|
581 | 623 | url=server_url, |
|
582 | 624 | enabled=enabled, |
|
583 | 625 | protocol=protocol, |
|
584 | 626 | connection=connection, |
|
585 | 627 | version=version, |
|
586 | 628 | text='', |
|
587 | 629 | ) |
|
588 | 630 | |
|
589 | 631 | human_value = value.copy() |
|
590 | 632 | human_value['text'] = \ |
|
591 | 633 | '{url}@ver:{ver} via {mode} mode[workers:{workers}], connection:{conn}'.format( |
|
592 | 634 | url=server_url, ver=version, workers=workers, mode=protocol, |
|
593 | 635 | conn=connection) |
|
594 | 636 | |
|
595 | 637 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
596 | 638 | |
|
597 | 639 | |
|
640 | @register_sysinfo | |
|
598 | 641 | def vcs_server_config(): |
|
599 | 642 | from rhodecode.lib.vcs.backends import get_vcsserver_service_data |
|
600 | 643 | state = STATE_OK_DEFAULT |
|
601 | 644 | |
|
602 | 645 | value = {} |
|
603 | 646 | try: |
|
604 | 647 | data = get_vcsserver_service_data() |
|
605 | 648 | value = data['app_config'] |
|
606 | 649 | except Exception as e: |
|
607 | 650 | state = {'message': str(e), 'type': STATE_ERR} |
|
608 | 651 | |
|
609 | 652 | human_value = value.copy() |
|
610 | 653 | human_value['text'] = 'VCS Server config' |
|
611 | 654 | |
|
612 | 655 | return SysInfoRes(value=value, state=state, human_value=human_value) |
|
613 | 656 | |
|
614 | 657 | |
|
658 | @register_sysinfo | |
|
615 | 659 | def rhodecode_app_info(): |
|
616 | 660 | import rhodecode |
|
617 | 661 | edition = rhodecode.CONFIG.get('rhodecode.edition') |
|
618 | 662 | |
|
619 | 663 | value = dict( |
|
620 | 664 | rhodecode_version=rhodecode.__version__, |
|
621 | 665 | rhodecode_lib_path=os.path.abspath(rhodecode.__file__), |
|
622 | 666 | text='' |
|
623 | 667 | ) |
|
624 | 668 | human_value = value.copy() |
|
625 | 669 | human_value['text'] = 'RhodeCode {edition}, version {ver}'.format( |
|
626 | 670 | edition=edition, ver=value['rhodecode_version'] |
|
627 | 671 | ) |
|
628 | 672 | return SysInfoRes(value=value, human_value=human_value) |
|
629 | 673 | |
|
630 | 674 | |
|
675 | @register_sysinfo | |
|
631 | 676 | def rhodecode_config(): |
|
632 | 677 | import rhodecode |
|
633 | 678 | path = rhodecode.CONFIG.get('__file__') |
|
634 | 679 | rhodecode_ini_safe = rhodecode.CONFIG.copy() |
|
635 | 680 | cert_path = get_cert_path(path) |
|
636 | 681 | |
|
637 | 682 | try: |
|
638 | 683 | config = configparser.ConfigParser() |
|
639 | 684 | config.read(path) |
|
640 | 685 | parsed_ini = config |
|
641 | 686 | if parsed_ini.has_section('server:main'): |
|
642 | 687 | parsed_ini = dict(parsed_ini.items('server:main')) |
|
643 | 688 | except Exception: |
|
644 | 689 | log.exception('Failed to read .ini file for display') |
|
645 | 690 | parsed_ini = {} |
|
646 | 691 | |
|
647 | 692 | rhodecode_ini_safe['server:main'] = parsed_ini |
|
648 | 693 | |
|
649 | 694 | blacklist = [ |
|
650 | 695 | 'rhodecode_license_key', |
|
651 | 696 | 'routes.map', |
|
652 | 697 | 'sqlalchemy.db1.url', |
|
653 | 698 | 'channelstream.secret', |
|
654 | 699 | 'beaker.session.secret', |
|
655 | 700 | 'rhodecode.encrypted_values.secret', |
|
656 | 701 | 'rhodecode_auth_github_consumer_key', |
|
657 | 702 | 'rhodecode_auth_github_consumer_secret', |
|
658 | 703 | 'rhodecode_auth_google_consumer_key', |
|
659 | 704 | 'rhodecode_auth_google_consumer_secret', |
|
660 | 705 | 'rhodecode_auth_bitbucket_consumer_secret', |
|
661 | 706 | 'rhodecode_auth_bitbucket_consumer_key', |
|
662 | 707 | 'rhodecode_auth_twitter_consumer_secret', |
|
663 | 708 | 'rhodecode_auth_twitter_consumer_key', |
|
664 | 709 | |
|
665 | 710 | 'rhodecode_auth_twitter_secret', |
|
666 | 711 | 'rhodecode_auth_github_secret', |
|
667 | 712 | 'rhodecode_auth_google_secret', |
|
668 | 713 | 'rhodecode_auth_bitbucket_secret', |
|
669 | 714 | |
|
670 | 715 | 'appenlight.api_key', |
|
671 | 716 | ('app_conf', 'sqlalchemy.db1.url') |
|
672 | 717 | ] |
|
673 | 718 | for k in blacklist: |
|
674 | 719 | if isinstance(k, tuple): |
|
675 | 720 | section, key = k |
|
676 | 721 | if section in rhodecode_ini_safe: |
|
677 | 722 | rhodecode_ini_safe[section] = '**OBFUSCATED**' |
|
678 | 723 | else: |
|
679 | 724 | rhodecode_ini_safe.pop(k, None) |
|
680 | 725 | |
|
681 | 726 | # TODO: maybe put some CONFIG checks here ? |
|
682 | 727 | return SysInfoRes(value={'config': rhodecode_ini_safe, |
|
683 | 728 | 'path': path, 'cert_path': cert_path}) |
|
684 | 729 | |
|
685 | 730 | |
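The blacklist loop in rhodecode_config() above treats its entries in two ways: plain string keys are dropped from the flat config copy, while (section, key) tuples blank out the named section wholesale. A standalone sketch of that sanitisation rule, with hypothetical names (only the behaviour mirrors the loop above):

    # Hedged re-statement of the obfuscation rules used above.
    def sanitize_config(config, blacklist):
        safe = dict(config)
        for entry in blacklist:
            if isinstance(entry, tuple):
                section, _key = entry
                if section in safe:
                    # the whole section is masked, not just the single key
                    safe[section] = '**OBFUSCATED**'
            else:
                safe.pop(entry, None)
        return safe

    sanitize_config(
        {'beaker.session.secret': 'xyz',
         'app_conf': {'sqlalchemy.db1.url': 'sqlite:///rhodecode.db'}},
        ['beaker.session.secret', ('app_conf', 'sqlalchemy.db1.url')])
    # -> {'app_conf': '**OBFUSCATED**'}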
|
731 | @register_sysinfo | |
|
686 | 732 | def database_info(): |
|
687 | 733 | import rhodecode |
|
688 | 734 | from sqlalchemy.engine import url as engine_url |
|
689 | 735 | from rhodecode.model.meta import Base as sql_base, Session |
|
690 | 736 | from rhodecode.model.db import DbMigrateVersion |
|
691 | 737 | |
|
692 | 738 | state = STATE_OK_DEFAULT |
|
693 | 739 | |
|
694 | 740 | db_migrate = DbMigrateVersion.query().filter( |
|
695 | 741 | DbMigrateVersion.repository_id == 'rhodecode_db_migrations').one() |
|
696 | 742 | |
|
697 | 743 | db_url_obj = engine_url.make_url(rhodecode.CONFIG['sqlalchemy.db1.url']) |
|
698 | 744 | |
|
699 | 745 | try: |
|
700 | 746 | engine = sql_base.metadata.bind |
|
701 | 747 | db_server_info = engine.dialect._get_server_version_info( |
|
702 | 748 | Session.connection(bind=engine)) |
|
703 | 749 | db_version = '.'.join(map(str, db_server_info)) |
|
704 | 750 | except Exception: |
|
705 | 751 | log.exception('failed to fetch db version') |
|
706 | 752 | db_version = 'UNKNOWN' |
|
707 | 753 | |
|
708 | 754 | db_info = dict( |
|
709 | 755 | migrate_version=db_migrate.version, |
|
710 | 756 | type=db_url_obj.get_backend_name(), |
|
711 | 757 | version=db_version, |
|
712 | 758 | url=repr(db_url_obj) |
|
713 | 759 | ) |
|
714 | 760 | current_version = db_migrate.version |
|
715 | 761 | expected_version = rhodecode.__dbversion__ |
|
716 | 762 | if state['type'] == STATE_OK and current_version != expected_version: |
|
717 | 763 | msg = 'Critical: database schema mismatch, ' \ |
|
718 | 764 | 'expected version {}, got {}. ' \ |
|
719 | 765 | 'Please run migrations on your database.'.format( |
|
720 | 766 | expected_version, current_version) |
|
721 | 767 | state = {'message': msg, 'type': STATE_ERR} |
|
722 | 768 | |
|
723 | 769 | human_value = db_info.copy() |
|
724 | 770 | human_value['url'] = "{} @ migration version: {}".format( |
|
725 | 771 | db_info['url'], db_info['migrate_version']) |
|
726 | 772 | human_value['version'] = "{} {}".format(db_info['type'], db_info['version']) |
|
727 | 773 | return SysInfoRes(value=db_info, state=state, human_value=human_value) |
|
728 | 774 | |
|
729 | 775 | |
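database_info() indexes state['type'] before any error has been recorded, so STATE_OK_DEFAULT (defined earlier in system_info.py, outside the hunks shown) must already be a dict that carries a 'type' field. A plausible set of definitions, assumed here only to make the mismatch check above readable in isolation:

    # Assumed definitions; the real constants live earlier in system_info.py
    # and are not part of this changeset hunk.
    STATE_OK = 'ok'
    STATE_ERR = 'error'
    STATE_OK_DEFAULT = {'message': '', 'type': STATE_OK}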
|
776 | @register_sysinfo | |
|
730 | 777 | def server_info(environ): |
|
731 | 778 | import rhodecode |
|
732 | 779 | from rhodecode.lib.base import get_server_ip_addr, get_server_port |
|
733 | 780 | |
|
734 | 781 | value = { |
|
735 | 782 | 'server_ip': '%s:%s' % ( |
|
736 | 783 | get_server_ip_addr(environ, log_errors=False), |
|
737 | 784 | get_server_port(environ) |
|
738 | 785 | ), |
|
739 | 786 | 'server_id': rhodecode.CONFIG.get('instance_id'), |
|
740 | 787 | } |
|
741 | 788 | return SysInfoRes(value=value) |
|
742 | 789 | |
|
743 | 790 | |
|
791 | @register_sysinfo | |
|
744 | 792 | def usage_info(): |
|
745 | 793 | from rhodecode.model.db import User, Repository |
|
746 | 794 | value = { |
|
747 | 795 | 'users': User.query().count(), |
|
748 | 796 | 'users_active': User.query().filter(User.active == True).count(), |
|
749 | 797 | 'repositories': Repository.query().count(), |
|
750 | 798 | 'repository_types': { |
|
751 | 799 | 'hg': Repository.query().filter( |
|
752 | 800 | Repository.repo_type == 'hg').count(), |
|
753 | 801 | 'git': Repository.query().filter( |
|
754 | 802 | Repository.repo_type == 'git').count(), |
|
755 | 803 | 'svn': Repository.query().filter( |
|
756 | 804 | Repository.repo_type == 'svn').count(), |
|
757 | 805 | }, |
|
758 | 806 | } |
|
759 | 807 | return SysInfoRes(value=value) |
|
760 | 808 | |
|
761 | 809 | |
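usage_info() only aggregates counts from the User and Repository tables; for reference, its value dict ends up shaped like the illustration below (every number is made up):

    # Illustrative result of usage_info(); all figures are hypothetical.
    example_usage = {
        'users': 42,
        'users_active': 40,
        'repositories': 120,
        'repository_types': {'hg': 60, 'git': 55, 'svn': 5},
    }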
|
762 | 810 | def get_system_info(environ): |
|
763 | 811 | environ = environ or {} |
|
764 | 812 | return { |
|
765 | 813 | 'rhodecode_app': SysInfo(rhodecode_app_info)(), |
|
766 | 814 | 'rhodecode_config': SysInfo(rhodecode_config)(), |
|
767 | 815 | 'rhodecode_usage': SysInfo(usage_info)(), |
|
768 | 816 | 'python': SysInfo(python_info)(), |
|
769 | 817 | 'py_modules': SysInfo(py_modules)(), |
|
770 | 818 | |
|
771 | 819 | 'platform': SysInfo(platform_type)(), |
|
772 | 820 | 'locale': SysInfo(locale_info)(), |
|
773 | 821 | 'server': SysInfo(server_info, environ=environ)(), |
|
774 | 822 | 'database': SysInfo(database_info)(), |
|
775 | 823 | 'ulimit': SysInfo(ulimit_info)(), |
|
776 | 824 | 'storage': SysInfo(storage)(), |
|
777 | 825 | 'storage_inodes': SysInfo(storage_inodes)(), |
|
778 | 826 | 'storage_archive': SysInfo(storage_archives)(), |
|
779 | 827 | 'storage_gist': SysInfo(storage_gist)(), |
|
780 | 828 | 'storage_temp': SysInfo(storage_temp)(), |
|
781 | 829 | |
|
782 | 830 | 'search': SysInfo(search_info)(), |
|
783 | 831 | |
|
784 | 832 | 'uptime': SysInfo(uptime)(), |
|
785 | 833 | 'load': SysInfo(machine_load)(), |
|
786 | 834 | 'cpu': SysInfo(cpu)(), |
|
787 | 835 | 'memory': SysInfo(memory)(), |
|
788 | 836 | |
|
789 | 837 | 'vcs_backends': SysInfo(vcs_backends)(), |
|
790 | 838 | 'vcs_server': SysInfo(vcs_server)(), |
|
791 | 839 | |
|
792 | 840 | 'vcs_server_config': SysInfo(vcs_server_config)(), |
|
793 | 841 | |
|
794 | 842 | 'git': SysInfo(git_info)(), |
|
795 | 843 | 'hg': SysInfo(hg_info)(), |
|
796 | 844 | 'svn': SysInfo(svn_info)(), |
|
797 | 845 | } |
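get_system_info() is the aggregate entry point: each key maps to one helper wrapped in SysInfo, and only server_info() receives the WSGI environ explicitly. A hedged usage sketch, assuming it is called from a request context where an environ-style dict is available:

    # Hypothetical caller; any WSGI environ-like dict works for the sketch.
    info = get_system_info({'SERVER_NAME': 'localhost', 'SERVER_PORT': '80'})
    database = info['database']    # result of SysInfo(database_info)()
    vcs = info['vcs_server']       # result of SysInfo(vcs_server)()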
|
846 | ||
|
847 | ||
|
848 | def load_system_info(key): | |
|
849 | """ | |
|
850 | load_system_info('vcs_server') |

851 | load_system_info('database') |
|
852 | """ | |
|
853 | return SysInfo(registered_helpers[key])() |
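load_system_info() resolves a single helper by name through registered_helpers, the mapping that the @register_sysinfo decorator presumably populates; the decorator's body is not shown in this changeset. A minimal sketch of that registry plus a usage example, both assumptions consistent with the code above rather than the real implementation:

    # Hypothetical registry: key each decorated helper by its function name so
    # load_system_info('vcs_server') can find vcs_server().
    registered_helpers = {}

    def register_sysinfo(func):
        registered_helpers[func.__name__] = func
        return func

    # Usage, assuming SysInfoRes exposes the value/state it was built with:
    res = load_system_info('vcs_server')
    print(res.value['connection'], res.state['type'])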