@@ -1,213 +1,212 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2018 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import pytest |
|
22 | 22 | |
|
23 | 23 | from rhodecode.lib.vcs.nodes import FileNode |
|
24 | 24 | from rhodecode.model.db import User |
|
25 | 25 | from rhodecode.model.pull_request import PullRequestModel |
|
26 | 26 | from rhodecode.tests import TEST_USER_ADMIN_LOGIN |
|
27 | 27 | from rhodecode.api.tests.utils import ( |
|
28 | 28 | build_data, api_call, assert_ok, assert_error) |
|
29 | 29 | |
|
30 | 30 | |
|
31 | 31 | @pytest.mark.usefixtures("testuser_api", "app") |
|
32 | 32 | class TestUpdatePullRequest(object): |
|
33 | 33 | |
|
34 | 34 | @pytest.mark.backends("git", "hg") |
|
35 | 35 | def test_api_update_pull_request_title_or_description( |
|
36 | 36 | self, pr_util, no_notifications): |
|
37 | 37 | pull_request = pr_util.create_pull_request() |
|
38 | 38 | |
|
39 | 39 | id_, params = build_data( |
|
40 | 40 | self.apikey, 'update_pull_request', |
|
41 | 41 | repoid=pull_request.target_repo.repo_name, |
|
42 | 42 | pullrequestid=pull_request.pull_request_id, |
|
43 | 43 | title='New TITLE OF A PR', |
|
44 | 44 | description='New DESC OF A PR', |
|
45 | 45 | ) |
|
46 | 46 | response = api_call(self.app, params) |
|
47 | 47 | |
|
48 | 48 | expected = { |
|
49 | 49 | "msg": "Updated pull request `{}`".format( |
|
50 | 50 | pull_request.pull_request_id), |
|
51 | 51 | "pull_request": response.json['result']['pull_request'], |
|
52 | 52 | "updated_commits": {"added": [], "common": [], "removed": []}, |
|
53 | 53 | "updated_reviewers": {"added": [], "removed": []}, |
|
54 | 54 | } |
|
55 | 55 | |
|
56 | 56 | response_json = response.json['result'] |
|
57 | 57 | assert response_json == expected |
|
58 | 58 | pr = response_json['pull_request'] |
|
59 | 59 | assert pr['title'] == 'New TITLE OF A PR' |
|
60 | 60 | assert pr['description'] == 'New DESC OF A PR' |
|
61 | 61 | |
|
62 | 62 | @pytest.mark.backends("git", "hg") |
|
63 | 63 | def test_api_try_update_closed_pull_request( |
|
64 | 64 | self, pr_util, no_notifications): |
|
65 | 65 | pull_request = pr_util.create_pull_request() |
|
66 | 66 | PullRequestModel().close_pull_request( |
|
67 | 67 | pull_request, TEST_USER_ADMIN_LOGIN) |
|
68 | 68 | |
|
69 | 69 | id_, params = build_data( |
|
70 | 70 | self.apikey, 'update_pull_request', |
|
71 | 71 | repoid=pull_request.target_repo.repo_name, |
|
72 | 72 | pullrequestid=pull_request.pull_request_id) |
|
73 | 73 | response = api_call(self.app, params) |
|
74 | 74 | |
|
75 | 75 | expected = 'pull request `{}` update failed, pull request ' \ |
|
76 | 76 | 'is closed'.format(pull_request.pull_request_id) |
|
77 | 77 | |
|
78 | 78 | assert_error(id_, expected, response.body) |
|
79 | 79 | |
|
80 | 80 | @pytest.mark.backends("git", "hg") |
|
81 | 81 | def test_api_update_update_commits(self, pr_util, no_notifications): |
|
82 | 82 | commits = [ |
|
83 | 83 | {'message': 'a'}, |
|
84 | 84 | {'message': 'b', 'added': [FileNode('file_b', 'test_content\n')]}, |
|
85 | 85 | {'message': 'c', 'added': [FileNode('file_c', 'test_content\n')]}, |
|
86 | 86 | ] |
|
87 | 87 | pull_request = pr_util.create_pull_request( |
|
88 | 88 | commits=commits, target_head='a', source_head='b', revisions=['b']) |
|
89 | 89 | pr_util.update_source_repository(head='c') |
|
90 | 90 | repo = pull_request.source_repo.scm_instance() |
|
91 | 91 | commits = [x for x in repo.get_commits()] |
|
92 | print commits | |
|
93 | 92 | |
|
94 | 93 | added_commit_id = commits[-1].raw_id # c commit |
|
95 | 94 | common_commit_id = commits[1].raw_id # b commit is common ancestor |
|
96 | 95 | total_commits = [added_commit_id, common_commit_id] |
|
97 | 96 | |
|
98 | 97 | id_, params = build_data( |
|
99 | 98 | self.apikey, 'update_pull_request', |
|
100 | 99 | repoid=pull_request.target_repo.repo_name, |
|
101 | 100 | pullrequestid=pull_request.pull_request_id, |
|
102 | 101 | update_commits=True |
|
103 | 102 | ) |
|
104 | 103 | response = api_call(self.app, params) |
|
105 | 104 | |
|
106 | 105 | expected = { |
|
107 | 106 | "msg": "Updated pull request `{}`".format( |
|
108 | 107 | pull_request.pull_request_id), |
|
109 | 108 | "pull_request": response.json['result']['pull_request'], |
|
110 | 109 | "updated_commits": {"added": [added_commit_id], |
|
111 | 110 | "common": [common_commit_id], |
|
112 | 111 | "total": total_commits, |
|
113 | 112 | "removed": []}, |
|
114 | 113 | "updated_reviewers": {"added": [], "removed": []}, |
|
115 | 114 | } |
|
116 | 115 | |
|
117 | 116 | assert_ok(id_, expected, response.body) |
|
118 | 117 | |
|
119 | 118 | @pytest.mark.backends("git", "hg") |
|
120 | 119 | def test_api_update_change_reviewers( |
|
121 | 120 | self, user_util, pr_util, no_notifications): |
|
122 | 121 | a = user_util.create_user() |
|
123 | 122 | b = user_util.create_user() |
|
124 | 123 | c = user_util.create_user() |
|
125 | 124 | new_reviewers = [ |
|
126 | 125 | {'username': b.username,'reasons': ['updated via API'], |
|
127 | 126 | 'mandatory':False}, |
|
128 | 127 | {'username': c.username, 'reasons': ['updated via API'], |
|
129 | 128 | 'mandatory':False}, |
|
130 | 129 | ] |
|
131 | 130 | |
|
132 | 131 | added = [b.username, c.username] |
|
133 | 132 | removed = [a.username] |
|
134 | 133 | |
|
135 | 134 | pull_request = pr_util.create_pull_request( |
|
136 | 135 | reviewers=[(a.username, ['added via API'], False, [])]) |
|
137 | 136 | |
|
138 | 137 | id_, params = build_data( |
|
139 | 138 | self.apikey, 'update_pull_request', |
|
140 | 139 | repoid=pull_request.target_repo.repo_name, |
|
141 | 140 | pullrequestid=pull_request.pull_request_id, |
|
142 | 141 | reviewers=new_reviewers) |
|
143 | 142 | response = api_call(self.app, params) |
|
144 | 143 | expected = { |
|
145 | 144 | "msg": "Updated pull request `{}`".format( |
|
146 | 145 | pull_request.pull_request_id), |
|
147 | 146 | "pull_request": response.json['result']['pull_request'], |
|
148 | 147 | "updated_commits": {"added": [], "common": [], "removed": []}, |
|
149 | 148 | "updated_reviewers": {"added": added, "removed": removed}, |
|
150 | 149 | } |
|
151 | 150 | |
|
152 | 151 | assert_ok(id_, expected, response.body) |
|
153 | 152 | |
|
154 | 153 | @pytest.mark.backends("git", "hg") |
|
155 | 154 | def test_api_update_bad_user_in_reviewers(self, pr_util): |
|
156 | 155 | pull_request = pr_util.create_pull_request() |
|
157 | 156 | |
|
158 | 157 | id_, params = build_data( |
|
159 | 158 | self.apikey, 'update_pull_request', |
|
160 | 159 | repoid=pull_request.target_repo.repo_name, |
|
161 | 160 | pullrequestid=pull_request.pull_request_id, |
|
162 | 161 | reviewers=[{'username': 'bad_name'}]) |
|
163 | 162 | response = api_call(self.app, params) |
|
164 | 163 | |
|
165 | 164 | expected = 'user `bad_name` does not exist' |
|
166 | 165 | |
|
167 | 166 | assert_error(id_, expected, response.body) |
|
168 | 167 | |
|
169 | 168 | @pytest.mark.backends("git", "hg") |
|
170 | 169 | def test_api_update_repo_error(self, pr_util): |
|
171 | 170 | pull_request = pr_util.create_pull_request() |
|
172 | 171 | id_, params = build_data( |
|
173 | 172 | self.apikey, 'update_pull_request', |
|
174 | 173 | repoid='fake', |
|
175 | 174 | pullrequestid=pull_request.pull_request_id, |
|
176 | 175 | reviewers=[{'username': 'bad_name'}]) |
|
177 | 176 | response = api_call(self.app, params) |
|
178 | 177 | |
|
179 | 178 | expected = 'repository `fake` does not exist' |
|
180 | 179 | |
|
181 | 180 | response_json = response.json['error'] |
|
182 | 181 | assert response_json == expected |
|
183 | 182 | |
|
184 | 183 | @pytest.mark.backends("git", "hg") |
|
185 | 184 | def test_api_update_pull_request_error(self, pr_util): |
|
186 | 185 | pull_request = pr_util.create_pull_request() |
|
187 | 186 | |
|
188 | 187 | id_, params = build_data( |
|
189 | 188 | self.apikey, 'update_pull_request', |
|
190 | 189 | repoid=pull_request.target_repo.repo_name, |
|
191 | 190 | pullrequestid=999999, |
|
192 | 191 | reviewers=[{'username': 'bad_name'}]) |
|
193 | 192 | response = api_call(self.app, params) |
|
194 | 193 | |
|
195 | 194 | expected = 'pull request `999999` does not exist' |
|
196 | 195 | assert_error(id_, expected, response.body) |
|
197 | 196 | |
|
198 | 197 | @pytest.mark.backends("git", "hg") |
|
199 | 198 | def test_api_update_pull_request_no_perms_to_update( |
|
200 | 199 | self, user_util, pr_util): |
|
201 | 200 | user = user_util.create_user() |
|
202 | 201 | pull_request = pr_util.create_pull_request() |
|
203 | 202 | |
|
204 | 203 | id_, params = build_data( |
|
205 | 204 | user.api_key, 'update_pull_request', |
|
206 | 205 | repoid=pull_request.target_repo.repo_name, |
|
207 | 206 | pullrequestid=pull_request.pull_request_id,) |
|
208 | 207 | response = api_call(self.app, params) |
|
209 | 208 | |
|
210 | 209 | expected = ('pull request `%s` update failed, ' |
|
211 | 210 | 'no permission to update.') % pull_request.pull_request_id |
|
212 | 211 | |
|
213 | 212 | assert_error(id_, expected, response.body) |
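Note: every test in this class drives the same `update_pull_request` JSON-RPC endpoint through `build_data`/`api_call`. A minimal sketch of the request envelope and response shape the assertions rely on; the helper internals are assumed, only fields visible in the assertions above come from the tests:

    # Rough sketch of the JSON-RPC call shape exercised by the tests above.
    import json
    import uuid

    def build_payload(auth_token, method, **kwargs):
        # one request id per call; the server echoes it back
        return {
            'id': str(uuid.uuid4()),
            'auth_token': auth_token,
            'method': method,
            'args': kwargs,
        }

    body = json.dumps(build_payload(
        'secret', 'update_pull_request',
        repoid='some-repo', pullrequestid=1, title='New TITLE OF A PR'))
    # success responses carry the payload under 'result' (msg, pull_request,
    # updated_commits, updated_reviewers); failures put a message under 'error'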
@@ -1,100 +1,100 b'' | |||
|
1 | 1 | """ |
|
2 | 2 | Script to migrate repository from sqlalchemy <= 0.4.4 to the new |
|
3 | 3 | repository schema. This shouldn't use any other migrate modules, so |
|
4 | 4 | that it can work in any version. |
|
5 | 5 | """ |
|
6 | 6 | |
|
7 | 7 | import os |
|
8 | 8 | import sys |
|
9 | 9 | import logging |
|
10 | 10 | |
|
11 | 11 | log = logging.getLogger(__name__) |
|
12 | 12 | |
|
13 | 13 | |
|
14 | 14 | def usage(): |
|
15 | 15 | """Gives usage information.""" |
|
16 | print """Usage: %(prog)s repository-to-migrate | |
|
16 | print("""Usage: %(prog)s repository-to-migrate | |
|
17 | 17 | |
|
18 | 18 | Upgrade your repository to the new flat format. |
|
19 | 19 | |
|
20 | 20 | NOTE: You should probably make a backup before running this. |
|
21 | """ % {'prog': sys.argv[0]} | |
|
21 | """ % {'prog': sys.argv[0]}) | |
|
22 | 22 | |
|
23 | 23 | sys.exit(1) |
|
24 | 24 | |
|
25 | 25 | |
|
26 | 26 | def delete_file(filepath): |
|
27 | 27 | """Deletes a file and prints a message.""" |
|
28 | 28 | log.info('Deleting file: %s' % filepath) |
|
29 | 29 | os.remove(filepath) |
|
30 | 30 | |
|
31 | 31 | |
|
32 | 32 | def move_file(src, tgt): |
|
33 | 33 | """Moves a file and prints a message.""" |
|
34 | 34 | log.info('Moving file %s to %s' % (src, tgt)) |
|
35 | 35 | if os.path.exists(tgt): |
|
36 | 36 | raise Exception( |
|
37 | 37 | 'Cannot move file %s because target %s already exists' % \ |
|
38 | 38 | (src, tgt)) |
|
39 | 39 | os.rename(src, tgt) |
|
40 | 40 | |
|
41 | 41 | |
|
42 | 42 | def delete_directory(dirpath): |
|
43 | 43 | """Delete a directory and print a message.""" |
|
44 | 44 | log.info('Deleting directory: %s' % dirpath) |
|
45 | 45 | os.rmdir(dirpath) |
|
46 | 46 | |
|
47 | 47 | |
|
48 | 48 | def migrate_repository(repos): |
|
49 | 49 | """Does the actual migration to the new repository format.""" |
|
50 | 50 | log.info('Migrating repository at: %s to new format' % repos) |
|
51 | 51 | versions = '%s/versions' % repos |
|
52 | 52 | dirs = os.listdir(versions) |
|
53 | 53 | # Only use int's in list. |
|
54 | 54 | numdirs = [int(dirname) for dirname in dirs if dirname.isdigit()] |
|
55 | 55 | numdirs.sort() # Sort list. |
|
56 | 56 | for dirname in numdirs: |
|
57 | 57 | origdir = '%s/%s' % (versions, dirname) |
|
58 | 58 | log.info('Working on directory: %s' % origdir) |
|
59 | 59 | files = os.listdir(origdir) |
|
60 | 60 | files.sort() |
|
61 | 61 | for filename in files: |
|
62 | 62 | # Delete compiled Python files. |
|
63 | 63 | if filename.endswith('.pyc') or filename.endswith('.pyo'): |
|
64 | 64 | delete_file('%s/%s' % (origdir, filename)) |
|
65 | 65 | |
|
66 | 66 | # Delete empty __init__.py files. |
|
67 | 67 | origfile = '%s/__init__.py' % origdir |
|
68 | 68 | if os.path.exists(origfile) and len(open(origfile).read()) == 0: |
|
69 | 69 | delete_file(origfile) |
|
70 | 70 | |
|
71 | 71 | # Move sql upgrade scripts. |
|
72 | 72 | if filename.endswith('.sql'): |
|
73 | 73 | version, dbms, operation = filename.split('.', 3)[0:3] |
|
74 | 74 | origfile = '%s/%s' % (origdir, filename) |
|
75 | 75 | # For instance: 2.postgres.upgrade.sql -> |
|
76 | 76 | # 002_postgres_upgrade.sql |
|
77 | 77 | tgtfile = '%s/%03d_%s_%s.sql' % ( |
|
78 | 78 | versions, int(version), dbms, operation) |
|
79 | 79 | move_file(origfile, tgtfile) |
|
80 | 80 | |
|
81 | 81 | # Move Python upgrade script. |
|
82 | 82 | pyfile = '%s.py' % dirname |
|
83 | 83 | pyfilepath = '%s/%s' % (origdir, pyfile) |
|
84 | 84 | if os.path.exists(pyfilepath): |
|
85 | 85 | tgtfile = '%s/%03d.py' % (versions, int(dirname)) |
|
86 | 86 | move_file(pyfilepath, tgtfile) |
|
87 | 87 | |
|
88 | 88 | # Try to remove directory. Will fail if it's not empty. |
|
89 | 89 | delete_directory(origdir) |
|
90 | 90 | |
|
91 | 91 | |
|
92 | 92 | def main(): |
|
93 | 93 | """Main function to be called when using this script.""" |
|
94 | 94 | if len(sys.argv) != 2: |
|
95 | 95 | usage() |
|
96 | 96 | migrate_repository(sys.argv[1]) |
|
97 | 97 | |
|
98 | 98 | |
|
99 | 99 | if __name__ == '__main__': |
|
100 | 100 | main() |
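Note: the rename rule buried in `migrate_repository` is easier to see in isolation. A standalone sketch of the same mapping (the assert mirrors the comment in the code above):

    # '2.postgres.upgrade.sql' -> '002_postgres_upgrade.sql'
    def flat_sql_name(filename):
        version, dbms, operation = filename.split('.', 3)[0:3]
        return '%03d_%s_%s.sql' % (int(version), dbms, operation)

    assert flat_sql_name('2.postgres.upgrade.sql') == '002_postgres_upgrade.sql'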
@@ -1,79 +1,79 b'' | |||
|
1 | 1 | import logging |
|
2 | 2 | import datetime |
|
3 | 3 | |
|
4 | 4 | from sqlalchemy import * |
|
5 | 5 | from sqlalchemy.exc import DatabaseError |
|
6 | 6 | from sqlalchemy.orm import relation, backref, class_mapper |
|
7 | 7 | from sqlalchemy.orm.session import Session |
|
8 | 8 | |
|
9 | 9 | from rhodecode.lib.dbmigrate.migrate import * |
|
10 | 10 | from rhodecode.lib.dbmigrate.migrate.changeset import * |
|
11 | 11 | |
|
12 | 12 | from rhodecode.model.meta import Base |
|
13 | 13 | |
|
14 | 14 | log = logging.getLogger(__name__) |
|
15 | 15 | |
|
16 | 16 | |
|
17 | 17 | def upgrade(migrate_engine): |
|
18 | 18 | """ Upgrade operations go here. |
|
19 | 19 | Don't create your own engine; bind migrate_engine to your metadata |
|
20 | 20 | """ |
|
21 | 21 | |
|
22 | 22 | #========================================================================== |
|
23 | 23 | # Change unique constraints of table `repo_to_perm` |
|
24 | 24 | #========================================================================== |
|
25 | 25 | from rhodecode.lib.dbmigrate.schema.db_1_3_0 import UserRepoToPerm |
|
26 | 26 | tbl = UserRepoToPerm().__table__ |
|
27 | 27 | new_cons = UniqueConstraint('user_id', 'repository_id', 'permission_id', table=tbl) |
|
28 | 28 | new_cons.create() |
|
29 | 29 | old_cons = None |
|
30 | 30 | if migrate_engine.name in ['mysql']: |
|
31 | 31 | old_cons = UniqueConstraint('user_id', 'repository_id', table=tbl, name="user_id") |
|
32 | 32 | elif migrate_engine.name in ['postgresql']: |
|
33 | 33 | old_cons = UniqueConstraint('user_id', 'repository_id', table=tbl) |
|
34 | 34 | else: |
|
35 | 35 | # sqlite doesn't support dropping constraints... |
|
36 | print """Please manually drop UniqueConstraint('user_id', 'repository_id')""" | |
|
36 | print("""Please manually drop UniqueConstraint('user_id', 'repository_id')""") | |
|
37 | 37 | |
|
38 | 38 | if old_cons: |
|
39 | 39 | try: |
|
40 | 40 | old_cons.drop() |
|
41 | 41 | except Exception as e: |
|
42 | 42 | # we don't care if this fails really... better to pass migration than |
|
43 | 43 | # leave this in intermidiate state |
|
44 | print 'Failed to remove Unique for user_id, repository_id reason %s' % e | |
|
44 | print('Failed to remove Unique for user_id, repository_id reason %s' % e) | |
|
45 | 45 | |
|
46 | 46 | |
|
47 | 47 | #========================================================================== |
|
48 | 48 | # fix uniques of table `user_repo_group_to_perm` |
|
49 | 49 | #========================================================================== |
|
50 | 50 | from rhodecode.lib.dbmigrate.schema.db_1_3_0 import UserRepoGroupToPerm |
|
51 | 51 | tbl = UserRepoGroupToPerm().__table__ |
|
52 | 52 | new_cons = UniqueConstraint('group_id', 'permission_id', 'user_id', table=tbl) |
|
53 | 53 | new_cons.create() |
|
54 | 54 | old_cons = None |
|
55 | 55 | |
|
56 | 56 | # fix uniqueConstraints |
|
57 | 57 | if migrate_engine.name in ['mysql']: |
|
58 | 58 | #mysql is givinig troubles here... |
|
59 | 59 | old_cons = UniqueConstraint('group_id', 'permission_id', table=tbl, name="group_id") |
|
60 | 60 | elif migrate_engine.name in ['postgresql']: |
|
61 | 61 | old_cons = UniqueConstraint('group_id', 'permission_id', table=tbl, name='group_to_perm_group_id_permission_id_key') |
|
62 | 62 | else: |
|
63 | 63 | # sqlite doesn't support dropping constraints... |
|
64 | print """Please manually drop UniqueConstraint('group_id', 'permission_id')""" | |
|
64 | print("""Please manually drop UniqueConstraint('group_id', 'permission_id')""") | |
|
65 | 65 | |
|
66 | 66 | if old_cons: |
|
67 | 67 | try: |
|
68 | 68 | old_cons.drop() |
|
69 | 69 | except Exception as e: |
|
70 | 70 | # we don't care if this fails really... better to pass migration than |
|
71 | 71 | # leave this in intermidiate state |
|
72 | print 'Failed to remove Unique for user_id, repository_id reason %s' % e | |
|
72 | print('Failed to remove Unique for user_id, repository_id reason %s' % e) | |
|
73 | 73 | |
|
74 | 74 | return |
|
75 | 75 | |
|
76 | 76 | |
|
77 | 77 | def downgrade(migrate_engine): |
|
78 | 78 | meta = MetaData() |
|
79 | 79 | meta.bind = migrate_engine |
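Note: both constraint fixups in this migration follow the same order: create the new, wider unique constraint first, then drop the old one under its dialect-specific name, and tolerate failure rather than abort. A condensed sketch of that pattern (sqlalchemy-migrate changeset API; table and column names are illustrative):

    from migrate.changeset.constraint import UniqueConstraint

    def swap_unique(tbl, engine_name):
        # new, wider constraint goes in first so rows stay covered
        UniqueConstraint('user_id', 'repository_id', 'permission_id',
                         table=tbl).create()
        if engine_name == 'mysql':
            # mysql auto-names the constraint after its first column
            old = UniqueConstraint('user_id', 'repository_id',
                                   table=tbl, name='user_id')
        elif engine_name == 'postgresql':
            old = UniqueConstraint('user_id', 'repository_id', table=tbl)
        else:
            return  # sqlite cannot drop constraints at all
        try:
            old.drop()
        except Exception:
            pass  # a leftover unique beats a half-applied migration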
@@ -1,135 +1,135 b'' | |||
|
1 | 1 | import logging |
|
2 | 2 | import datetime |
|
3 | 3 | |
|
4 | 4 | from sqlalchemy import * |
|
5 | 5 | from sqlalchemy.exc import DatabaseError |
|
6 | 6 | from sqlalchemy.orm import relation, backref, class_mapper, joinedload |
|
7 | 7 | from sqlalchemy.orm.session import Session |
|
8 | 8 | from sqlalchemy.ext.declarative import declarative_base |
|
9 | 9 | |
|
10 | 10 | from rhodecode.lib.dbmigrate.migrate import * |
|
11 | 11 | from rhodecode.lib.dbmigrate.migrate.changeset import * |
|
12 | 12 | |
|
13 | 13 | from rhodecode.model.meta import Base |
|
14 | 14 | from rhodecode.model import meta |
|
15 | 15 | from rhodecode.lib.dbmigrate.versions import _reset_base |
|
16 | 16 | |
|
17 | 17 | log = logging.getLogger(__name__) |
|
18 | 18 | |
|
19 | 19 | |
|
20 | 20 | def upgrade(migrate_engine): |
|
21 | 21 | """ |
|
22 | 22 | Upgrade operations go here. |
|
23 | 23 | Don't create your own engine; bind migrate_engine to your metadata |
|
24 | 24 | """ |
|
25 | 25 | _reset_base(migrate_engine) |
|
26 | 26 | from rhodecode.lib.dbmigrate.schema import db_1_5_0 |
|
27 | 27 | #========================================================================== |
|
28 | 28 | # USER LOGS |
|
29 | 29 | #========================================================================== |
|
30 | 30 | |
|
31 | 31 | tbl = db_1_5_0.UserLog.__table__ |
|
32 | 32 | username = Column("username", String(255, convert_unicode=False), |
|
33 | 33 | nullable=True, unique=None, default=None) |
|
34 | 34 | # create username column |
|
35 | 35 | username.create(table=tbl) |
|
36 | 36 | |
|
37 | 37 | _Session = meta.Session() |
|
38 | 38 | ## after adding that column fix all usernames |
|
39 | 39 | users_log = _Session.query(db_1_5_0.UserLog)\ |
|
40 | 40 | .options(joinedload(db_1_5_0.UserLog.user))\ |
|
41 | 41 | .options(joinedload(db_1_5_0.UserLog.repository)).all() |
|
42 | 42 | |
|
43 | 43 | for entry in users_log: |
|
44 | 44 | entry.username = entry.user.username |
|
45 | 45 | _Session.add(entry) |
|
46 | 46 | _Session.commit() |
|
47 | 47 | |
|
48 | 48 | #alter username to not null |
|
49 | 49 | tbl_name = db_1_5_0.UserLog.__tablename__ |
|
50 | 50 | tbl = Table(tbl_name, |
|
51 | 51 | MetaData(bind=migrate_engine), autoload=True, |
|
52 | 52 | autoload_with=migrate_engine) |
|
53 | 53 | col = tbl.columns.username |
|
54 | 54 | |
|
55 | 55 | # remove nullability from revision field |
|
56 | 56 | col.alter(nullable=False) |
|
57 | 57 | |
|
58 | 58 | # issue fixups |
|
59 | 59 | fixups(db_1_5_0, meta.Session) |
|
60 | 60 | |
|
61 | 61 | |
|
62 | 62 | def downgrade(migrate_engine): |
|
63 | 63 | meta = MetaData() |
|
64 | 64 | meta.bind = migrate_engine |
|
65 | 65 | |
|
66 | 66 | |
|
67 | 67 | def get_by_key(cls, key): |
|
68 | 68 | return cls.query().filter(cls.permission_name == key).scalar() |
|
69 | 69 | |
|
70 | 70 | |
|
71 | 71 | def get_by_name(cls, key): |
|
72 | 72 | return cls.query().filter(cls.app_settings_name == key).scalar() |
|
73 | 73 | |
|
74 | 74 | |
|
75 | 75 | def fixups(models, _SESSION): |
|
76 | 76 | # ** create default permissions ** # |
|
77 | 77 | #===================================== |
|
78 | 78 | for p in models.Permission.PERMS: |
|
79 | 79 | if not get_by_key(models.Permission, p[0]): |
|
80 | 80 | new_perm = models.Permission() |
|
81 | 81 | new_perm.permission_name = p[0] |
|
82 | 82 | new_perm.permission_longname = p[0] #translation err with p[1] |
|
83 | print 'Creating new permission %s' % p[0] | |
|
83 | print('Creating new permission %s' % p[0]) | |
|
84 | 84 | _SESSION().add(new_perm) |
|
85 | 85 | |
|
86 | 86 | _SESSION().commit() |
|
87 | 87 | |
|
88 | 88 | # ** populate default permissions ** # |
|
89 | 89 | #===================================== |
|
90 | 90 | |
|
91 | 91 | user = models.User.query().filter(models.User.username == 'default').scalar() |
|
92 | 92 | |
|
93 | 93 | def _make_perm(perm): |
|
94 | 94 | new_perm = models.UserToPerm() |
|
95 | 95 | new_perm.user = user |
|
96 | 96 | new_perm.permission = get_by_key(models.Permission, perm) |
|
97 | 97 | return new_perm |
|
98 | 98 | |
|
99 | 99 | def _get_group(perm_name): |
|
100 | 100 | return '.'.join(perm_name.split('.')[:1]) |
|
101 | 101 | |
|
102 | 102 | perms = models.UserToPerm.query().filter(models.UserToPerm.user == user).all() |
|
103 | 103 | defined_perms_groups = map( |
|
104 | 104 | _get_group, (x.permission.permission_name for x in perms)) |
|
105 | 105 | log.debug('GOT ALREADY DEFINED:%s' % perms) |
|
106 | 106 | DEFAULT_PERMS = models.Permission.DEFAULT_USER_PERMISSIONS |
|
107 | 107 | |
|
108 | 108 | # for every default permission that needs to be created, we check if |
|
109 | 109 | # it's group is already defined, if it's not we create default perm |
|
110 | 110 | for perm_name in DEFAULT_PERMS: |
|
111 | 111 | gr = _get_group(perm_name) |
|
112 | 112 | if gr not in defined_perms_groups: |
|
113 | 113 | log.debug('GR:%s not found, creating permission %s' |
|
114 | 114 | % (gr, perm_name)) |
|
115 | 115 | new_perm = _make_perm(perm_name) |
|
116 | 116 | _SESSION().add(new_perm) |
|
117 | 117 | _SESSION().commit() |
|
118 | 118 | |
|
119 | 119 | # ** create default options ** # |
|
120 | 120 | #=============================== |
|
121 | 121 | skip_existing = True |
|
122 | 122 | for k, v in [ |
|
123 | 123 | ('default_repo_enable_locking', False), |
|
124 | 124 | ('default_repo_enable_downloads', False), |
|
125 | 125 | ('default_repo_enable_statistics', False), |
|
126 | 126 | ('default_repo_private', False), |
|
127 | 127 | ('default_repo_type', 'hg')]: |
|
128 | 128 | |
|
129 | 129 | if skip_existing and get_by_name(models.RhodeCodeSetting, k) is not None: |
|
130 | 130 | log.debug('Skipping option %s' % k) |
|
131 | 131 | continue |
|
132 | 132 | setting = models.RhodeCodeSetting(k, v) |
|
133 | 133 | _SESSION().add(setting) |
|
134 | 134 | |
|
135 | 135 | _SESSION().commit() |
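Note: the `username` column addition above is the standard three-step backfill: add NULLable, populate, then tighten to NOT NULL. Reduced to its skeleton (`.create()`/`.alter()` on columns come from sqlalchemy-migrate; model names as in the migration):

    from sqlalchemy import Column, MetaData, String, Table

    def add_username_column(migrate_engine, Session, UserLog):
        tbl = UserLog.__table__
        # 1. add as NULLable so existing rows keep loading
        Column('username', String(255), nullable=True).create(table=tbl)
        # 2. backfill while the user relation is still available
        session = Session()
        for entry in session.query(UserLog).all():
            entry.username = entry.user.username
            session.add(entry)
        session.commit()
        # 3. only then remove nullability
        reflected = Table(UserLog.__tablename__,
                          MetaData(bind=migrate_engine), autoload=True)
        reflected.columns.username.alter(nullable=False)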
@@ -1,49 +1,49 b'' | |||
|
1 | 1 | import logging |
|
2 | 2 | import datetime |
|
3 | 3 | |
|
4 | 4 | from sqlalchemy import * |
|
5 | 5 | from sqlalchemy.exc import DatabaseError |
|
6 | 6 | from sqlalchemy.orm import relation, backref, class_mapper, joinedload |
|
7 | 7 | from sqlalchemy.orm.session import Session |
|
8 | 8 | from sqlalchemy.ext.declarative import declarative_base |
|
9 | 9 | |
|
10 | 10 | from rhodecode.lib.dbmigrate.migrate import * |
|
11 | 11 | from rhodecode.lib.dbmigrate.migrate.changeset import * |
|
12 | 12 | |
|
13 | 13 | from rhodecode.model.meta import Base |
|
14 | 14 | from rhodecode.model import meta |
|
15 | 15 | from rhodecode.lib.dbmigrate.versions import _reset_base, notify |
|
16 | 16 | |
|
17 | 17 | log = logging.getLogger(__name__) |
|
18 | 18 | |
|
19 | 19 | |
|
20 | 20 | def upgrade(migrate_engine): |
|
21 | 21 | """ |
|
22 | 22 | Upgrade operations go here. |
|
23 | 23 | Don't create your own engine; bind migrate_engine to your metadata |
|
24 | 24 | """ |
|
25 | 25 | _reset_base(migrate_engine) |
|
26 | 26 | from rhodecode.lib.dbmigrate.schema import db_1_6_0 |
|
27 | 27 | |
|
28 | 28 | #========================================================================== |
|
29 | 29 | # USER LOGS |
|
30 | 30 | #========================================================================== |
|
31 | 31 | tbl = db_1_6_0.RepositoryField.__table__ |
|
32 | 32 | tbl.create() |
|
33 | 33 | |
|
34 | 34 | # issue fixups |
|
35 | 35 | fixups(db_1_6_0, meta.Session) |
|
36 | 36 | |
|
37 | 37 | |
|
38 | 38 | def downgrade(migrate_engine): |
|
39 | 39 | meta = MetaData() |
|
40 | 40 | meta.bind = migrate_engine |
|
41 | 41 | |
|
42 | 42 | |
|
43 | 43 | def fixups(models, _SESSION): |
|
44 | 44 | notify('Upgrading repositories Caches') |
|
45 | 45 | repositories = models.Repository.getAll() |
|
46 | 46 | for repo in repositories: |
|
47 | print repo | |
|
47 | print(repo) | |
|
48 | 48 | repo.update_commit_cache() |
|
49 | 49 | _SESSION().commit() |
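Note: this hunk, like most below, is the mechanical `print x` → `print(x)` rewrite. With a single argument the parenthesized form already parses under Python 2 (as a print statement of a parenthesized expression), so the converted files stay runnable on both interpreters; code printing multiple arguments would also need the future import:

    # makes print a real function under Python 2, so multi-argument
    # calls behave the same as under Python 3
    from __future__ import print_function

    repo = 'vcs-repo'
    print(repo)                   # was: print repo
    print('repo %s done' % repo)  # was: print 'repo %s done' % repo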
@@ -1,85 +1,85 b'' | |||
|
1 | 1 | import os |
|
2 | 2 | import logging |
|
3 | 3 | import datetime |
|
4 | 4 | |
|
5 | 5 | from sqlalchemy import * |
|
6 | 6 | from sqlalchemy.exc import DatabaseError |
|
7 | 7 | from sqlalchemy.orm import relation, backref, class_mapper, joinedload |
|
8 | 8 | from sqlalchemy.orm.session import Session |
|
9 | 9 | from sqlalchemy.ext.declarative import declarative_base |
|
10 | 10 | |
|
11 | 11 | from rhodecode.lib.dbmigrate.migrate import * |
|
12 | 12 | from rhodecode.lib.dbmigrate.migrate.changeset import * |
|
13 | 13 | from rhodecode.lib.utils2 import str2bool |
|
14 | 14 | |
|
15 | 15 | from rhodecode.model.meta import Base |
|
16 | 16 | from rhodecode.model import meta |
|
17 | 17 | from rhodecode.lib.dbmigrate.versions import _reset_base, notify |
|
18 | 18 | |
|
19 | 19 | log = logging.getLogger(__name__) |
|
20 | 20 | |
|
21 | 21 | |
|
22 | 22 | def get_by_key(cls, key): |
|
23 | 23 | return cls.query().filter(cls.ui_key == key).scalar() |
|
24 | 24 | |
|
25 | 25 | |
|
26 | 26 | def get_repos_location(cls): |
|
27 | 27 | return get_by_key(cls, '/').ui_value |
|
28 | 28 | |
|
29 | 29 | |
|
30 | 30 | def upgrade(migrate_engine): |
|
31 | 31 | """ |
|
32 | 32 | Upgrade operations go here. |
|
33 | 33 | Don't create your own engine; bind migrate_engine to your metadata |
|
34 | 34 | """ |
|
35 | 35 | _reset_base(migrate_engine) |
|
36 | 36 | from rhodecode.lib.dbmigrate.schema import db_2_0_1 |
|
37 | 37 | tbl = db_2_0_1.RepoGroup.__table__ |
|
38 | 38 | |
|
39 | 39 | created_on = Column('created_on', DateTime(timezone=False), nullable=True, |
|
40 | 40 | default=datetime.datetime.now) |
|
41 | 41 | created_on.create(table=tbl) |
|
42 | 42 | |
|
43 | 43 | #fix null values on certain columns when upgrading from older releases |
|
44 | 44 | tbl = db_2_0_1.UserLog.__table__ |
|
45 | 45 | col = tbl.columns.user_id |
|
46 | 46 | col.alter(nullable=True) |
|
47 | 47 | |
|
48 | 48 | tbl = db_2_0_1.UserFollowing.__table__ |
|
49 | 49 | col = tbl.columns.follows_repository_id |
|
50 | 50 | col.alter(nullable=True) |
|
51 | 51 | |
|
52 | 52 | tbl = db_2_0_1.UserFollowing.__table__ |
|
53 | 53 | col = tbl.columns.follows_user_id |
|
54 | 54 | col.alter(nullable=True) |
|
55 | 55 | |
|
56 | 56 | # issue fixups |
|
57 | 57 | fixups(db_2_0_1, meta.Session) |
|
58 | 58 | |
|
59 | 59 | |
|
60 | 60 | def downgrade(migrate_engine): |
|
61 | 61 | meta = MetaData() |
|
62 | 62 | meta.bind = migrate_engine |
|
63 | 63 | |
|
64 | 64 | |
|
65 | 65 | def fixups(models, _SESSION): |
|
66 | 66 | notify('Fixing default created on for repo groups') |
|
67 | 67 | |
|
68 | 68 | for gr in models.RepoGroup.get_all(): |
|
69 | 69 | gr.created_on = datetime.datetime.now() |
|
70 | 70 | _SESSION().add(gr) |
|
71 | 71 | _SESSION().commit() |
|
72 | 72 | |
|
73 | 73 | repo_store_path = get_repos_location(models.RhodeCodeUi) |
|
74 | 74 | _store = os.path.join(repo_store_path, '.cache', 'largefiles') |
|
75 | 75 | notify('Setting largefiles usercache') |
|
76 | print _store | |
|
76 | print(_store) | |
|
77 | 77 | |
|
78 | 78 | if not models.RhodeCodeUi.query().filter( |
|
79 | 79 | models.RhodeCodeUi.ui_key == 'usercache').scalar(): |
|
80 | 80 | largefiles = models.RhodeCodeUi() |
|
81 | 81 | largefiles.ui_section = 'largefiles' |
|
82 | 82 | largefiles.ui_key = 'usercache' |
|
83 | 83 | largefiles.ui_value = _store |
|
84 | 84 | _SESSION().add(largefiles) |
|
85 | 85 | _SESSION().commit() |
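Note: the `usercache` block above is guarded so the migration stays idempotent: re-running it must not insert a duplicate ui row. The guard restated as a helper (model shape as in the migration; the path layout matches the `_store` computed above):

    import os

    def ensure_usercache(Session, RhodeCodeUi, repo_store_path):
        store = os.path.join(repo_store_path, '.cache', 'largefiles')
        present = RhodeCodeUi.query().filter(
            RhodeCodeUi.ui_key == 'usercache').scalar()
        if not present:  # only seed once
            entry = RhodeCodeUi()
            entry.ui_section = 'largefiles'
            entry.ui_key = 'usercache'
            entry.ui_value = store
            Session().add(entry)
            Session().commit()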
@@ -1,70 +1,70 b'' | |||
|
1 | 1 | import logging |
|
2 | 2 | import datetime |
|
3 | 3 | |
|
4 | 4 | from sqlalchemy import * |
|
5 | 5 | from sqlalchemy.exc import DatabaseError |
|
6 | 6 | from sqlalchemy.orm import relation, backref, class_mapper, joinedload |
|
7 | 7 | from sqlalchemy.orm.session import Session |
|
8 | 8 | from sqlalchemy.ext.declarative import declarative_base |
|
9 | 9 | |
|
10 | 10 | from rhodecode.lib.dbmigrate.migrate import * |
|
11 | 11 | from rhodecode.lib.dbmigrate.migrate.changeset import * |
|
12 | 12 | from rhodecode.lib.utils2 import str2bool |
|
13 | 13 | |
|
14 | 14 | from rhodecode.model.meta import Base |
|
15 | 15 | from rhodecode.model import meta |
|
16 | 16 | from rhodecode.lib.dbmigrate.versions import _reset_base, notify |
|
17 | 17 | |
|
18 | 18 | log = logging.getLogger(__name__) |
|
19 | 19 | |
|
20 | 20 | |
|
21 | 21 | def upgrade(migrate_engine): |
|
22 | 22 | """ |
|
23 | 23 | Upgrade operations go here. |
|
24 | 24 | Don't create your own engine; bind migrate_engine to your metadata |
|
25 | 25 | """ |
|
26 | 26 | _reset_base(migrate_engine) |
|
27 | 27 | from rhodecode.lib.dbmigrate.schema import db_2_0_2 |
|
28 | 28 | |
|
29 | 29 | # issue fixups |
|
30 | 30 | fixups(db_2_0_2, meta.Session) |
|
31 | 31 | |
|
32 | 32 | |
|
33 | 33 | def downgrade(migrate_engine): |
|
34 | 34 | meta = MetaData() |
|
35 | 35 | meta.bind = migrate_engine |
|
36 | 36 | |
|
37 | 37 | |
|
38 | 38 | def fixups(models, _SESSION): |
|
39 | 39 | notify('fixing new schema for landing_rev') |
|
40 | 40 | |
|
41 | 41 | for repo in models.Repository.get_all(): |
|
42 | print u'repo %s old landing rev is: %s' % (repo, repo.landing_rev) | |
|
42 | print(u'repo %s old landing rev is: %s' % (repo, repo.landing_rev)) | |
|
43 | 43 | _rev = repo.landing_rev[1] |
|
44 | 44 | _rev_type = 'rev' # default |
|
45 | 45 | |
|
46 | 46 | if _rev in ['default', 'master']: |
|
47 | 47 | _rev_type = 'branch' |
|
48 | 48 | elif _rev in ['tip']: |
|
49 | 49 | _rev_type = 'rev' |
|
50 | 50 | else: |
|
51 | 51 | try: |
|
52 | 52 | scm = repo.scm_instance |
|
53 | 53 | if scm: |
|
54 | 54 | known_branches = scm.branches.keys() |
|
55 | 55 | known_bookmarks = scm.bookmarks.keys() |
|
56 | 56 | if _rev in known_branches: |
|
57 | 57 | _rev_type = 'branch' |
|
58 | 58 | elif _rev in known_bookmarks: |
|
59 | 59 | _rev_type = 'book' |
|
60 | 60 | except Exception as e: |
|
61 | print e | |
|
62 | print 'continue...' | |
|
61 | print(e) | |
|
62 | print('continue...') | |
|
63 | 63 | #we don't want any error to break the process |
|
64 | 64 | pass |
|
65 | 65 | |
|
66 | 66 | _new_landing_rev = '%s:%s' % (_rev_type, _rev) |
|
67 | print u'setting to %s' % _new_landing_rev | |
|
67 | print(u'setting to %s' % _new_landing_rev) | |
|
68 | 68 | repo.landing_rev = _new_landing_rev |
|
69 | 69 | _SESSION().add(repo) |
|
70 | 70 | _SESSION().commit() |
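Note: the branch/bookmark/rev classification above is the whole point of this migration; as a pure function it is easier to test (the `scm` object with its `.branches`/`.bookmarks` dicts is assumed, matching how the code above uses it):

    def classify_landing_rev(rev, scm=None):
        if rev in ('default', 'master'):
            return 'branch:%s' % rev
        if rev == 'tip':
            return 'rev:%s' % rev
        try:
            if scm is not None:
                if rev in scm.branches.keys():
                    return 'branch:%s' % rev
                if rev in scm.bookmarks.keys():
                    return 'book:%s' % rev
        except Exception:
            pass  # no scm error may break the migration
        return 'rev:%s' % rev

    assert classify_landing_rev('master') == 'branch:master'
    assert classify_landing_rev('abc123') == 'rev:abc123'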
@@ -1,65 +1,65 b'' | |||
|
1 | 1 | import logging |
|
2 | 2 | import datetime |
|
3 | 3 | |
|
4 | 4 | from sqlalchemy import * |
|
5 | 5 | from sqlalchemy.exc import DatabaseError |
|
6 | 6 | from sqlalchemy.orm import relation, backref, class_mapper, joinedload |
|
7 | 7 | from sqlalchemy.orm.session import Session |
|
8 | 8 | from sqlalchemy.ext.declarative import declarative_base |
|
9 | 9 | |
|
10 | 10 | from rhodecode.lib.dbmigrate.migrate import * |
|
11 | 11 | from rhodecode.lib.dbmigrate.migrate.changeset import * |
|
12 | 12 | from rhodecode.lib.utils2 import str2bool |
|
13 | 13 | |
|
14 | 14 | from rhodecode.model.meta import Base |
|
15 | 15 | from rhodecode.model import meta |
|
16 | 16 | from rhodecode.lib.dbmigrate.versions import _reset_base, notify |
|
17 | 17 | |
|
18 | 18 | log = logging.getLogger(__name__) |
|
19 | 19 | |
|
20 | 20 | |
|
21 | 21 | def get_by_key(cls, key): |
|
22 | 22 | return cls.query().filter(cls.permission_name == key).scalar() |
|
23 | 23 | |
|
24 | 24 | |
|
25 | 25 | def upgrade(migrate_engine): |
|
26 | 26 | """ |
|
27 | 27 | Upgrade operations go here. |
|
28 | 28 | Don't create your own engine; bind migrate_engine to your metadata |
|
29 | 29 | """ |
|
30 | 30 | _reset_base(migrate_engine) |
|
31 | 31 | from rhodecode.lib.dbmigrate.schema import db_2_2_0 |
|
32 | 32 | |
|
33 | 33 | # issue fixups |
|
34 | 34 | fixups(db_2_2_0, meta.Session) |
|
35 | 35 | |
|
36 | 36 | |
|
37 | 37 | def downgrade(migrate_engine): |
|
38 | 38 | meta = MetaData() |
|
39 | 39 | meta.bind = migrate_engine |
|
40 | 40 | |
|
41 | 41 | |
|
42 | 42 | def fixups(models, _SESSION): |
|
43 | 43 | # ** create default permissions ** # |
|
44 | 44 | #===================================== |
|
45 | 45 | for p in models.Permission.PERMS: |
|
46 | 46 | if not get_by_key(models.Permission, p[0]): |
|
47 | 47 | new_perm = models.Permission() |
|
48 | 48 | new_perm.permission_name = p[0] |
|
49 | 49 | new_perm.permission_longname = p[0] #translation err with p[1] |
|
50 | print 'Creating new permission %s' % p[0] | |
|
50 | print('Creating new permission %s' % p[0]) | |
|
51 | 51 | _SESSION().add(new_perm) |
|
52 | 52 | |
|
53 | 53 | _SESSION().commit() |
|
54 | 54 | |
|
55 | 55 | # ** set default create_on_write to active |
|
56 | 56 | user = models.User.query().filter( |
|
57 | 57 | models.User.username == 'default').scalar() |
|
58 | 58 | |
|
59 | 59 | _def = 'hg.create.write_on_repogroup.true' |
|
60 | 60 | new = models.UserToPerm() |
|
61 | 61 | new.user = user |
|
62 | 62 | new.permission = get_by_key(models.Permission, _def) |
|
63 | print 'Setting default to %s' % _def | |
|
63 | print('Setting default to %s' % _def) | |
|
64 | 64 | _SESSION().add(new) |
|
65 | 65 | _SESSION().commit() |
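Note: the permission seeding above is the same get-or-create idiom as in the 1_5_0 migration: look the row up by key, insert only when absent, commit once. Restated as a helper, assuming `PERMS` holds `(key, label)` pairs and passing in the `get_by_key` lookup defined at the top of this hunk:

    def seed_permissions(models, Session, get_by_key):
        for perm_key, _label in models.Permission.PERMS:
            if not get_by_key(models.Permission, perm_key):
                perm = models.Permission()
                perm.permission_name = perm_key
                # key reused for the long name; see the translation
                # note in the code above
                perm.permission_longname = perm_key
                Session().add(perm)
        Session().commit()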
@@ -1,44 +1,44 b'' | |||
|
1 | 1 | import logging |
|
2 | 2 | import datetime |
|
3 | 3 | |
|
4 | 4 | from sqlalchemy import * |
|
5 | 5 | from sqlalchemy.exc import DatabaseError |
|
6 | 6 | from sqlalchemy.orm import relation, backref, class_mapper, joinedload |
|
7 | 7 | from sqlalchemy.orm.session import Session |
|
8 | 8 | from sqlalchemy.ext.declarative import declarative_base |
|
9 | 9 | |
|
10 | 10 | from rhodecode.lib.dbmigrate.migrate import * |
|
11 | 11 | from rhodecode.lib.dbmigrate.migrate.changeset import * |
|
12 | 12 | from rhodecode.lib.utils2 import str2bool |
|
13 | 13 | |
|
14 | 14 | from rhodecode.model.meta import Base |
|
15 | 15 | from rhodecode.model import meta |
|
16 | 16 | from rhodecode.lib.dbmigrate.versions import _reset_base, notify |
|
17 | 17 | |
|
18 | 18 | log = logging.getLogger(__name__) |
|
19 | 19 | |
|
20 | 20 | |
|
21 | 21 | def upgrade(migrate_engine): |
|
22 | 22 | """ |
|
23 | 23 | Upgrade operations go here. |
|
24 | 24 | Don't create your own engine; bind migrate_engine to your metadata |
|
25 | 25 | """ |
|
26 | 26 | _reset_base(migrate_engine) |
|
27 | 27 | from rhodecode.lib.dbmigrate.schema import db_2_3_0_1 |
|
28 | 28 | |
|
29 | 29 | # issue fixups |
|
30 | 30 | fixups(db_2_3_0_1, meta.Session) |
|
31 | 31 | |
|
32 | 32 | |
|
33 | 33 | def downgrade(migrate_engine): |
|
34 | 34 | meta = MetaData() |
|
35 | 35 | meta.bind = migrate_engine |
|
36 | 36 | |
|
37 | 37 | |
|
38 | 38 | def fixups(models, _SESSION): |
|
39 | 39 | notify('Setting default renderer to rst') |
|
40 | 40 | for cs_comment in models.ChangesetComment.get_all(): |
|
41 | print 'comment_id %s renderer rst' % (cs_comment.comment_id) | |
|
41 | print('comment_id %s renderer rst' % (cs_comment.comment_id)) | |
|
42 | 42 | cs_comment.renderer = 'rst' |
|
43 | 43 | _SESSION().add(cs_comment) |
|
44 | 44 | _SESSION().commit() |
@@ -1,149 +1,149 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2018 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import os |
|
22 | 22 | import errno |
|
23 | 23 | |
|
24 | 24 | from multiprocessing.util import Finalize |
|
25 | 25 | |
|
26 | 26 | from rhodecode.lib.compat import kill |
|
27 | 27 | |
|
28 | 28 | |
|
29 | 29 | class LockHeld(Exception): |
|
30 | 30 | pass |
|
31 | 31 | |
|
32 | 32 | |
|
33 | 33 | class DaemonLock(object): |
|
34 | 34 | """daemon locking |
|
35 | 35 | USAGE: |
|
36 | 36 | try: |
|
37 | 37 | l = DaemonLock(file_='/path/tolockfile',desc='test lock') |
|
38 | 38 | main() |
|
39 | 39 | l.release() |
|
40 | 40 | except LockHeld: |
|
41 | 41 | sys.exit(1) |
|
42 | 42 | """ |
|
43 | 43 | |
|
44 | 44 | def __init__(self, file_=None, callbackfn=None, |
|
45 | 45 | desc='daemon lock', debug=False): |
|
46 | 46 | |
|
47 | 47 | lock_name = os.path.join(os.path.dirname(__file__), 'running.lock') |
|
48 | 48 | self.pidfile = file_ if file_ else lock_name |
|
49 | 49 | self.callbackfn = callbackfn |
|
50 | 50 | self.desc = desc |
|
51 | 51 | self.debug = debug |
|
52 | 52 | self.held = False |
|
53 | 53 | #run the lock automatically ! |
|
54 | 54 | self.lock() |
|
55 | 55 | self._finalize = Finalize(self, DaemonLock._on_finalize, |
|
56 | 56 | args=(self, debug), exitpriority=10) |
|
57 | 57 | |
|
58 | 58 | @staticmethod |
|
59 | 59 | def _on_finalize(lock, debug): |
|
60 | 60 | if lock.held: |
|
61 | 61 | if debug: |
|
62 | print 'leck held finilazing and running lock.release()' | |
|
62 | print('leck held finilazing and running lock.release()') | |
|
63 | 63 | lock.release() |
|
64 | 64 | |
|
65 | 65 | def lock(self): |
|
66 | 66 | """ |
|
67 | 67 | locking function, if lock is present it |
|
68 | 68 | will raise LockHeld exception |
|
69 | 69 | """ |
|
70 | 70 | lockname = '%s' % (os.getpid()) |
|
71 | 71 | if self.debug: |
|
72 | print 'running lock' | |
|
72 | print('running lock') | |
|
73 | 73 | self.trylock() |
|
74 | 74 | self.makelock(lockname, self.pidfile) |
|
75 | 75 | return True |
|
76 | 76 | |
|
77 | 77 | def trylock(self): |
|
78 | 78 | running_pid = False |
|
79 | 79 | if self.debug: |
|
80 | print 'checking for already running process' | |
|
80 | print('checking for already running process') | |
|
81 | 81 | try: |
|
82 | 82 | with open(self.pidfile, 'r') as f: |
|
83 | 83 | try: |
|
84 | 84 | running_pid = int(f.readline()) |
|
85 | 85 | except ValueError: |
|
86 | 86 | running_pid = -1 |
|
87 | 87 | |
|
88 | 88 | if self.debug: |
|
89 | 89 | print('lock file present running_pid: %s, ' |
|
90 | 90 | 'checking for execution' % running_pid) |
|
91 | 91 | # Now we check the PID from lock file matches to the current |
|
92 | 92 | # process PID |
|
93 | 93 | if running_pid: |
|
94 | 94 | try: |
|
95 | 95 | kill(running_pid, 0) |
|
96 | 96 | except OSError as exc: |
|
97 | 97 | if exc.errno in (errno.ESRCH, errno.EPERM): |
|
98 | 98 | print("Lock File is there but" |
|
99 | 99 | " the program is not running") |
|
100 | print "Removing lock file for the: %s" % running_pid | |
|
100 | print("Removing lock file for the: %s" % running_pid) | |
|
101 | 101 | self.release() |
|
102 | 102 | else: |
|
103 | 103 | raise |
|
104 | 104 | else: |
|
105 | print "You already have an instance of the program running" | |
|
106 | print "It is running as process %s" % running_pid | |
|
105 | print("You already have an instance of the program running") | |
|
106 | print("It is running as process %s" % running_pid) | |
|
107 | 107 | raise LockHeld() |
|
108 | 108 | |
|
109 | 109 | except IOError as e: |
|
110 | 110 | if e.errno != 2: |
|
111 | 111 | raise |
|
112 | 112 | |
|
113 | 113 | def release(self): |
|
114 | 114 | """releases the pid by removing the pidfile |
|
115 | 115 | """ |
|
116 | 116 | if self.debug: |
|
117 | print 'trying to release the pidlock' | |
|
117 | print('trying to release the pidlock') | |
|
118 | 118 | |
|
119 | 119 | if self.callbackfn: |
|
120 | 120 | #execute callback function on release |
|
121 | 121 | if self.debug: |
|
122 | print 'executing callback function %s' % self.callbackfn | |
|
122 | print('executing callback function %s' % self.callbackfn) | |
|
123 | 123 | self.callbackfn() |
|
124 | 124 | try: |
|
125 | 125 | if self.debug: |
|
126 | print 'removing pidfile %s' % self.pidfile | |
|
126 | print('removing pidfile %s' % self.pidfile) | |
|
127 | 127 | os.remove(self.pidfile) |
|
128 | 128 | self.held = False |
|
129 | 129 | except OSError as e: |
|
130 | 130 | if self.debug: |
|
131 | print 'removing pidfile failed %s' % e | |
|
131 | print('removing pidfile failed %s' % e) | |
|
132 | 132 | pass |
|
133 | 133 | |
|
134 | 134 | def makelock(self, lockname, pidfile): |
|
135 | 135 | """ |
|
136 | 136 | this function will make an actual lock |
|
137 | 137 | |
|
138 | 138 | :param lockname: acctual pid of file |
|
139 | 139 | :param pidfile: the file to write the pid in |
|
140 | 140 | """ |
|
141 | 141 | if self.debug: |
|
142 | print 'creating a file %s and pid: %s' % (pidfile, lockname) | |
|
142 | print('creating a file %s and pid: %s' % (pidfile, lockname)) | |
|
143 | 143 | |
|
144 | 144 | dir_, file_ = os.path.split(pidfile) |
|
145 | 145 | if not os.path.isdir(dir_): |
|
146 | 146 | os.makedirs(dir_) |
|
147 | 147 | with open(self.pidfile, 'wb') as f: |
|
148 | 148 | f.write(lockname) |
|
149 | 149 | self.held = True |
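Note: the class docstring above already shows the intended call pattern; the one subtle piece is the liveness probe in `trylock`. `kill(pid, 0)` delivers no signal, it only checks whether the pid can be signalled, and this class treats both ESRCH and EPERM as a stale lock worth removing. The probe on its own:

    import errno
    import os

    def lock_is_stale(pid):
        # mirrors trylock() above: signal 0 probes without delivering
        try:
            os.kill(pid, 0)
        except OSError as exc:
            # ESRCH: no such process; EPERM: not ours -- both treated
            # as stale by DaemonLock
            return exc.errno in (errno.ESRCH, errno.EPERM)
        return False  # process answered the probe, lock is live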
@@ -1,1025 +1,1025 b'' | |||
|
1 | 1 | # (c) 2005 Ian Bicking and contributors; written for Paste |
|
2 | 2 | # (http://pythonpaste.org) Licensed under the MIT license: |
|
3 | 3 | # http://www.opensource.org/licenses/mit-license.php |
|
4 | 4 | # |
|
5 | 5 | # For discussion of daemonizing: |
|
6 | 6 | # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731 |
|
7 | 7 | # |
|
8 | 8 | # Code taken also from QP: http://www.mems-exchange.org/software/qp/ From |
|
9 | 9 | # lib/site.py |
|
10 | 10 | |
|
11 | 11 | import atexit |
|
12 | 12 | import errno |
|
13 | 13 | import fnmatch |
|
14 | 14 | import logging |
|
15 | 15 | import optparse |
|
16 | 16 | import os |
|
17 | 17 | import re |
|
18 | 18 | import subprocess32 |
|
19 | 19 | import sys |
|
20 | 20 | import textwrap |
|
21 | 21 | import threading |
|
22 | 22 | import time |
|
23 | 23 | import traceback |
|
24 | 24 | |
|
25 | 25 | from logging.config import fileConfig |
|
26 | 26 | import ConfigParser as configparser |
|
27 | 27 | from paste.deploy import loadserver |
|
28 | 28 | from paste.deploy import loadapp |
|
29 | 29 | |
|
30 | 30 | import rhodecode |
|
31 | 31 | from rhodecode.lib.compat import kill |
|
32 | 32 | |
|
33 | 33 | |
|
34 | 34 | def make_web_build_callback(filename): |
|
35 | 35 | p = subprocess32.Popen('make web-build', shell=True, |
|
36 | 36 | stdout=subprocess32.PIPE, |
|
37 | 37 | stderr=subprocess32.PIPE, |
|
38 | 38 | cwd=os.path.dirname(os.path.dirname(__file__))) |
|
39 | 39 | stdout, stderr = p.communicate() |
|
40 | 40 | stdout = ''.join(stdout) |
|
41 | 41 | stderr = ''.join(stderr) |
|
42 | 42 | if stdout: |
|
43 | print stdout | |
|
43 | print(stdout) | |
|
44 | 44 | if stderr: |
|
45 | 45 | print('%s %s %s' % ('-' * 20, 'ERRORS', '-' * 20)) |
|
46 | print stderr | |
|
46 | print(stderr) | |
|
47 | 47 | |
|
48 | 48 | |
|
49 | 49 | MAXFD = 1024 |
|
50 | 50 | HERE = os.path.dirname(os.path.abspath(__file__)) |
|
51 | 51 | SERVER_RUNNING_FILE = None |
|
52 | 52 | |
|
53 | 53 | |
|
54 | 54 | # watch those extra files for changes, server gets restarted if file changes |
|
55 | 55 | GLOBAL_EXTRA_FILES = { |
|
56 | 56 | 'rhodecode/public/css/*.less': make_web_build_callback, |
|
57 | 57 | 'rhodecode/public/js/src/**/*.js': make_web_build_callback, |
|
58 | 58 | } |
|
59 | 59 | |
|
60 | 60 | |
|
61 | 61 | |
|
62 | 62 | ## HOOKS - inspired by gunicorn # |
|
63 | 63 | |
|
64 | 64 | def when_ready(server): |
|
65 | 65 | """ |
|
66 | 66 | Called just after the server is started. |
|
67 | 67 | """ |
|
68 | 68 | |
|
69 | 69 | def _remove_server_running_file(): |
|
70 | 70 | if os.path.isfile(SERVER_RUNNING_FILE): |
|
71 | 71 | os.remove(SERVER_RUNNING_FILE) |
|
72 | 72 | |
|
73 | 73 | if SERVER_RUNNING_FILE: |
|
74 | 74 | with open(SERVER_RUNNING_FILE, 'wb') as f: |
|
75 | 75 | f.write(str(os.getpid())) |
|
76 | 76 | # register cleanup of that file when server exits |
|
77 | 77 | atexit.register(_remove_server_running_file) |
|
78 | 78 | |
|
79 | 79 | |
|
80 | 80 | def setup_logging(config_uri, fileConfig=fileConfig, |
|
81 | 81 | configparser=configparser): |
|
82 | 82 | """ |
|
83 | 83 | Set up logging via the logging module's fileConfig function with the |
|
84 | 84 | filename specified via ``config_uri`` (a string in the form |
|
85 | 85 | ``filename#sectionname``). |
|
86 | 86 | |
|
87 | 87 | ConfigParser defaults are specified for the special ``__file__`` |
|
88 | 88 | and ``here`` variables, similar to PasteDeploy config loading. |
|
89 | 89 | """ |
|
90 | 90 | path, _ = _getpathsec(config_uri, None) |
|
91 | 91 | parser = configparser.ConfigParser() |
|
92 | 92 | parser.read([path]) |
|
93 | 93 | if parser.has_section('loggers'): |
|
94 | 94 | config_file = os.path.abspath(path) |
|
95 | 95 | return fileConfig( |
|
96 | 96 | config_file, |
|
97 | 97 | {'__file__': config_file, 'here': os.path.dirname(config_file)} |
|
98 | 98 | ) |
|
99 | 99 | |
|
100 | 100 | |
|
101 | 101 | def set_rhodecode_is_test(config_uri): |
|
102 | 102 | """If is_test is defined in the config file sets rhodecode.is_test.""" |
|
103 | 103 | path, _ = _getpathsec(config_uri, None) |
|
104 | 104 | parser = configparser.ConfigParser() |
|
105 | 105 | parser.read(path) |
|
106 | 106 | rhodecode.is_test = ( |
|
107 | 107 | parser.has_option('app:main', 'is_test') and |
|
108 | 108 | parser.getboolean('app:main', 'is_test')) |
|
109 | 109 | |
|
110 | 110 | |
|
111 | 111 | def _getpathsec(config_uri, name): |
|
112 | 112 | if '#' in config_uri: |
|
113 | 113 | path, section = config_uri.split('#', 1) |
|
114 | 114 | else: |
|
115 | 115 | path, section = config_uri, 'main' |
|
116 | 116 | if name: |
|
117 | 117 | section = name |
|
118 | 118 | return path, section |
|
119 | 119 | |
|
120 | 120 | |
|
121 | 121 | def parse_vars(args): |
|
122 | 122 | """ |
|
123 | 123 | Given variables like ``['a=b', 'c=d']`` turns it into ``{'a': |
|
124 | 124 | 'b', 'c': 'd'}`` |
|
125 | 125 | """ |
|
126 | 126 | result = {} |
|
127 | 127 | for arg in args: |
|
128 | 128 | if '=' not in arg: |
|
129 | 129 | raise ValueError( |
|
130 | 130 | 'Variable assignment %r invalid (no "=")' |
|
131 | 131 | % arg) |
|
132 | 132 | name, value = arg.split('=', 1) |
|
133 | 133 | result[name] = value |
|
134 | 134 | return result |
|
135 | 135 | |
|
136 | 136 | |
|
137 | 137 | def _match_pattern(filename): |
|
138 | 138 | for pattern in GLOBAL_EXTRA_FILES: |
|
139 | 139 | if fnmatch.fnmatch(filename, pattern): |
|
140 | 140 | return pattern |
|
141 | 141 | return False |
|
142 | 142 | |
|
143 | 143 | |
|
144 | 144 | def generate_extra_file_list(): |
|
145 | 145 | |
|
146 | 146 | extra_list = [] |
|
147 | 147 | for root, dirs, files in os.walk(HERE, topdown=True): |
|
148 | 148 | for fname in files: |
|
149 | 149 | stripped_src = os.path.join( |
|
150 | 150 | 'rhodecode', os.path.relpath(os.path.join(root, fname), HERE)) |
|
151 | 151 | |
|
152 | 152 | if _match_pattern(stripped_src): |
|
153 | 153 | extra_list.append(stripped_src) |
|
154 | 154 | |
|
155 | 155 | return extra_list |
|
156 | 156 | |
|
157 | 157 | |
|
158 | 158 | def run_callback_for_pattern(filename): |
|
159 | 159 | pattern = _match_pattern(filename) |
|
160 | 160 | if pattern: |
|
161 | 161 | _file_callback = GLOBAL_EXTRA_FILES.get(pattern) |
|
162 | 162 | if callable(_file_callback): |
|
163 | 163 | _file_callback(filename) |
|
164 | 164 | |
|
165 | 165 | |
|
166 | 166 | class DaemonizeException(Exception): |
|
167 | 167 | pass |
|
168 | 168 | |
|
169 | 169 | |
|
170 | 170 | class RcServerCommand(object): |
|
171 | 171 | |
|
172 | 172 | usage = '%prog config_uri [start|stop|restart|status] [var=value]' |
|
173 | 173 | description = """\ |
|
174 | 174 | This command serves a web application that uses a PasteDeploy |
|
175 | 175 | configuration file for the server and application. |
|
176 | 176 | |
|
177 | 177 | If start/stop/restart is given, then --daemon is implied, and it will |
|
178 | 178 | start (normal operation), stop (--stop-daemon), or do both. |
|
179 | 179 | |
|
180 | 180 | You can also include variable assignments like 'http_port=8080' |
|
181 | 181 | and then use %(http_port)s in your config files. |
|
182 | 182 | """ |
|
183 | 183 | default_verbosity = 1 |
|
184 | 184 | |
|
185 | 185 | parser = optparse.OptionParser( |
|
186 | 186 | usage, |
|
187 | 187 | description=textwrap.dedent(description) |
|
188 | 188 | ) |
|
189 | 189 | parser.add_option( |
|
190 | 190 | '-n', '--app-name', |
|
191 | 191 | dest='app_name', |
|
192 | 192 | metavar='NAME', |
|
193 | 193 | help="Load the named application (default main)") |
|
194 | 194 | parser.add_option( |
|
195 | 195 | '-s', '--server', |
|
196 | 196 | dest='server', |
|
197 | 197 | metavar='SERVER_TYPE', |
|
198 | 198 | help="Use the named server.") |
|
199 | 199 | parser.add_option( |
|
200 | 200 | '--server-name', |
|
201 | 201 | dest='server_name', |
|
202 | 202 | metavar='SECTION_NAME', |
|
203 | 203 | help=("Use the named server as defined in the configuration file " |
|
204 | 204 | "(default: main)")) |
|
205 | 205 | parser.add_option( |
|
206 | 206 | '--with-vcsserver', |
|
207 | 207 | dest='vcs_server', |
|
208 | 208 | action='store_true', |
|
209 | 209 | help=("Start the vcsserver instance together with the RhodeCode server")) |
|
210 | 210 | if hasattr(os, 'fork'): |
|
211 | 211 | parser.add_option( |
|
212 | 212 | '--daemon', |
|
213 | 213 | dest="daemon", |
|
214 | 214 | action="store_true", |
|
215 | 215 | help="Run in daemon (background) mode") |
|
216 | 216 | parser.add_option( |
|
217 | 217 | '--pid-file', |
|
218 | 218 | dest='pid_file', |
|
219 | 219 | metavar='FILENAME', |
|
220 | 220 | help=("Save PID to file (default to pyramid.pid if running in " |
|
221 | 221 | "daemon mode)")) |
|
222 | 222 | parser.add_option( |
|
223 | 223 | '--running-file', |
|
224 | 224 | dest='running_file', |
|
225 | 225 | metavar='RUNNING_FILE', |
|
226 | 226 | help="Create a running file after the server is initalized with " |
|
227 | 227 | "stored PID of process") |
|
228 | 228 | parser.add_option( |
|
229 | 229 | '--log-file', |
|
230 | 230 | dest='log_file', |
|
231 | 231 | metavar='LOG_FILE', |
|
232 | 232 | help="Save output to the given log file (redirects stdout)") |
|
233 | 233 | parser.add_option( |
|
234 | 234 | '--reload', |
|
235 | 235 | dest='reload', |
|
236 | 236 | action='store_true', |
|
237 | 237 | help="Use auto-restart file monitor") |
|
238 | 238 | parser.add_option( |
|
239 | 239 | '--reload-interval', |
|
240 | 240 | dest='reload_interval', |
|
241 | 241 | default=1, |
|
242 | 242 | help=("Seconds between checking files (low number can cause " |
|
243 | 243 | "significant CPU usage)")) |
|
244 | 244 | parser.add_option( |
|
245 | 245 | '--monitor-restart', |
|
246 | 246 | dest='monitor_restart', |
|
247 | 247 | action='store_true', |
|
248 | 248 | help="Auto-restart server if it dies") |
|
249 | 249 | parser.add_option( |
|
250 | 250 | '--status', |
|
251 | 251 | action='store_true', |
|
252 | 252 | dest='show_status', |
|
253 | 253 | help="Show the status of the (presumably daemonized) server") |
|
254 | 254 | parser.add_option( |
|
255 | 255 | '-v', '--verbose', |
|
256 | 256 | default=default_verbosity, |
|
257 | 257 | dest='verbose', |
|
258 | 258 | action='count', |
|
259 | 259 | help="Set verbose level (default "+str(default_verbosity)+")") |
|
260 | 260 | parser.add_option( |
|
261 | 261 | '-q', '--quiet', |
|
262 | 262 | action='store_const', |
|
263 | 263 | const=0, |
|
264 | 264 | dest='verbose', |
|
265 | 265 | help="Suppress verbose output") |
|
266 | 266 | |
|
267 | 267 | if hasattr(os, 'setuid'): |
|
268 | 268 | # I don't think these are available on Windows |
|
269 | 269 | parser.add_option( |
|
270 | 270 | '--user', |
|
271 | 271 | dest='set_user', |
|
272 | 272 | metavar="USERNAME", |
|
273 | 273 | help="Set the user (usually only possible when run as root)") |
|
274 | 274 | parser.add_option( |
|
275 | 275 | '--group', |
|
276 | 276 | dest='set_group', |
|
277 | 277 | metavar="GROUP", |
|
278 | 278 | help="Set the group (usually only possible when run as root)") |
|
279 | 279 | |
|
280 | 280 | parser.add_option( |
|
281 | 281 | '--stop-daemon', |
|
282 | 282 | dest='stop_daemon', |
|
283 | 283 | action='store_true', |
|
284 | 284 | help=('Stop a daemonized server (given a PID file, or default ' |
|
285 | 285 | 'pyramid.pid file)')) |
|
286 | 286 | |
|
287 | 287 | _scheme_re = re.compile(r'^[a-z][a-z]+:', re.I) |
|
288 | 288 | |
|
289 | 289 | _reloader_environ_key = 'PYTHON_RELOADER_SHOULD_RUN' |
|
290 | 290 | _monitor_environ_key = 'PASTE_MONITOR_SHOULD_RUN' |
|
291 | 291 | |
|
292 | 292 | possible_subcommands = ('start', 'stop', 'restart', 'status') |
|
293 | 293 | |
|
294 | 294 | def __init__(self, argv, quiet=False): |
|
295 | 295 | self.options, self.args = self.parser.parse_args(argv[1:]) |
|
296 | 296 | if quiet: |
|
297 | 297 | self.options.verbose = 0 |
|
298 | 298 | |
|
299 | 299 | def out(self, msg): # pragma: no cover |
|
300 | 300 | if self.options.verbose > 0: |
|
301 | 301 | print(msg) |
|
302 | 302 | |
|
303 | 303 | def get_options(self): |
|
304 | 304 | if (len(self.args) > 1 |
|
305 | 305 | and self.args[1] in self.possible_subcommands): |
|
306 | 306 | restvars = self.args[2:] |
|
307 | 307 | else: |
|
308 | 308 | restvars = self.args[1:] |
|
309 | 309 | |
|
310 | 310 | return parse_vars(restvars) |
|
311 | 311 | |
|
312 | 312 | def run(self): # pragma: no cover |
|
313 | 313 | if self.options.stop_daemon: |
|
314 | 314 | return self.stop_daemon() |
|
315 | 315 | |
|
316 | 316 | if not hasattr(self.options, 'set_user'): |
|
317 | 317 | # Windows case: |
|
318 | 318 | self.options.set_user = self.options.set_group = None |
|
319 | 319 | |
|
320 | 320 | # @@: Is this the right stage to set the user at? |
|
321 | 321 | self.change_user_group( |
|
322 | 322 | self.options.set_user, self.options.set_group) |
|
323 | 323 | |
|
324 | 324 | if not self.args: |
|
325 | 325 | self.out('Please provide configuration file as first argument, ' |
|
326 | 326 | 'most likely it should be production.ini') |
|
327 | 327 | return 2 |
|
328 | 328 | app_spec = self.args[0] |
|
329 | 329 | |
|
330 | 330 | if (len(self.args) > 1 |
|
331 | 331 | and self.args[1] in self.possible_subcommands): |
|
332 | 332 | cmd = self.args[1] |
|
333 | 333 | else: |
|
334 | 334 | cmd = None |
|
335 | 335 | |
|
336 | 336 | if self.options.reload: |
|
337 | 337 | if os.environ.get(self._reloader_environ_key): |
|
338 | 338 | if self.options.verbose > 1: |
|
339 | 339 | self.out('Running reloading file monitor') |
|
340 | 340 | |
|
341 | 341 | install_reloader(int(self.options.reload_interval), |
|
342 | 342 | [app_spec] + generate_extra_file_list()) |
|
343 | 343 | # if self.requires_config_file: |
|
344 | 344 | # watch_file(self.args[0]) |
|
345 | 345 | else: |
|
346 | 346 | return self.restart_with_reloader() |
|
347 | 347 | |
|
348 | 348 | if cmd not in (None, 'start', 'stop', 'restart', 'status'): |
|
349 | 349 | self.out( |
|
350 | 350 | 'Error: must give start|stop|restart (not %s)' % cmd) |
|
351 | 351 | return 2 |
|
352 | 352 | |
|
353 | 353 | if cmd == 'status' or self.options.show_status: |
|
354 | 354 | return self.show_status() |
|
355 | 355 | |
|
356 | 356 | if cmd == 'restart' or cmd == 'stop': |
|
357 | 357 | result = self.stop_daemon() |
|
358 | 358 | if result: |
|
359 | 359 | if cmd == 'restart': |
|
360 | 360 | self.out("Could not stop daemon; aborting") |
|
361 | 361 | else: |
|
362 | 362 | self.out("Could not stop daemon") |
|
363 | 363 | return result |
|
364 | 364 | if cmd == 'stop': |
|
365 | 365 | return result |
|
366 | 366 | self.options.daemon = True |
|
367 | 367 | |
|
368 | 368 | if cmd == 'start': |
|
369 | 369 | self.options.daemon = True |
|
370 | 370 | |
|
371 | 371 | app_name = self.options.app_name |
|
372 | 372 | |
|
373 | 373 | vars = self.get_options() |
|
374 | 374 | |
|
375 | 375 | if self.options.vcs_server: |
|
376 | 376 | vars['vcs.start_server'] = 'true' |
|
377 | 377 | |
|
378 | 378 | if self.options.running_file: |
|
379 | 379 | global SERVER_RUNNING_FILE |
|
380 | 380 | SERVER_RUNNING_FILE = self.options.running_file |
|
381 | 381 | |
|
382 | 382 | if not self._scheme_re.search(app_spec): |
|
383 | 383 | app_spec = 'config:' + app_spec |
|
384 | 384 | server_name = self.options.server_name |
|
385 | 385 | if self.options.server: |
|
386 | 386 | server_spec = 'egg:pyramid' |
|
387 | 387 | assert server_name is None |
|
388 | 388 | server_name = self.options.server |
|
389 | 389 | else: |
|
390 | 390 | server_spec = app_spec |
|
391 | 391 | base = os.getcwd() |
|
392 | 392 | |
|
393 | 393 | if getattr(self.options, 'daemon', False): |
|
394 | 394 | if not self.options.pid_file: |
|
395 | 395 | self.options.pid_file = 'pyramid.pid' |
|
396 | 396 | if not self.options.log_file: |
|
397 | 397 | self.options.log_file = 'pyramid.log' |
|
398 | 398 | |
|
399 | 399 | # Ensure the log file is writeable |
|
400 | 400 | if self.options.log_file: |
|
401 | 401 | try: |
|
402 | 402 | writeable_log_file = open(self.options.log_file, 'a') |
|
403 | 403 | except IOError as ioe: |
|
404 | 404 | msg = 'Error: Unable to write to log file: %s' % ioe |
|
405 | 405 | raise ValueError(msg) |
|
406 | 406 | writeable_log_file.close() |
|
407 | 407 | |
|
408 | 408 | # Ensure the pid file is writeable |
|
409 | 409 | if self.options.pid_file: |
|
410 | 410 | try: |
|
411 | 411 | writeable_pid_file = open(self.options.pid_file, 'a') |
|
412 | 412 | except IOError as ioe: |
|
413 | 413 | msg = 'Error: Unable to write to pid file: %s' % ioe |
|
414 | 414 | raise ValueError(msg) |
|
415 | 415 | writeable_pid_file.close() |
|
416 | 416 | |
|
417 | 417 | |
|
418 | 418 | if getattr(self.options, 'daemon', False): |
|
419 | 419 | try: |
|
420 | 420 | self.daemonize() |
|
421 | 421 | except DaemonizeException as ex: |
|
422 | 422 | if self.options.verbose > 0: |
|
423 | 423 | self.out(str(ex)) |
|
424 | 424 | return 2 |
|
425 | 425 | |
|
426 | 426 | if (self.options.monitor_restart |
|
427 | 427 | and not os.environ.get(self._monitor_environ_key)): |
|
428 | 428 | return self.restart_with_monitor() |
|
429 | 429 | |
|
430 | 430 | if self.options.pid_file: |
|
431 | 431 | self.record_pid(self.options.pid_file) |
|
432 | 432 | |
|
433 | 433 | if self.options.log_file: |
|
434 | 434 | stdout_log = LazyWriter(self.options.log_file, 'a') |
|
435 | 435 | sys.stdout = stdout_log |
|
436 | 436 | sys.stderr = stdout_log |
|
437 | 437 | logging.basicConfig(stream=stdout_log) |
|
438 | 438 | |
|
439 | 439 | log_fn = app_spec |
|
440 | 440 | if log_fn.startswith('config:'): |
|
441 | 441 | log_fn = app_spec[len('config:'):] |
|
442 | 442 | elif log_fn.startswith('egg:'): |
|
443 | 443 | log_fn = None |
|
444 | 444 | if log_fn: |
|
445 | 445 | log_fn = os.path.join(base, log_fn) |
|
446 | 446 | setup_logging(log_fn) |
|
447 | 447 | set_rhodecode_is_test(log_fn) |
|
448 | 448 | |
|
449 | 449 | server = self.loadserver(server_spec, name=server_name, |
|
450 | 450 | relative_to=base, global_conf=vars) |
|
451 | 451 | # starting hooks |
|
452 | 452 | app = self.loadapp(app_spec, name=app_name, relative_to=base, |
|
453 | 453 | global_conf=vars) |
|
454 | 454 | |
|
455 | 455 | if self.options.verbose > 0: |
|
456 | 456 | if hasattr(os, 'getpid'): |
|
457 | 457 | msg = 'Starting %s in PID %i.' % (__name__, os.getpid()) |
|
458 | 458 | else: |
|
459 | 459 | msg = 'Starting %s.' % (__name__,) |
|
460 | 460 | self.out(msg) |
|
461 | 461 | if SERVER_RUNNING_FILE: |
|
462 | 462 | self.out('PID file written as %s' % (SERVER_RUNNING_FILE, )) |
|
463 | 463 | elif not self.options.pid_file: |
|
464 | 464 | self.out('No PID file written by default.') |
|
465 | 465 | |
|
466 | 466 | try: |
|
467 | 467 | when_ready(server) |
|
468 | 468 | server(app) |
|
469 | 469 | except (SystemExit, KeyboardInterrupt) as e: |
|
470 | 470 | if self.options.verbose > 1: |
|
471 | 471 | raise |
|
472 | 472 | if str(e): |
|
473 | 473 | msg = ' ' + str(e) |
|
474 | 474 | else: |
|
475 | 475 | msg = '' |
|
476 | 476 | self.out('Exiting%s (-v to see traceback)' % msg) |
|
477 | 477 | |
|
478 | 478 | |
|
479 | 479 | def loadapp(self, app_spec, name, relative_to, **kw): # pragma: no cover |
|
480 | 480 | return loadapp(app_spec, name=name, relative_to=relative_to, **kw) |
|
481 | 481 | |
|
482 | 482 | def loadserver(self, server_spec, name, relative_to, **kw): # pragma:no cover |
|
483 | 483 | return loadserver( |
|
484 | 484 | server_spec, name=name, relative_to=relative_to, **kw) |
|
485 | 485 | |
|
486 | 486 | def quote_first_command_arg(self, arg): # pragma: no cover |
|
487 | 487 | """ |
|
488 | 488 | There's a bug in Windows when running an executable that's |
|
489 | 489 | located inside a path with a space in it. This method handles |
|
490 | 490 | that case; on non-Windows systems, or when the executable path

491 | 491 | contains no spaces, it leaves the argument alone.
|
492 | 492 | """ |
|
493 | 493 | if sys.platform != 'win32' or ' ' not in arg: |
|
494 | 494 | # Problem does not apply: |
|
495 | 495 | return arg |
|
496 | 496 | try: |
|
497 | 497 | import win32api |
|
498 | 498 | except ImportError: |
|
499 | 499 | raise ValueError( |
|
500 | 500 | "The executable %r contains a space, and in order to " |
|
501 | 501 | "handle this issue you must have the win32api module " |
|
502 | 502 | "installed" % arg) |
|
503 | 503 | arg = win32api.GetShortPathName(arg) |
|
504 | 504 | return arg |
|
505 | 505 | |
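
A sketch of the Windows path-quoting case (the path is hypothetical, ``win32api`` must be available, and ``command`` stands for an ``RcServerCommand`` instance)::

    exe = command.quote_first_command_arg(
        'C:\\Program Files\\Python\\python.exe')
    # on win32 this becomes a short 8.3 path such as
    # 'C:\\PROGRA~1\\Python\\python.exe'; on other platforms, or when
    # the argument has no spaces, it comes back unchanged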
|
506 | 506 | def daemonize(self): # pragma: no cover |
|
507 | 507 | pid = live_pidfile(self.options.pid_file) |
|
508 | 508 | if pid: |
|
509 | 509 | raise DaemonizeException( |
|
510 | 510 | "Daemon is already running (PID: %s from PID file %s)" |
|
511 | 511 | % (pid, self.options.pid_file)) |
|
512 | 512 | |
|
513 | 513 | if self.options.verbose > 0: |
|
514 | 514 | self.out('Entering daemon mode') |
|
515 | 515 | pid = os.fork() |
|
516 | 516 | if pid: |
|
517 | 517 | # The forked process also has a handle on resources, so we |
|
518 | 518 | # *don't* want proper termination of the process; we just

519 | 519 | # want to exit quickly (which os._exit() does)
|
520 | 520 | os._exit(0) |
|
521 | 521 | # Make this the session leader |
|
522 | 522 | os.setsid() |
|
523 | 523 | # Fork again for good measure! |
|
524 | 524 | pid = os.fork() |
|
525 | 525 | if pid: |
|
526 | 526 | os._exit(0) |
|
527 | 527 | |
|
528 | 528 | # @@: Should we set the umask and cwd now? |
|
529 | 529 | |
|
530 | 530 | import resource # Resource usage information. |
|
531 | 531 | maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] |
|
532 | 532 | if maxfd == resource.RLIM_INFINITY: |
|
533 | 533 | maxfd = MAXFD |
|
534 | 534 | # Iterate through and close all file descriptors. |
|
535 | 535 | for fd in range(0, maxfd): |
|
536 | 536 | try: |
|
537 | 537 | os.close(fd) |
|
538 | 538 | except OSError: # ERROR, fd wasn't open to begin with (ignored) |
|
539 | 539 | pass |
|
540 | 540 | |
|
541 | 541 | if hasattr(os, "devnull"): |
|
542 | 542 | REDIRECT_TO = os.devnull |
|
543 | 543 | else: |
|
544 | 544 | REDIRECT_TO = "/dev/null" |
|
545 | 545 | os.open(REDIRECT_TO, os.O_RDWR) # standard input (0) |
|
546 | 546 | # Duplicate standard input to standard output and standard error. |
|
547 | 547 | os.dup2(0, 1) # standard output (1) |
|
548 | 548 | os.dup2(0, 2) # standard error (2) |
|
549 | 549 | |
|
550 | 550 | def _remove_pid_file(self, written_pid, filename, verbosity): |
|
551 | 551 | current_pid = os.getpid() |
|
552 | 552 | if written_pid != current_pid: |
|
553 | 553 | # A forked process must be exiting, not the process that |
|
554 | 554 | # wrote the PID file |
|
555 | 555 | return |
|
556 | 556 | if not os.path.exists(filename): |
|
557 | 557 | return |
|
558 | 558 | with open(filename) as f: |
|
559 | 559 | content = f.read().strip() |
|
560 | 560 | try: |
|
561 | 561 | pid_in_file = int(content) |
|
562 | 562 | except ValueError: |
|
563 | 563 | pass |
|
564 | 564 | else: |
|
565 | 565 | if pid_in_file != current_pid: |
|
566 | 566 | msg = "PID file %s contains %s, not expected PID %s" |
|
567 | 567 | self.out(msg % (filename, pid_in_file, current_pid)) |
|
568 | 568 | return |
|
569 | 569 | if verbosity > 0: |
|
570 | 570 | self.out("Removing PID file %s" % filename) |
|
571 | 571 | try: |
|
572 | 572 | os.unlink(filename) |
|
573 | 573 | return |
|
574 | 574 | except OSError as e: |
|
575 | 575 | # Record, but don't give traceback |
|
576 | 576 | self.out("Cannot remove PID file: (%s)" % e) |
|
577 | 577 | # well, at least let's not leave the invalid PID around...
|
578 | 578 | try: |
|
579 | 579 | with open(filename, 'w') as f: |
|
580 | 580 | f.write('') |
|
581 | 581 | except OSError as e: |
|
582 | 582 | self.out('Stale PID left in file: %s (%s)' % (filename, e)) |
|
583 | 583 | else: |
|
584 | 584 | self.out('Stale PID removed') |
|
585 | 585 | |
|
586 | 586 | def record_pid(self, pid_file): |
|
587 | 587 | pid = os.getpid() |
|
588 | 588 | if self.options.verbose > 1: |
|
589 | 589 | self.out('Writing PID %s to %s' % (pid, pid_file)) |
|
590 | 590 | with open(pid_file, 'w') as f: |
|
591 | 591 | f.write(str(pid)) |
|
592 | 592 | atexit.register(self._remove_pid_file, pid, pid_file, self.options.verbose) |
|
593 | 593 | |
|
594 | 594 | def stop_daemon(self): # pragma: no cover |
|
595 | 595 | pid_file = self.options.pid_file or 'pyramid.pid' |
|
596 | 596 | if not os.path.exists(pid_file): |
|
597 | 597 | self.out('No PID file exists in %s' % pid_file) |
|
598 | 598 | return 1 |
|
599 | 599 | pid = read_pidfile(pid_file) |
|
600 | 600 | if not pid: |
|
601 | 601 | self.out("Not a valid PID file in %s" % pid_file) |
|
602 | 602 | return 1 |
|
603 | 603 | pid = live_pidfile(pid_file) |
|
604 | 604 | if not pid: |
|
605 | 605 | self.out("PID in %s is not valid (deleting)" % pid_file) |
|
606 | 606 | try: |
|
607 | 607 | os.unlink(pid_file) |
|
608 | 608 | except (OSError, IOError) as e: |
|
609 | 609 | self.out("Could not delete: %s" % e) |
|
610 | 610 | return 2 |
|
611 | 611 | return 1 |
|
612 | 612 | for j in range(10): |
|
613 | 613 | if not live_pidfile(pid_file): |
|
614 | 614 | break |
|
615 | 615 | import signal |
|
616 | 616 | kill(pid, signal.SIGTERM) |
|
617 | 617 | time.sleep(1) |
|
618 | 618 | else: |
|
619 | 619 | self.out("Failed to kill web process %s" % pid)
|
620 | 620 | return 3 |
|
621 | 621 | if os.path.exists(pid_file): |
|
622 | 622 | os.unlink(pid_file) |
|
623 | 623 | return 0 |
|
624 | 624 | |
|
625 | 625 | def show_status(self): # pragma: no cover |
|
626 | 626 | pid_file = self.options.pid_file or 'pyramid.pid' |
|
627 | 627 | if not os.path.exists(pid_file): |
|
628 | 628 | self.out('No PID file %s' % pid_file) |
|
629 | 629 | return 1 |
|
630 | 630 | pid = read_pidfile(pid_file) |
|
631 | 631 | if not pid: |
|
632 | 632 | self.out('No PID in file %s' % pid_file) |
|
633 | 633 | return 1 |
|
634 | 634 | pid = live_pidfile(pid_file) |
|
635 | 635 | if not pid: |
|
636 | 636 | self.out('PID %s in %s is not running' % (pid, pid_file)) |
|
637 | 637 | return 1 |
|
638 | 638 | self.out('Server running in PID %s' % pid) |
|
639 | 639 | return 0 |
|
640 | 640 | |
|
641 | 641 | def restart_with_reloader(self): # pragma: no cover |
|
642 | 642 | self.restart_with_monitor(reloader=True) |
|
643 | 643 | |
|
644 | 644 | def restart_with_monitor(self, reloader=False): # pragma: no cover |
|
645 | 645 | if self.options.verbose > 0: |
|
646 | 646 | if reloader: |
|
647 | 647 | self.out('Starting subprocess with file monitor') |
|
648 | 648 | else: |
|
649 | 649 | self.out('Starting subprocess with monitor parent') |
|
650 | 650 | while 1: |
|
651 | 651 | args = [self.quote_first_command_arg(sys.executable)] + sys.argv |
|
652 | 652 | new_environ = os.environ.copy() |
|
653 | 653 | if reloader: |
|
654 | 654 | new_environ[self._reloader_environ_key] = 'true' |
|
655 | 655 | else: |
|
656 | 656 | new_environ[self._monitor_environ_key] = 'true' |
|
657 | 657 | proc = None |
|
658 | 658 | try: |
|
659 | 659 | try: |
|
660 | 660 | _turn_sigterm_into_systemexit() |
|
661 | 661 | proc = subprocess32.Popen(args, env=new_environ) |
|
662 | 662 | exit_code = proc.wait() |
|
663 | 663 | proc = None |
|
664 | 664 | except KeyboardInterrupt: |
|
665 | 665 | self.out('^C caught in monitor process') |
|
666 | 666 | if self.options.verbose > 1: |
|
667 | 667 | raise |
|
668 | 668 | return 1 |
|
669 | 669 | finally: |
|
670 | 670 | if proc is not None: |
|
671 | 671 | import signal |
|
672 | 672 | try: |
|
673 | 673 | kill(proc.pid, signal.SIGTERM) |
|
674 | 674 | except (OSError, IOError): |
|
675 | 675 | pass |
|
676 | 676 | |
|
677 | 677 | if reloader: |
|
678 | 678 | # Reloader always exits with code 3; but if we are |
|
679 | 679 | # a monitor, any exit code will restart |
|
680 | 680 | if exit_code != 3: |
|
681 | 681 | return exit_code |
|
682 | 682 | if self.options.verbose > 0: |
|
683 | 683 | self.out('%s %s %s' % ('-' * 20, 'Restarting', '-' * 20)) |
|
684 | 684 | |
|
685 | 685 | def change_user_group(self, user, group): # pragma: no cover |
|
686 | 686 | if not user and not group: |
|
687 | 687 | return |
|
688 | 688 | import pwd |
|
689 | 689 | import grp |
|
690 | 690 | uid = gid = None |
|
691 | 691 | if group: |
|
692 | 692 | try: |
|
693 | 693 | gid = int(group) |
|
694 | 694 | group = grp.getgrgid(gid).gr_name |
|
695 | 695 | except ValueError: |
|
696 | 696 | try: |
|
697 | 697 | entry = grp.getgrnam(group) |
|
698 | 698 | except KeyError: |
|
699 | 699 | raise ValueError( |
|
700 | 700 | "Bad group: %r; no such group exists" % group) |
|
701 | 701 | gid = entry.gr_gid |
|
702 | 702 | try: |
|
703 | 703 | uid = int(user) |
|
704 | 704 | user = pwd.getpwuid(uid).pw_name |
|
705 | 705 | except ValueError: |
|
706 | 706 | try: |
|
707 | 707 | entry = pwd.getpwnam(user) |
|
708 | 708 | except KeyError: |
|
709 | 709 | raise ValueError( |
|
710 | 710 | "Bad username: %r; no such user exists" % user) |
|
711 | 711 | if not gid: |
|
712 | 712 | gid = entry.pw_gid |
|
713 | 713 | uid = entry.pw_uid |
|
714 | 714 | if self.options.verbose > 0: |
|
715 | 715 | self.out('Changing user to %s:%s (%s:%s)' % ( |
|
716 | 716 | user, group or '(unknown)', uid, gid)) |
|
717 | 717 | if gid: |
|
718 | 718 | os.setgid(gid) |
|
719 | 719 | if uid: |
|
720 | 720 | os.setuid(uid) |
|
721 | 721 | |
|
722 | 722 | |
|
723 | 723 | class LazyWriter(object): |
|
724 | 724 | |
|
725 | 725 | """ |
|
726 | 726 | File-like object that opens a file lazily when it is first written |
|
727 | 727 | to. |
|
728 | 728 | """ |
|
729 | 729 | |
|
730 | 730 | def __init__(self, filename, mode='w'): |
|
731 | 731 | self.filename = filename |
|
732 | 732 | self.fileobj = None |
|
733 | 733 | self.lock = threading.Lock() |
|
734 | 734 | self.mode = mode |
|
735 | 735 | |
|
736 | 736 | def open(self): |
|
737 | 737 | if self.fileobj is None: |
|
738 | 738 | with self.lock: |
|
739 | 739 | self.fileobj = open(self.filename, self.mode) |
|
740 | 740 | return self.fileobj |
|
741 | 741 | |
|
742 | 742 | def close(self): |
|
743 | 743 | fileobj = self.fileobj |
|
744 | 744 | if fileobj is not None: |
|
745 | 745 | fileobj.close() |
|
746 | 746 | |
|
747 | 747 | def __del__(self): |
|
748 | 748 | self.close() |
|
749 | 749 | |
|
750 | 750 | def write(self, text): |
|
751 | 751 | fileobj = self.open() |
|
752 | 752 | fileobj.write(text) |
|
753 | 753 | fileobj.flush() |
|
754 | 754 | |
|
755 | 755 | def writelines(self, text): |
|
756 | 756 | fileobj = self.open() |
|
757 | 757 | fileobj.writelines(text) |
|
758 | 758 | fileobj.flush() |
|
759 | 759 | |
|
760 | 760 | def flush(self): |
|
761 | 761 | self.open().flush() |
|
762 | 762 | |
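
A minimal usage sketch of ``LazyWriter`` (the ``demo.log`` name is hypothetical): no file handle exists until the first write, which is why it is safe to install as ``sys.stdout`` before daemonizing closes file descriptors::

    log = LazyWriter('demo.log', 'a')  # nothing is opened yet
    log.write('first line\n')          # opens 'demo.log', writes, flushes
    log.writelines(['a\n', 'b\n'])     # reuses the already-open handle
    log.close()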
|
763 | 763 | |
|
764 | 764 | def live_pidfile(pidfile): # pragma: no cover |
|
765 | 765 | """ |
|
766 | 766 | (pidfile:str) -> int | None |
|
767 | 767 | Returns an int found in the named file, if there is one, |
|
768 | 768 | and if there is a running process with that process id. |
|
769 | 769 | Returns None if no such process exists.
|
770 | 770 | """ |
|
771 | 771 | pid = read_pidfile(pidfile) |
|
772 | 772 | if pid: |
|
773 | 773 | try: |
|
774 | 774 | kill(int(pid), 0) |
|
775 | 775 | return pid |
|
776 | 776 | except OSError as e: |
|
777 | 777 | if e.errno == errno.EPERM: |
|
778 | 778 | return pid |
|
779 | 779 | return None |
|
780 | 780 | |
|
781 | 781 | |
|
782 | 782 | def read_pidfile(filename): |
|
783 | 783 | if os.path.exists(filename): |
|
784 | 784 | try: |
|
785 | 785 | with open(filename) as f: |
|
786 | 786 | content = f.read() |
|
787 | 787 | return int(content.strip()) |
|
788 | 788 | except (ValueError, IOError): |
|
789 | 789 | return None |
|
790 | 790 | else: |
|
791 | 791 | return None |
|
792 | 792 | |
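
A short sketch of how the two PID helpers compose (``pyramid.pid`` is the default file name used by the command above): ``read_pidfile`` only parses the file, while ``live_pidfile`` additionally probes the process with signal 0, treating ``EPERM`` as "alive"::

    pid = read_pidfile('pyramid.pid')  # int from the file, or None
    if live_pidfile('pyramid.pid'):    # parsed *and* the process is running
        print('daemon already running as PID %s' % pid)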
|
793 | 793 | |
|
794 | 794 | def ensure_port_cleanup( |
|
795 | 795 | bound_addresses, maxtries=30, sleeptime=2): # pragma: no cover |
|
796 | 796 | """ |
|
797 | 797 | This makes sure any open ports are closed. |
|
798 | 798 | |
|
799 | 799 | It does this by connecting to them until the connection is

800 | 800 | refused. Servers should call it like::
|
801 | 801 | |
|
802 | 802 | ensure_port_cleanup([80, 443]) |
|
803 | 803 | """ |
|
804 | 804 | atexit.register(_cleanup_ports, bound_addresses, maxtries=maxtries, |
|
805 | 805 | sleeptime=sleeptime) |
|
806 | 806 | |
|
807 | 807 | |
|
808 | 808 | def _cleanup_ports( |
|
809 | 809 | bound_addresses, maxtries=30, sleeptime=2): # pragma: no cover |
|
810 | 810 | # Wait for the server to bind to the port. |
|
811 | 811 | import socket |
|
812 | 812 | import errno |
|
813 | 813 | for bound_address in bound_addresses: |
|
814 | 814 | for attempt in range(maxtries): |
|
815 | 815 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) |
|
816 | 816 | try: |
|
817 | 817 | sock.connect(bound_address) |
|
818 | 818 | except socket.error as e: |
|
819 | 819 | if e.args[0] != errno.ECONNREFUSED: |
|
820 | 820 | raise |
|
821 | 821 | break |
|
822 | 822 | else: |
|
823 | 823 | time.sleep(sleeptime) |
|
824 | 824 | else: |
|
825 | 825 | raise SystemExit('Timeout waiting for port.') |
|
826 | 826 | sock.close() |
|
827 | 827 | |
|
828 | 828 | |
|
829 | 829 | def _turn_sigterm_into_systemexit(): # pragma: no cover |
|
830 | 830 | """ |
|
831 | 831 | Attempts to turn a SIGTERM exception into a SystemExit exception. |
|
832 | 832 | """ |
|
833 | 833 | try: |
|
834 | 834 | import signal |
|
835 | 835 | except ImportError: |
|
836 | 836 | return |
|
837 | 837 | def handle_term(signo, frame): |
|
838 | 838 | raise SystemExit |
|
839 | 839 | signal.signal(signal.SIGTERM, handle_term) |
|
840 | 840 | |
|
841 | 841 | |
|
842 | 842 | def install_reloader(poll_interval=1, extra_files=None): # pragma: no cover |
|
843 | 843 | """ |
|
844 | 844 | Install the reloading monitor. |
|
845 | 845 | |
|
846 | 846 | On some platforms server threads may not terminate when the main |
|
847 | 847 | thread does, causing ports to remain open/locked. The |
|
848 | 848 | ``raise_keyboard_interrupt`` option creates an unignorable signal

849 | 849 | which causes the whole application to shut down (rudely).
|
850 | 850 | """ |
|
851 | 851 | mon = Monitor(poll_interval=poll_interval) |
|
852 | 852 | if extra_files is None: |
|
853 | 853 | extra_files = [] |
|
854 | 854 | mon.extra_files.extend(extra_files) |
|
855 | 855 | t = threading.Thread(target=mon.periodic_reload) |
|
856 | 856 | t.setDaemon(True) |
|
857 | 857 | t.start() |
|
858 | 858 | |
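
A hedged usage sketch (the ini file name is illustrative): the monitor runs in a daemon thread, and when any watched file changes the whole process exits with status 3 so that a wrapping monitor or shell loop can restart it::

    install_reloader(poll_interval=1,
                     extra_files=['production.ini'])  # watched besides modules
    # ...start the WSGI server; on a file change the monitor calls
    # os._exit(3) and the surrounding restart loop brings the server back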
|
859 | 859 | |
|
860 | 860 | class classinstancemethod(object): |
|
861 | 861 | """ |
|
862 | 862 | Acts like a class method when called from a class, like an |
|
863 | 863 | instance method when called by an instance. The method should |
|
864 | 864 | take two arguments, 'self' and 'cls'; one of these will be None |
|
865 | 865 | depending on how the method was called. |
|
866 | 866 | """ |
|
867 | 867 | |
|
868 | 868 | def __init__(self, func): |
|
869 | 869 | self.func = func |
|
870 | 870 | self.__doc__ = func.__doc__ |
|
871 | 871 | |
|
872 | 872 | def __get__(self, obj, type=None): |
|
873 | 873 | return _methodwrapper(self.func, obj=obj, type=type) |
|
874 | 874 | |
|
875 | 875 | |
|
876 | 876 | class _methodwrapper(object): |
|
877 | 877 | |
|
878 | 878 | def __init__(self, func, obj, type): |
|
879 | 879 | self.func = func |
|
880 | 880 | self.obj = obj |
|
881 | 881 | self.type = type |
|
882 | 882 | |
|
883 | 883 | def __call__(self, *args, **kw): |
|
884 | 884 | assert not 'self' in kw and not 'cls' in kw, ( |
|
885 | 885 | "You cannot use 'self' or 'cls' arguments to a " |
|
886 | 886 | "classinstancemethod") |
|
887 | 887 | return self.func(*((self.obj, self.type) + args), **kw) |
|
888 | 888 | |
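
A small sketch of the dual dispatch (the ``Registry`` class is hypothetical): reached through the class, the method gets ``self=None`` and a bound ``cls``; reached through an instance, both are bound. This is exactly how ``Monitor.watch_file`` below tells the two call styles apart::

    class Registry(object):
        global_items = []

        def __init__(self):
            self.items = []

        def register(self, cls, name):
            if self is None:               # called as Registry.register(...)
                cls.global_items.append(name)
            else:                          # called on an instance
                self.items.append(name)
        register = classinstancemethod(register)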
|
889 | 889 | |
|
890 | 890 | class Monitor(object): # pragma: no cover |
|
891 | 891 | """ |
|
892 | 892 | A file monitor and server restarter. |
|
893 | 893 | |
|
894 | 894 | Use this like: |
|
895 | 895 | |
|
896 | 896 | .. code-block:: python
|
897 | 897 | |
|
898 | 898 | install_reloader() |
|
899 | 899 | |
|
900 | 900 | Then make sure your server is installed with a shell script like:: |
|
901 | 901 | |
|
902 | 902 | err=3 |
|
903 | 903 | while test "$err" -eq 3 ; do |
|
904 | 904 | python server.py |
|
905 | 905 | err="$?" |
|
906 | 906 | done |
|
907 | 907 | |
|
908 | 908 | or is run from this .bat file (if you use Windows):: |
|
909 | 909 | |
|
910 | 910 | @echo off |
|
911 | 911 | :repeat |
|
912 | 912 | python server.py |
|
913 | 913 | if %errorlevel% == 3 goto repeat |
|
914 | 914 | |
|
915 | 915 | or run a monitoring process in Python (``pserve --reload`` does |
|
916 | 916 | this). |
|
917 | 917 | |
|
918 | 918 | Use the ``watch_file(filename)`` function to cause a reload/restart for |
|
919 | 919 | other non-Python files (e.g., configuration files). If you have |
|
920 | 920 | a dynamic set of files that grows over time you can use something like:: |
|
921 | 921 | |
|
922 | 922 | def watch_config_files(): |
|
923 | 923 | return CONFIG_FILE_CACHE.keys() |
|
924 | 924 | add_file_callback(watch_config_files) |
|
925 | 925 | |
|
926 | 926 | Then every time the reloader polls files it will call |
|
927 | 927 | ``watch_config_files`` and check all the filenames it returns. |
|
928 | 928 | """ |
|
929 | 929 | instances = [] |
|
930 | 930 | global_extra_files = [] |
|
931 | 931 | global_file_callbacks = [] |
|
932 | 932 | |
|
933 | 933 | def __init__(self, poll_interval): |
|
934 | 934 | self.module_mtimes = {} |
|
935 | 935 | self.keep_running = True |
|
936 | 936 | self.poll_interval = poll_interval |
|
937 | 937 | self.extra_files = list(self.global_extra_files) |
|
938 | 938 | self.instances.append(self) |
|
939 | 939 | self.file_callbacks = list(self.global_file_callbacks) |
|
940 | 940 | |
|
941 | 941 | def _exit(self): |
|
942 | 942 | # use os._exit() here and not sys.exit() since within a |
|
943 | 943 | # thread sys.exit() just closes the given thread and |
|
944 | 944 | # won't kill the process; note os._exit does not call |
|
945 | 945 | # any atexit callbacks, run finally blocks,

946 | 946 | # flush open files, etc. In other words, it is rude.
|
947 | 947 | os._exit(3) |
|
948 | 948 | |
|
949 | 949 | def periodic_reload(self): |
|
950 | 950 | while True: |
|
951 | 951 | if not self.check_reload(): |
|
952 | 952 | self._exit() |
|
953 | 953 | break |
|
954 | 954 | time.sleep(self.poll_interval) |
|
955 | 955 | |
|
956 | 956 | def check_reload(self): |
|
957 | 957 | filenames = list(self.extra_files) |
|
958 | 958 | for file_callback in self.file_callbacks: |
|
959 | 959 | try: |
|
960 | 960 | filenames.extend(file_callback()) |
|
961 | 961 | except: |
|
962 | 962 | print( |
|
963 | 963 | "Error calling reloader callback %r:" % file_callback) |
|
964 | 964 | traceback.print_exc() |
|
965 | 965 | for module in list(sys.modules.values()): |
|
966 | 966 | try: |
|
967 | 967 | filename = module.__file__ |
|
968 | 968 | except (AttributeError, ImportError): |
|
969 | 969 | continue |
|
970 | 970 | if filename is not None: |
|
971 | 971 | filenames.append(filename) |
|
972 | 972 | |
|
973 | 973 | for filename in filenames: |
|
974 | 974 | try: |
|
975 | 975 | stat = os.stat(filename) |
|
976 | 976 | if stat: |
|
977 | 977 | mtime = stat.st_mtime |
|
978 | 978 | else: |
|
979 | 979 | mtime = 0 |
|
980 | 980 | except (OSError, IOError): |
|
981 | 981 | continue |
|
982 | 982 | if filename.endswith('.pyc') and os.path.exists(filename[:-1]): |
|
983 | 983 | mtime = max(os.stat(filename[:-1]).st_mtime, mtime) |
|
984 | 984 | if not filename in self.module_mtimes: |
|
985 | 985 | self.module_mtimes[filename] = mtime |
|
986 | 986 | elif self.module_mtimes[filename] < mtime: |
|
987 | 987 | print("%s changed; reloading..." % filename) |
|
988 | 988 | run_callback_for_pattern(filename) |
|
989 | 989 | return False |
|
990 | 990 | return True |
|
991 | 991 | |
|
992 | 992 | def watch_file(self, cls, filename): |
|
993 | 993 | """Watch the named file for changes""" |
|
994 | 994 | filename = os.path.abspath(filename) |
|
995 | 995 | if self is None: |
|
996 | 996 | for instance in cls.instances: |
|
997 | 997 | instance.watch_file(filename) |
|
998 | 998 | cls.global_extra_files.append(filename) |
|
999 | 999 | else: |
|
1000 | 1000 | self.extra_files.append(filename) |
|
1001 | 1001 | |
|
1002 | 1002 | watch_file = classinstancemethod(watch_file) |
|
1003 | 1003 | |
|
1004 | 1004 | def add_file_callback(self, cls, callback): |
|
1005 | 1005 | """Add a callback -- a function that takes no parameters -- that will |
|
1006 | 1006 | return a list of filenames to watch for changes.""" |
|
1007 | 1007 | if self is None: |
|
1008 | 1008 | for instance in cls.instances: |
|
1009 | 1009 | instance.add_file_callback(callback) |
|
1010 | 1010 | cls.global_file_callbacks.append(callback) |
|
1011 | 1011 | else: |
|
1012 | 1012 | self.file_callbacks.append(callback) |
|
1013 | 1013 | |
|
1014 | 1014 | add_file_callback = classinstancemethod(add_file_callback) |
|
1015 | 1015 | |
|
1016 | 1016 | watch_file = Monitor.watch_file |
|
1017 | 1017 | add_file_callback = Monitor.add_file_callback |
|
1018 | 1018 | |
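
Thanks to ``classinstancemethod``, the two module-level aliases above work without any ``Monitor`` instance and fan out to every running monitor (the file names are illustrative)::

    watch_file('production.ini')               # restart when the ini changes
    add_file_callback(lambda: ['extra.conf'])  # dynamic watch list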
|
1019 | 1019 | |
|
1020 | 1020 | def main(argv=sys.argv, quiet=False): |
|
1021 | 1021 | command = RcServerCommand(argv, quiet=quiet) |
|
1022 | 1022 | return command.run() |
|
1023 | 1023 | |
|
1024 | 1024 | if __name__ == '__main__': # pragma: no cover |
|
1025 | 1025 | sys.exit(main() or 0) |
@@ -1,349 +1,349 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2018 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | """ |
|
22 | 22 | Helpers for fixture generation |
|
23 | 23 | """ |
|
24 | 24 | |
|
25 | 25 | import os |
|
26 | 26 | import time |
|
27 | 27 | import tempfile |
|
28 | 28 | import shutil |
|
29 | 29 | |
|
30 | 30 | import configobj |
|
31 | 31 | |
|
32 | 32 | from rhodecode.tests import * |
|
33 | 33 | from rhodecode.model.db import Repository, User, RepoGroup, UserGroup, Gist, UserEmailMap |
|
34 | 34 | from rhodecode.model.meta import Session |
|
35 | 35 | from rhodecode.model.repo import RepoModel |
|
36 | 36 | from rhodecode.model.user import UserModel |
|
37 | 37 | from rhodecode.model.repo_group import RepoGroupModel |
|
38 | 38 | from rhodecode.model.user_group import UserGroupModel |
|
39 | 39 | from rhodecode.model.gist import GistModel |
|
40 | 40 | from rhodecode.model.auth_token import AuthTokenModel |
|
41 | 41 | |
|
42 | 42 | dn = os.path.dirname |
|
43 | 43 | FIXTURES = os.path.join(dn(dn(os.path.abspath(__file__))), 'tests', 'fixtures') |
|
44 | 44 | |
|
45 | 45 | |
|
46 | 46 | def error_function(*args, **kwargs): |
|
47 | 47 | raise Exception('Total Crash !') |
|
48 | 48 | |
|
49 | 49 | |
|
50 | 50 | class TestINI(object): |
|
51 | 51 | """ |
|
52 | 52 | Allows to create a new test.ini file as a copy of existing one with edited |
|
53 | 53 | data. Example usage:: |
|
54 | 54 | |
|
55 | 55 | with TestINI('test.ini', [{'section': {'key': 'val'}}]) as new_test_ini:
|
56 | print 'paster server %s' % new_test_ini | 

56 | print('paster server %s' % new_test_ini) | 
|
57 | 57 | """ |
|
58 | 58 | |
|
59 | 59 | def __init__(self, ini_file_path, ini_params, new_file_prefix='DEFAULT', |
|
60 | 60 | destroy=True, dir=None): |
|
61 | 61 | self.ini_file_path = ini_file_path |
|
62 | 62 | self.ini_params = ini_params |
|
63 | 63 | self.new_path = None |
|
64 | 64 | self.new_path_prefix = new_file_prefix |
|
65 | 65 | self._destroy = destroy |
|
66 | 66 | self._dir = dir |
|
67 | 67 | |
|
68 | 68 | def __enter__(self): |
|
69 | 69 | return self.create() |
|
70 | 70 | |
|
71 | 71 | def __exit__(self, exc_type, exc_val, exc_tb): |
|
72 | 72 | self.destroy() |
|
73 | 73 | |
|
74 | 74 | def create(self): |
|
75 | 75 | config = configobj.ConfigObj( |
|
76 | 76 | self.ini_file_path, file_error=True, write_empty_values=True) |
|
77 | 77 | |
|
78 | 78 | for data in self.ini_params: |
|
79 | 79 | section, ini_params = data.items()[0] |
|
80 | 80 | for key, val in ini_params.items(): |
|
81 | 81 | config[section][key] = val |
|
82 | 82 | with tempfile.NamedTemporaryFile( |
|
83 | 83 | prefix=self.new_path_prefix, suffix='.ini', dir=self._dir, |
|
84 | 84 | delete=False) as new_ini_file: |
|
85 | 85 | config.write(new_ini_file) |
|
86 | 86 | self.new_path = new_ini_file.name |
|
87 | 87 | |
|
88 | 88 | return self.new_path |
|
89 | 89 | |
|
90 | 90 | def destroy(self): |
|
91 | 91 | if self._destroy: |
|
92 | 92 | os.remove(self.new_path) |
|
93 | 93 | |
|
94 | 94 | |
|
95 | 95 | class Fixture(object): |
|
96 | 96 | |
|
97 | 97 | def anon_access(self, status): |
|
98 | 98 | """ |
|
99 | 99 | Context manager for disabling anonymous access. Use like:
|
100 | 100 | fixture = Fixture() |
|
101 | 101 | with fixture.anon_access(False): |
|
102 | 102 | #tests |
|
103 | 103 | |
|
104 | 104 | after this block anon access will be set to `not status` |
|
105 | 105 | """ |
|
106 | 106 | |
|
107 | 107 | class context(object): |
|
108 | 108 | def __enter__(self): |
|
109 | 109 | anon = User.get_default_user() |
|
110 | 110 | anon.active = status |
|
111 | 111 | Session().add(anon) |
|
112 | 112 | Session().commit() |
|
113 | 113 | time.sleep(1.5) # must sleep for cache (1s to expire) |
|
114 | 114 | |
|
115 | 115 | def __exit__(self, exc_type, exc_val, exc_tb): |
|
116 | 116 | anon = User.get_default_user() |
|
117 | 117 | anon.active = not status |
|
118 | 118 | Session().add(anon) |
|
119 | 119 | Session().commit() |
|
120 | 120 | |
|
121 | 121 | return context() |
|
122 | 122 | |
|
123 | 123 | def _get_repo_create_params(self, **custom): |
|
124 | 124 | defs = { |
|
125 | 125 | 'repo_name': None, |
|
126 | 126 | 'repo_type': 'hg', |
|
127 | 127 | 'clone_uri': '', |
|
128 | 128 | 'push_uri': '', |
|
129 | 129 | 'repo_group': '-1', |
|
130 | 130 | 'repo_description': 'DESC', |
|
131 | 131 | 'repo_private': False, |
|
132 | 132 | 'repo_landing_rev': 'rev:tip', |
|
133 | 133 | 'repo_copy_permissions': False, |
|
134 | 134 | 'repo_state': Repository.STATE_CREATED, |
|
135 | 135 | } |
|
136 | 136 | defs.update(custom) |
|
137 | 137 | if 'repo_name_full' not in custom: |
|
138 | 138 | defs.update({'repo_name_full': defs['repo_name']}) |
|
139 | 139 | |
|
140 | 140 | # fix the repo name if passed as repo_name_full |
|
141 | 141 | if defs['repo_name']: |
|
142 | 142 | defs['repo_name'] = defs['repo_name'].split('/')[-1] |
|
143 | 143 | |
|
144 | 144 | return defs |
|
145 | 145 | |
|
146 | 146 | def _get_group_create_params(self, **custom): |
|
147 | 147 | defs = { |
|
148 | 148 | 'group_name': None, |
|
149 | 149 | 'group_description': 'DESC', |
|
150 | 150 | 'perm_updates': [], |
|
151 | 151 | 'perm_additions': [], |
|
152 | 152 | 'perm_deletions': [], |
|
153 | 153 | 'group_parent_id': -1, |
|
154 | 154 | 'enable_locking': False, |
|
155 | 155 | 'recursive': False, |
|
156 | 156 | } |
|
157 | 157 | defs.update(custom) |
|
158 | 158 | |
|
159 | 159 | return defs |
|
160 | 160 | |
|
161 | 161 | def _get_user_create_params(self, name, **custom): |
|
162 | 162 | defs = { |
|
163 | 163 | 'username': name, |
|
164 | 164 | 'password': 'qweqwe', |
|
165 | 165 | 'email': '%s+test@rhodecode.org' % name, |
|
166 | 166 | 'firstname': 'TestUser', |
|
167 | 167 | 'lastname': 'Test', |
|
168 | 168 | 'active': True, |
|
169 | 169 | 'admin': False, |
|
170 | 170 | 'extern_type': 'rhodecode', |
|
171 | 171 | 'extern_name': None, |
|
172 | 172 | } |
|
173 | 173 | defs.update(custom) |
|
174 | 174 | |
|
175 | 175 | return defs |
|
176 | 176 | |
|
177 | 177 | def _get_user_group_create_params(self, name, **custom): |
|
178 | 178 | defs = { |
|
179 | 179 | 'users_group_name': name, |
|
180 | 180 | 'user_group_description': 'DESC', |
|
181 | 181 | 'users_group_active': True, |
|
182 | 182 | 'user_group_data': {}, |
|
183 | 183 | } |
|
184 | 184 | defs.update(custom) |
|
185 | 185 | |
|
186 | 186 | return defs |
|
187 | 187 | |
|
188 | 188 | def create_repo(self, name, **kwargs): |
|
189 | 189 | repo_group = kwargs.get('repo_group') |
|
190 | 190 | if isinstance(repo_group, RepoGroup): |
|
191 | 191 | kwargs['repo_group'] = repo_group.group_id |
|
192 | 192 | name = name.split(Repository.NAME_SEP)[-1] |
|
193 | 193 | name = Repository.NAME_SEP.join((repo_group.group_name, name)) |
|
194 | 194 | |
|
195 | 195 | if 'skip_if_exists' in kwargs: |
|
196 | 196 | del kwargs['skip_if_exists'] |
|
197 | 197 | r = Repository.get_by_repo_name(name) |
|
198 | 198 | if r: |
|
199 | 199 | return r |
|
200 | 200 | |
|
201 | 201 | form_data = self._get_repo_create_params(repo_name=name, **kwargs) |
|
202 | 202 | cur_user = kwargs.get('cur_user', TEST_USER_ADMIN_LOGIN) |
|
203 | 203 | RepoModel().create(form_data, cur_user) |
|
204 | 204 | Session().commit() |
|
205 | 205 | repo = Repository.get_by_repo_name(name) |
|
206 | 206 | assert repo |
|
207 | 207 | return repo |
|
208 | 208 | |
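
A typical fixture call in a test, sketched under the assumption that the repo group is created first; ``skip_if_exists`` makes both helpers idempotent across runs::

    fixture = Fixture()
    gr = fixture.create_repo_group('my-group', skip_if_exists=True)
    repo = fixture.create_repo('my-group/demo-repo', repo_group=gr,
                               repo_type='git', skip_if_exists=True)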
|
209 | 209 | def create_fork(self, repo_to_fork, fork_name, **kwargs): |
|
210 | 210 | repo_to_fork = Repository.get_by_repo_name(repo_to_fork) |
|
211 | 211 | |
|
212 | 212 | form_data = self._get_repo_create_params(repo_name=fork_name, |
|
213 | 213 | fork_parent_id=repo_to_fork.repo_id, |
|
214 | 214 | repo_type=repo_to_fork.repo_type, |
|
215 | 215 | **kwargs) |
|
216 | 216 | #TODO: fix it !! |
|
217 | 217 | form_data['description'] = form_data['repo_description'] |
|
218 | 218 | form_data['private'] = form_data['repo_private'] |
|
219 | 219 | form_data['landing_rev'] = form_data['repo_landing_rev'] |
|
220 | 220 | |
|
221 | 221 | owner = kwargs.get('cur_user', TEST_USER_ADMIN_LOGIN) |
|
222 | 222 | RepoModel().create_fork(form_data, cur_user=owner) |
|
223 | 223 | Session().commit() |
|
224 | 224 | r = Repository.get_by_repo_name(fork_name) |
|
225 | 225 | assert r |
|
226 | 226 | return r |
|
227 | 227 | |
|
228 | 228 | def destroy_repo(self, repo_name, **kwargs): |
|
229 | 229 | RepoModel().delete(repo_name, **kwargs) |
|
230 | 230 | Session().commit() |
|
231 | 231 | |
|
232 | 232 | def destroy_repo_on_filesystem(self, repo_name): |
|
233 | 233 | rm_path = os.path.join(RepoModel().repos_path, repo_name) |
|
234 | 234 | if os.path.isdir(rm_path): |
|
235 | 235 | shutil.rmtree(rm_path) |
|
236 | 236 | |
|
237 | 237 | def create_repo_group(self, name, **kwargs): |
|
238 | 238 | if 'skip_if_exists' in kwargs: |
|
239 | 239 | del kwargs['skip_if_exists'] |
|
240 | 240 | gr = RepoGroup.get_by_group_name(group_name=name) |
|
241 | 241 | if gr: |
|
242 | 242 | return gr |
|
243 | 243 | form_data = self._get_group_create_params(group_name=name, **kwargs) |
|
244 | 244 | owner = kwargs.get('cur_user', TEST_USER_ADMIN_LOGIN) |
|
245 | 245 | gr = RepoGroupModel().create( |
|
246 | 246 | group_name=form_data['group_name'], |
|
247 | 247 | group_description=form_data['group_name'], |
|
248 | 248 | owner=owner) |
|
249 | 249 | Session().commit() |
|
250 | 250 | gr = RepoGroup.get_by_group_name(gr.group_name) |
|
251 | 251 | return gr |
|
252 | 252 | |
|
253 | 253 | def destroy_repo_group(self, repogroupid): |
|
254 | 254 | RepoGroupModel().delete(repogroupid) |
|
255 | 255 | Session().commit() |
|
256 | 256 | |
|
257 | 257 | def create_user(self, name, **kwargs): |
|
258 | 258 | if 'skip_if_exists' in kwargs: |
|
259 | 259 | del kwargs['skip_if_exists'] |
|
260 | 260 | user = User.get_by_username(name) |
|
261 | 261 | if user: |
|
262 | 262 | return user |
|
263 | 263 | form_data = self._get_user_create_params(name, **kwargs) |
|
264 | 264 | user = UserModel().create(form_data) |
|
265 | 265 | |
|
266 | 266 | # create token for user |
|
267 | 267 | AuthTokenModel().create( |
|
268 | 268 | user=user, description=u'TEST_USER_TOKEN') |
|
269 | 269 | |
|
270 | 270 | Session().commit() |
|
271 | 271 | user = User.get_by_username(user.username) |
|
272 | 272 | return user |
|
273 | 273 | |
|
274 | 274 | def destroy_user(self, userid): |
|
275 | 275 | UserModel().delete(userid) |
|
276 | 276 | Session().commit() |
|
277 | 277 | |
|
278 | 278 | def create_additional_user_email(self, user, email): |
|
279 | 279 | uem = UserEmailMap() |
|
280 | 280 | uem.user = user |
|
281 | 281 | uem.email = email |
|
282 | 282 | Session().add(uem) |
|
283 | 283 | return uem |
|
284 | 284 | |
|
285 | 285 | def destroy_users(self, userid_iter): |
|
286 | 286 | for user_id in userid_iter: |
|
287 | 287 | if User.get_by_username(user_id): |
|
288 | 288 | UserModel().delete(user_id) |
|
289 | 289 | Session().commit() |
|
290 | 290 | |
|
291 | 291 | def create_user_group(self, name, **kwargs): |
|
292 | 292 | if 'skip_if_exists' in kwargs: |
|
293 | 293 | del kwargs['skip_if_exists'] |
|
294 | 294 | gr = UserGroup.get_by_group_name(group_name=name) |
|
295 | 295 | if gr: |
|
296 | 296 | return gr |
|
297 | 297 | # map the active flag to the real attribute, for API consistency of fixtures
|
298 | 298 | if 'active' in kwargs: |
|
299 | 299 | kwargs['users_group_active'] = kwargs['active'] |
|
300 | 300 | del kwargs['active'] |
|
301 | 301 | form_data = self._get_user_group_create_params(name, **kwargs) |
|
302 | 302 | owner = kwargs.get('cur_user', TEST_USER_ADMIN_LOGIN) |
|
303 | 303 | user_group = UserGroupModel().create( |
|
304 | 304 | name=form_data['users_group_name'], |
|
305 | 305 | description=form_data['user_group_description'], |
|
306 | 306 | owner=owner, active=form_data['users_group_active'], |
|
307 | 307 | group_data=form_data['user_group_data']) |
|
308 | 308 | Session().commit() |
|
309 | 309 | user_group = UserGroup.get_by_group_name(user_group.users_group_name) |
|
310 | 310 | return user_group |
|
311 | 311 | |
|
312 | 312 | def destroy_user_group(self, usergroupid): |
|
313 | 313 | UserGroupModel().delete(user_group=usergroupid, force=True) |
|
314 | 314 | Session().commit() |
|
315 | 315 | |
|
316 | 316 | def create_gist(self, **kwargs): |
|
317 | 317 | form_data = { |
|
318 | 318 | 'description': 'new-gist', |
|
319 | 319 | 'owner': TEST_USER_ADMIN_LOGIN, |
|
320 | 320 | 'gist_type': GistModel.cls.GIST_PUBLIC, |
|
321 | 321 | 'lifetime': -1, |
|
322 | 322 | 'acl_level': Gist.ACL_LEVEL_PUBLIC, |
|
323 | 323 | 'gist_mapping': {'filename1.txt': {'content': 'hello world'},} |
|
324 | 324 | } |
|
325 | 325 | form_data.update(kwargs) |
|
326 | 326 | gist = GistModel().create( |
|
327 | 327 | description=form_data['description'], owner=form_data['owner'], |
|
328 | 328 | gist_mapping=form_data['gist_mapping'], gist_type=form_data['gist_type'], |
|
329 | 329 | lifetime=form_data['lifetime'], gist_acl_level=form_data['acl_level'] |
|
330 | 330 | ) |
|
331 | 331 | Session().commit() |
|
332 | 332 | return gist |
|
333 | 333 | |
|
334 | 334 | def destroy_gists(self, gistid=None): |
|
335 | 335 | for g in GistModel.cls.get_all(): |
|
336 | 336 | if gistid: |
|
337 | 337 | if gistid == g.gist_access_id: |
|
338 | 338 | GistModel().delete(g) |
|
339 | 339 | else: |
|
340 | 340 | GistModel().delete(g) |
|
341 | 341 | Session().commit() |
|
342 | 342 | |
|
343 | 343 | def load_resource(self, resource_name, strip=False): |
|
344 | 344 | with open(os.path.join(FIXTURES, resource_name)) as f: |
|
345 | 345 | source = f.read() |
|
346 | 346 | if strip: |
|
347 | 347 | source = source.strip() |
|
348 | 348 | |
|
349 | 349 | return source |
@@ -1,201 +1,201 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2018 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | from StringIO import StringIO |
|
22 | 22 | |
|
23 | 23 | import pytest |
|
24 | 24 | from mock import patch, Mock |
|
25 | 25 | |
|
26 | 26 | from rhodecode.lib.middleware.simplesvn import SimpleSvn, SimpleSvnApp |
|
27 | 27 | from rhodecode.lib.utils import get_rhodecode_base_path |
|
28 | 28 | |
|
29 | 29 | |
|
30 | 30 | class TestSimpleSvn(object): |
|
31 | 31 | @pytest.fixture(autouse=True) |
|
32 | 32 | def simple_svn(self, baseapp, request_stub): |
|
33 | 33 | base_path = get_rhodecode_base_path() |
|
34 | 34 | self.app = SimpleSvn( |
|
35 | 35 | config={'auth_ret_code': '', 'base_path': base_path}, |
|
36 | 36 | registry=request_stub.registry) |
|
37 | 37 | |
|
38 | 38 | def test_get_config(self): |
|
39 | 39 | extras = {'foo': 'FOO', 'bar': 'BAR'} |
|
40 | 40 | config = self.app._create_config(extras, repo_name='test-repo') |
|
41 | 41 | assert config == extras |
|
42 | 42 | |
|
43 | 43 | @pytest.mark.parametrize( |
|
44 | 44 | 'method', ['OPTIONS', 'PROPFIND', 'GET', 'REPORT']) |
|
45 | 45 | def test_get_action_returns_pull(self, method): |
|
46 | 46 | environment = {'REQUEST_METHOD': method} |
|
47 | 47 | action = self.app._get_action(environment) |
|
48 | 48 | assert action == 'pull' |
|
49 | 49 | |
|
50 | 50 | @pytest.mark.parametrize( |
|
51 | 51 | 'method', [ |
|
52 | 52 | 'MKACTIVITY', 'PROPPATCH', 'PUT', 'CHECKOUT', 'MKCOL', 'MOVE', |
|
53 | 53 | 'COPY', 'DELETE', 'LOCK', 'UNLOCK', 'MERGE' |
|
54 | 54 | ]) |
|
55 | 55 | def test_get_action_returns_push(self, method): |
|
56 | 56 | environment = {'REQUEST_METHOD': method} |
|
57 | 57 | action = self.app._get_action(environment) |
|
58 | 58 | assert action == 'push' |
|
59 | 59 | |
|
60 | 60 | @pytest.mark.parametrize( |
|
61 | 61 | 'path, expected_name', [ |
|
62 | 62 | ('/hello-svn', 'hello-svn'), |
|
63 | 63 | ('/hello-svn/', 'hello-svn'), |
|
64 | 64 | ('/group/hello-svn/', 'group/hello-svn'), |
|
65 | 65 | ('/group/hello-svn/!svn/vcc/default', 'group/hello-svn'), |
|
66 | 66 | ]) |
|
67 | 67 | def test_get_repository_name(self, path, expected_name): |
|
68 | 68 | environment = {'PATH_INFO': path} |
|
69 | 69 | name = self.app._get_repository_name(environment) |
|
70 | 70 | assert name == expected_name |
|
71 | 71 | |
|
72 | 72 | def test_get_repository_name_subfolder(self, backend_svn): |
|
73 | 73 | repo = backend_svn.repo |
|
74 | 74 | environment = { |
|
75 | 75 | 'PATH_INFO': '/{}/path/with/subfolders'.format(repo.repo_name)} |
|
76 | 76 | name = self.app._get_repository_name(environment) |
|
77 | 77 | assert name == repo.repo_name |
|
78 | 78 | |
|
79 | 79 | def test_create_wsgi_app(self): |
|
80 | 80 | with patch.object(SimpleSvn, '_is_svn_enabled') as mock_method: |
|
81 | 81 | mock_method.return_value = False |
|
82 | 82 | with patch('rhodecode.lib.middleware.simplesvn.DisabledSimpleSvnApp') as ( |
|
83 | 83 | wsgi_app_mock): |
|
84 | 84 | config = Mock() |
|
85 | 85 | wsgi_app = self.app._create_wsgi_app( |
|
86 | 86 | repo_path='', repo_name='', config=config) |
|
87 | 87 | |
|
88 | 88 | wsgi_app_mock.assert_called_once_with(config) |
|
89 | 89 | assert wsgi_app == wsgi_app_mock() |
|
90 | 90 | |
|
91 | 91 | def test_create_wsgi_app_when_enabled(self): |
|
92 | 92 | with patch.object(SimpleSvn, '_is_svn_enabled') as mock_method: |
|
93 | 93 | mock_method.return_value = True |
|
94 | 94 | with patch('rhodecode.lib.middleware.simplesvn.SimpleSvnApp') as ( |
|
95 | 95 | wsgi_app_mock): |
|
96 | 96 | config = Mock() |
|
97 | 97 | wsgi_app = self.app._create_wsgi_app( |
|
98 | 98 | repo_path='', repo_name='', config=config) |
|
99 | 99 | |
|
100 | 100 | wsgi_app_mock.assert_called_once_with(config) |
|
101 | 101 | assert wsgi_app == wsgi_app_mock() |
|
102 | 102 | |
|
103 | 103 | |
|
104 | 104 | class TestSimpleSvnApp(object): |
|
105 | 105 | data = '<xml></xml>' |
|
106 | 106 | path = '/group/my-repo' |
|
107 | 107 | wsgi_input = StringIO(data) |
|
108 | 108 | environment = { |
|
109 | 109 | 'HTTP_DAV': ( |
|
110 | 110 | 'http://subversion.tigris.org/xmlns/dav/svn/depth,' |
|
111 | 111 | ' http://subversion.tigris.org/xmlns/dav/svn/mergeinfo'), |
|
112 | 112 | 'HTTP_USER_AGENT': 'SVN/1.8.11 (x86_64-linux) serf/1.3.8', |
|
113 | 113 | 'REQUEST_METHOD': 'OPTIONS', |
|
114 | 114 | 'PATH_INFO': path, |
|
115 | 115 | 'wsgi.input': wsgi_input, |
|
116 | 116 | 'CONTENT_TYPE': 'text/xml', |
|
117 | 117 | 'CONTENT_LENGTH': '130' |
|
118 | 118 | } |
|
119 | 119 | |
|
120 | 120 | def setup_method(self, method): |
|
121 | 121 | self.host = 'http://localhost/' |
|
122 | 122 | base_path = get_rhodecode_base_path() |
|
123 | 123 | self.app = SimpleSvnApp( |
|
124 | 124 | config={'subversion_http_server_url': self.host, |
|
125 | 125 | 'base_path': base_path}) |
|
126 | 126 | |
|
127 | 127 | def test_get_request_headers_with_content_type(self): |
|
128 | 128 | expected_headers = { |
|
129 | 129 | 'Dav': self.environment['HTTP_DAV'], |
|
130 | 130 | 'User-Agent': self.environment['HTTP_USER_AGENT'], |
|
131 | 131 | 'Content-Type': self.environment['CONTENT_TYPE'], |
|
132 | 132 | 'Content-Length': self.environment['CONTENT_LENGTH'] |
|
133 | 133 | } |
|
134 | 134 | headers = self.app._get_request_headers(self.environment) |
|
135 | 135 | assert headers == expected_headers |
|
136 | 136 | |
|
137 | 137 | def test_get_request_headers_without_content_type(self): |
|
138 | 138 | environment = self.environment.copy() |
|
139 | 139 | environment.pop('CONTENT_TYPE') |
|
140 | 140 | expected_headers = { |
|
141 | 141 | 'Dav': environment['HTTP_DAV'], |
|
142 | 142 | 'Content-Length': self.environment['CONTENT_LENGTH'], |
|
143 | 143 | 'User-Agent': environment['HTTP_USER_AGENT'], |
|
144 | 144 | } |
|
145 | 145 | request_headers = self.app._get_request_headers(environment) |
|
146 | 146 | assert request_headers == expected_headers |
|
147 | 147 | |
|
148 | 148 | def test_get_response_headers(self): |
|
149 | 149 | headers = { |
|
150 | 150 | 'Connection': 'keep-alive', |
|
151 | 151 | 'Keep-Alive': 'timeout=5, max=100', |
|
152 | 152 | 'Transfer-Encoding': 'chunked', |
|
153 | 153 | 'Content-Encoding': 'gzip', |
|
154 | 154 | 'MS-Author-Via': 'DAV', |
|
155 | 155 | 'SVN-Supported-Posts': 'create-txn-with-props' |
|
156 | 156 | } |
|
157 | 157 | expected_headers = [ |
|
158 | 158 | ('MS-Author-Via', 'DAV'), |
|
159 | 159 | ('SVN-Supported-Posts', 'create-txn-with-props'), |
|
160 | 160 | ] |
|
161 | 161 | response_headers = self.app._get_response_headers(headers) |
|
162 | 162 | assert sorted(response_headers) == sorted(expected_headers) |
|
163 | 163 | |
|
164 | 164 | def test_get_url(self): |
|
165 | 165 | url = self.app._get_url(self.path) |
|
166 | 166 | expected_url = '{}{}'.format(self.host.strip('/'), self.path) |
|
167 | 167 | assert url == expected_url |
|
168 | 168 | |
|
169 | 169 | def test_call(self): |
|
170 | 170 | start_response = Mock() |
|
171 | 171 | response_mock = Mock() |
|
172 | 172 | response_mock.headers = { |
|
173 | 173 | 'Content-Encoding': 'gzip', |
|
174 | 174 | 'MS-Author-Via': 'DAV', |
|
175 | 175 | 'SVN-Supported-Posts': 'create-txn-with-props' |
|
176 | 176 | } |
|
177 | 177 | response_mock.status_code = 200 |
|
178 | 178 | response_mock.reason = 'OK' |
|
179 | 179 | with patch('rhodecode.lib.middleware.simplesvn.requests.request') as ( |
|
180 | 180 | request_mock): |
|
181 | 181 | request_mock.return_value = response_mock |
|
182 | 182 | self.app(self.environment, start_response) |
|
183 | 183 | |
|
184 | 184 | expected_url = '{}{}'.format(self.host.strip('/'), self.path) |
|
185 | 185 | expected_request_headers = { |
|
186 | 186 | 'Dav': self.environment['HTTP_DAV'], |
|
187 | 187 | 'User-Agent': self.environment['HTTP_USER_AGENT'], |
|
188 | 188 | 'Content-Type': self.environment['CONTENT_TYPE'], |
|
189 | 189 | 'Content-Length': self.environment['CONTENT_LENGTH'] |
|
190 | 190 | } |
|
191 | 191 | expected_response_headers = [ |
|
192 | 192 | ('SVN-Supported-Posts', 'create-txn-with-props'), |
|
193 | 193 | ('MS-Author-Via', 'DAV'), |
|
194 | 194 | ] |
|
195 | 195 | request_mock.assert_called_once_with( |
|
196 | 196 | self.environment['REQUEST_METHOD'], expected_url, |
|
197 | data=self.data, headers=expected_request_headers) | |
|
197 | data=self.data, headers=expected_request_headers, stream=False) | |
|
198 | 198 | response_mock.iter_content.assert_called_once_with(chunk_size=1024) |
|
199 | 199 | args, _ = start_response.call_args |
|
200 | 200 | assert args[0] == '200 OK' |
|
201 | 201 | assert sorted(args[1]) == sorted(expected_response_headers) |
@@ -1,311 +1,312 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2016-2018 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import pytest |
|
22 | 22 | from pygments.lexers import get_lexer_by_name |
|
23 | 23 | |
|
24 | 24 | from rhodecode.tests import no_newline_id_generator |
|
25 | 25 | from rhodecode.lib.codeblocks import ( |
|
26 | 26 | tokenize_string, split_token_stream, rollup_tokenstream, |
|
27 | 27 | render_tokenstream) |
|
28 | 28 | |
|
29 | 29 | |
|
30 | 30 | class TestTokenizeString(object): |
|
31 | 31 | |
|
32 | 32 | python_code = ''' |
|
33 | 33 | import this |
|
34 | 34 | |
|
35 | 35 | var = 6 |
|
36 | print "this" | 

36 | print("this") | 
|
37 | 37 | |
|
38 | 38 | ''' |
|
39 | 39 | |
|
40 | 40 | def test_tokenize_as_python(self): |
|
41 | 41 | lexer = get_lexer_by_name('python') |
|
42 | 42 | tokens = list(tokenize_string(self.python_code, lexer)) |
|
43 | 43 | |
|
44 | 44 | assert tokens == [ |
|
45 | 45 | ('', u'\n'), |
|
46 | 46 | ('', u' '), |
|
47 | 47 | ('kn', u'import'), |
|
48 | 48 | ('', u' '), |
|
49 | 49 | ('nn', u'this'), |
|
50 | 50 | ('', u'\n'), |
|
51 | 51 | ('', u'\n'), |
|
52 | 52 | ('', u' '), |
|
53 | 53 | ('n', u'var'), |
|
54 | 54 | ('', u' '), |
|
55 | 55 | ('o', u'='), |
|
56 | 56 | ('', u' '), |
|
57 | 57 | ('mi', u'6'), |
|
58 | 58 | ('', u'\n'), |
|
59 | 59 | ('', u' '), |
|
60 | 60 | ('k', u'print'), |
|
61 | ('', u' '), | 

61 | ('p', u'('), | 

62 | 62 | ('s2', u'"'), 

63 | 63 | ('s2', u'this'), 

64 | 64 | ('s2', u'"'), 

65 | ('p', u')'), | 
|
65 | 66 | ('', u'\n'), |
|
66 | 67 | ('', u'\n'), |
|
67 | 68 | ('', u' ') |
|
68 | 69 | ] |
|
69 | 70 | |
|
70 | 71 | def test_tokenize_as_text(self): |
|
71 | 72 | lexer = get_lexer_by_name('text') |
|
72 | 73 | tokens = list(tokenize_string(self.python_code, lexer)) |
|
73 | 74 | |
|
74 | 75 | assert tokens == [ |
|
75 | 76 | ('', |
|
76 | u'\n import this\n\n var = 6\n print "this"\n\n ') | 

77 | u'\n import this\n\n var = 6\n print("this")\n\n ') | 
|
77 | 78 | ] |
|
78 | 79 | |
|
79 | 80 | |
|
80 | 81 | class TestSplitTokenStream(object): |
|
81 | 82 | |
|
82 | 83 | def test_split_token_stream(self): |
|
83 | 84 | lines = list(split_token_stream( |
|
84 | 85 | [('type1', 'some\ntext'), ('type2', 'more\n')])) |
|
85 | 86 | |
|
86 | 87 | assert lines == [ |
|
87 | 88 | [('type1', u'some')], |
|
88 | 89 | [('type1', u'text'), ('type2', u'more')], |
|
89 | 90 | [('type2', u'')], |
|
90 | 91 | ] |
|
91 | 92 | |
|
92 | 93 | def test_split_token_stream_single(self): |
|
93 | 94 | lines = list(split_token_stream( |
|
94 | 95 | [('type1', '\n')])) |
|
95 | 96 | |
|
96 | 97 | assert lines == [ |
|
97 | 98 | [('type1', '')], |
|
98 | 99 | [('type1', '')], |
|
99 | 100 | ] |
|
100 | 101 | |
|
101 | 102 | def test_split_token_stream_single_repeat(self): |
|
102 | 103 | lines = list(split_token_stream( |
|
103 | 104 | [('type1', '\n\n\n')])) |
|
104 | 105 | |
|
105 | 106 | assert lines == [ |
|
106 | 107 | [('type1', '')], |
|
107 | 108 | [('type1', '')], |
|
108 | 109 | [('type1', '')], |
|
109 | 110 | [('type1', '')], |
|
110 | 111 | ] |
|
111 | 112 | |
|
112 | 113 | def test_split_token_stream_multiple_repeat(self): |
|
113 | 114 | lines = list(split_token_stream( |
|
114 | 115 | [('type1', '\n\n'), ('type2', '\n\n')])) |
|
115 | 116 | |
|
116 | 117 | assert lines == [ |
|
117 | 118 | [('type1', '')], |
|
118 | 119 | [('type1', '')], |
|
119 | 120 | [('type1', ''), ('type2', '')], |
|
120 | 121 | [('type2', '')], |
|
121 | 122 | [('type2', '')], |
|
122 | 123 | ] |
|
123 | 124 | |
|
124 | 125 | |
|
125 | 126 | class TestRollupTokens(object): |
|
126 | 127 | |
|
127 | 128 | @pytest.mark.parametrize('tokenstream,output', [ |
|
128 | 129 | ([], |
|
129 | 130 | []), |
|
130 | 131 | ([('A', 'hell'), ('A', 'o')], [ |
|
131 | 132 | ('A', [ |
|
132 | 133 | ('', 'hello')]), |
|
133 | 134 | ]), |
|
134 | 135 | ([('A', 'hell'), ('B', 'o')], [ |
|
135 | 136 | ('A', [ |
|
136 | 137 | ('', 'hell')]), |
|
137 | 138 | ('B', [ |
|
138 | 139 | ('', 'o')]), |
|
139 | 140 | ]), |
|
140 | 141 | ([('A', 'hel'), ('A', 'lo'), ('B', ' '), ('A', 'there')], [ |
|
141 | 142 | ('A', [ |
|
142 | 143 | ('', 'hello')]), |
|
143 | 144 | ('B', [ |
|
144 | 145 | ('', ' ')]), |
|
145 | 146 | ('A', [ |
|
146 | 147 | ('', 'there')]), |
|
147 | 148 | ]), |
|
148 | 149 | ]) |
|
149 | 150 | def test_rollup_tokenstream_without_ops(self, tokenstream, output): |
|
150 | 151 | assert list(rollup_tokenstream(tokenstream)) == output |
|
151 | 152 | |
|
152 | 153 | @pytest.mark.parametrize('tokenstream,output', [ |
|
153 | 154 | ([], |
|
154 | 155 | []), |
|
155 | 156 | ([('A', '', 'hell'), ('A', '', 'o')], [ |
|
156 | 157 | ('A', [ |
|
157 | 158 | ('', 'hello')]), |
|
158 | 159 | ]), |
|
159 | 160 | ([('A', '', 'hell'), ('B', '', 'o')], [ |
|
160 | 161 | ('A', [ |
|
161 | 162 | ('', 'hell')]), |
|
162 | 163 | ('B', [ |
|
163 | 164 | ('', 'o')]), |
|
164 | 165 | ]), |
|
165 | 166 | ([('A', '', 'h'), ('B', '', 'e'), ('C', '', 'y')], [ |
|
166 | 167 | ('A', [ |
|
167 | 168 | ('', 'h')]), |
|
168 | 169 | ('B', [ |
|
169 | 170 | ('', 'e')]), |
|
170 | 171 | ('C', [ |
|
171 | 172 | ('', 'y')]), |
|
172 | 173 | ]), |
|
173 | 174 | ([('A', '', 'h'), ('A', '', 'e'), ('C', '', 'y')], [ |
|
174 | 175 | ('A', [ |
|
175 | 176 | ('', 'he')]), |
|
176 | 177 | ('C', [ |
|
177 | 178 | ('', 'y')]), |
|
178 | 179 | ]), |
|
179 | 180 | ([('A', 'ins', 'h'), ('A', 'ins', 'e')], [ |
|
180 | 181 | ('A', [ |
|
181 | 182 | ('ins', 'he') |
|
182 | 183 | ]), |
|
183 | 184 | ]), |
|
184 | 185 | ([('A', 'ins', 'h'), ('A', 'del', 'e')], [ |
|
185 | 186 | ('A', [ |
|
186 | 187 | ('ins', 'h'), |
|
187 | 188 | ('del', 'e') |
|
188 | 189 | ]), |
|
189 | 190 | ]), |
|
190 | 191 | ([('A', 'ins', 'h'), ('B', 'del', 'e'), ('B', 'del', 'y')], [ |
|
191 | 192 | ('A', [ |
|
192 | 193 | ('ins', 'h'), |
|
193 | 194 | ]), |
|
194 | 195 | ('B', [ |
|
195 | 196 | ('del', 'ey'), |
|
196 | 197 | ]), |
|
197 | 198 | ]), |
|
198 | 199 | ([('A', 'ins', 'h'), ('A', 'del', 'e'), ('B', 'del', 'y')], [ |
|
199 | 200 | ('A', [ |
|
200 | 201 | ('ins', 'h'), |
|
201 | 202 | ('del', 'e'), |
|
202 | 203 | ]), |
|
203 | 204 | ('B', [ |
|
204 | 205 | ('del', 'y'), |
|
205 | 206 | ]), |
|
206 | 207 | ]), |
|
207 | 208 | ([('A', '', 'some'), ('A', 'ins', 'new'), ('A', '', 'name')], [ |
|
208 | 209 | ('A', [ |
|
209 | 210 | ('', 'some'), |
|
210 | 211 | ('ins', 'new'), |
|
211 | 212 | ('', 'name'), |
|
212 | 213 | ]), |
|
213 | 214 | ]), |
|
214 | 215 | ]) |
|
215 | 216 | def test_rollup_tokenstream_with_ops(self, tokenstream, output): |
|
216 | 217 | assert list(rollup_tokenstream(tokenstream)) == output |
|
217 | 218 | |
|
218 | 219 | |
|
219 | 220 | class TestRenderTokenStream(object): |
|
220 | 221 | |
|
221 | 222 | @pytest.mark.parametrize('tokenstream,output', [ |
|
222 | 223 | ( |
|
223 | 224 | [], |
|
224 | 225 | '', |
|
225 | 226 | ), |
|
226 | 227 | ( |
|
227 | 228 | [('', '', u'')], |
|
228 | 229 | '<span></span>', |
|
229 | 230 | ), |
|
230 | 231 | ( |
|
231 | 232 | [('', '', u'text')], |
|
232 | 233 | '<span>text</span>', |
|
233 | 234 | ), |
|
234 | 235 | ( |
|
235 | 236 | [('A', '', u'')], |
|
236 | 237 | '<span class="A"></span>', |
|
237 | 238 | ), |
|
238 | 239 | ( |
|
239 | 240 | [('A', '', u'hello')], |
|
240 | 241 | '<span class="A">hello</span>', |
|
241 | 242 | ), |
|
242 | 243 | ( |
|
243 | 244 | [('A', '', u'hel'), ('A', '', u'lo')], |
|
244 | 245 | '<span class="A">hello</span>', |
|
245 | 246 | ), |
|
246 | 247 | ( |
|
247 | 248 | [('A', '', u'two\n'), ('A', '', u'lines')], |
|
248 | 249 | '<span class="A">two\nlines</span>', |
|
249 | 250 | ), |
|
250 | 251 | ( |
|
251 | 252 | [('A', '', u'\nthree\n'), ('A', '', u'lines')], |
|
252 | 253 | '<span class="A">\nthree\nlines</span>', |
|
253 | 254 | ), |
|
254 | 255 | ( |
|
255 | 256 | [('', '', u'\n'), ('A', '', u'line')], |
|
256 | 257 | '<span>\n</span><span class="A">line</span>', |
|
257 | 258 | ), |
|
258 | 259 | ( |
|
259 | 260 | [('', 'ins', u'\n'), ('A', '', u'line')], |
|
260 | 261 | '<span><ins>\n</ins></span><span class="A">line</span>', |
|
261 | 262 | ), |
|
262 | 263 | ( |
|
263 | 264 | [('A', '', u'hel'), ('A', 'ins', u'lo')], |
|
264 | 265 | '<span class="A">hel<ins>lo</ins></span>', |
|
265 | 266 | ), |
|
266 | 267 | ( |
|
267 | 268 | [('A', '', u'hel'), ('A', 'ins', u'l'), ('A', 'ins', u'o')], |
|
268 | 269 | '<span class="A">hel<ins>lo</ins></span>', |
|
269 | 270 | ), |
|
270 | 271 | ( |
|
271 | 272 | [('A', '', u'hel'), ('A', 'ins', u'l'), ('A', 'del', u'o')], |
|
272 | 273 | '<span class="A">hel<ins>l</ins><del>o</del></span>', |
|
273 | 274 | ), |
|
274 | 275 | ( |
|
275 | 276 | [('A', '', u'hel'), ('B', '', u'lo')], |
|
276 | 277 | '<span class="A">hel</span><span class="B">lo</span>', |
|
277 | 278 | ), |
|
278 | 279 | ( |
|
279 | 280 | [('A', '', u'hel'), ('B', 'ins', u'lo')], |
|
280 | 281 | '<span class="A">hel</span><span class="B"><ins>lo</ins></span>', |
|
281 | 282 | ), |
|
282 | 283 | ], ids=no_newline_id_generator) |
|
283 | 284 | def test_render_tokenstream_with_ops(self, tokenstream, output): |
|
284 | 285 | html = render_tokenstream(tokenstream) |
|
285 | 286 | assert html == output |
|
286 | 287 | |
|
287 | 288 | @pytest.mark.parametrize('tokenstream,output', [ |
|
288 | 289 | ( |
|
289 | 290 | [('A', u'hel'), ('A', u'lo')], |
|
290 | 291 | '<span class="A">hello</span>', |
|
291 | 292 | ), |
|
292 | 293 | ( |
|
293 | 294 | [('A', u'hel'), ('A', u'l'), ('A', u'o')], |
|
294 | 295 | '<span class="A">hello</span>', |
|
295 | 296 | ), |
|
296 | 297 | ( |
|
297 | 298 | [('A', u'hel'), ('A', u'l'), ('A', u'o')], |
|
298 | 299 | '<span class="A">hello</span>', |
|
299 | 300 | ), |
|
300 | 301 | ( |
|
301 | 302 | [('A', u'hel'), ('B', u'lo')], |
|
302 | 303 | '<span class="A">hel</span><span class="B">lo</span>', |
|
303 | 304 | ), |
|
304 | 305 | ( |
|
305 | 306 | [('A', u'hel'), ('B', u'lo')], |
|
306 | 307 | '<span class="A">hel</span><span class="B">lo</span>', |
|
307 | 308 | ), |
|
308 | 309 | ]) |
|
309 | 310 | def test_render_tokenstream_without_ops(self, tokenstream, output): |
|
310 | 311 | html = render_tokenstream(tokenstream) |
|
311 | 312 | assert html == output |
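
For orientation, the TestSplitTokenStream cases above fully pin down the splitter's contract: it consumes a flat stream of (token-type, text) tuples and yields one token list per source line, splitting token text on newlines while keeping each fragment's type, and it always emits a final, possibly empty, line. A minimal sketch consistent with those expectations (not necessarily the actual implementation in rhodecode.lib.codeblocks):

    # Sketch of a newline splitter matching the test expectations above.
    def split_token_stream(tokens):
        # Yield one list of (token_type, text) tuples per source line.
        line = []
        for token_type, text in tokens:
            parts = text.split('\n')
            # Every part except the last one closes the current line.
            for part in parts[:-1]:
                line.append((token_type, part))
                yield line
                line = []
            # The last part starts (or continues) the next line.
            line.append((token_type, parts[-1]))
        yield line

This reproduces the edge cases checked above: a lone '\n' yields two lines and '\n\n\n' yields four, because each newline closes a line and the trailing remainder is always emitted as its own (possibly empty) line.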
@@ -1,463 +1,463 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2018 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | """ |
|
22 | 22 | Module to test the performance of pull, push and clone operations. |
|
23 | 23 | |
|
24 | 24 | It works by replaying a group of commits to the repo. |
|
25 | 25 | """ |
|
26 | 26 | |
|
27 | 27 | import argparse |
|
28 | 28 | import collections |
|
29 | 29 | import ConfigParser |
|
30 | 30 | import functools |
|
31 | 31 | import itertools |
|
32 | 32 | import os |
|
33 | 33 | import pprint |
|
34 | 34 | import shutil |
|
35 | 35 | import subprocess32 |
|
36 | 36 | import sys |
|
37 | 37 | import time |
|
38 | 38 | |
|
39 | 39 | import api |
|
40 | 40 | |
|
41 | 41 | |
|
42 | 42 | def mean(container): |
|
43 | 43 | """Return the mean of the container.""" |
|
44 | 44 | if not container: |
|
45 | 45 | return -1.0 |
|
46 | 46 | return sum(container) / len(container) |
|
47 | 47 | |
|
48 | 48 | |
|
49 | 49 | def keep_cwd(f): |
|
50 | 50 | """Decorator that keeps track of the starting working directory.""" |
|
51 | 51 | @functools.wraps(f) |
|
52 | 52 | def wrapped_f(*args, **kwargs): |
|
53 | 53 | cur_dir = os.getcwd() |
|
54 | 54 | try: |
|
55 | 55 | return f(*args, **kwargs) |
|
56 | 56 | finally: |
|
57 | 57 | os.chdir(cur_dir) |
|
58 | 58 | |
|
59 | 59 | return wrapped_f |
|
60 | 60 | |
|
61 | 61 | |
|
62 | 62 | def timed(f): |
|
63 | 63 | """Decorator that returns the time it took to execute the function.""" |
|
64 | 64 | @functools.wraps(f) |
|
65 | 65 | def wrapped_f(*args, **kwargs): |
|
66 | 66 | start_time = time.time() |
|
67 | 67 | try: |
|
68 | 68 | f(*args, **kwargs) |
|
69 | 69 | finally: |
|
70 | 70 | return time.time() - start_time |
|
71 | 71 | |
|
72 | 72 | return wrapped_f |
|
73 | 73 | |
|
74 | 74 | |
|
75 | 75 | def execute(*popenargs, **kwargs): |
|
76 | 76 | """Extension of subprocess.check_output to support writing to stdin.""" |
|
77 | 77 | input = kwargs.pop('stdin', None) |
|
78 | 78 | stdin = None |
|
79 | 79 | if input: |
|
80 | 80 | stdin = subprocess32.PIPE |
|
81 | 81 | #if 'stderr' not in kwargs: |
|
82 | 82 | # kwargs['stderr'] = subprocess32.PIPE |
|
83 | 83 | if 'stdout' in kwargs: |
|
84 | 84 | raise ValueError('stdout argument not allowed, it will be overridden.') |
|
85 | 85 | process = subprocess32.Popen(stdin=stdin, stdout=subprocess32.PIPE, |
|
86 | 86 | *popenargs, **kwargs) |
|
87 | 87 | output, error = process.communicate(input=input) |
|
88 | 88 | retcode = process.poll() |
|
89 | 89 | if retcode: |
|
90 | 90 | cmd = kwargs.get("args") |
|
91 | 91 | if cmd is None: |
|
92 | 92 | cmd = popenargs[0] |
|
93 | print cmd, output, error | |
|
93 | print('{} {} {} '.format(cmd, output, error)) | |
|
94 | 94 | raise subprocess32.CalledProcessError(retcode, cmd, output=output) |
|
95 | 95 | return output |
|
96 | 96 | |
|
97 | 97 | |
|
98 | 98 | def get_repo_name(repo_url): |
|
99 | 99 | """Extract the repo name from its url.""" |
|
100 | 100 | repo_url = repo_url.rstrip('/') |
|
101 | 101 | return repo_url.split('/')[-1].split('.')[0] |
|
102 | 102 | |
|
103 | 103 | |
|
104 | 104 | class TestPerformanceBase(object): |
|
105 | 105 | def __init__(self, base_dir, repo_url, n_commits, max_commits, |
|
106 | 106 | skip_commits): |
|
107 | 107 | self.repo_url = repo_url |
|
108 | 108 | self.repo_name = get_repo_name(self.repo_url) |
|
109 | 109 | self.upstream_repo_name = '%s_upstream' % self.repo_name |
|
110 | 110 | self.base_dir = os.path.abspath(base_dir) |
|
111 | 111 | self.n_commits = n_commits |
|
112 | 112 | self.max_commits = max_commits |
|
113 | 113 | self.skip_commits = skip_commits |
|
114 | 114 | self.push_times = [] |
|
115 | 115 | self.pull_times = [] |
|
116 | 116 | self.empty_pull_times = [] |
|
117 | 117 | self.clone_time = -1.0 |
|
118 | 118 | self.last_commit = None |
|
119 | 119 | |
|
120 | 120 | self.cloned_repo = '' |
|
121 | 121 | self.pull_repo = '' |
|
122 | 122 | self.orig_repo = '' |
|
123 | 123 | |
|
124 | 124 | def run(self): |
|
125 | 125 | try: |
|
126 | 126 | self.test() |
|
127 | 127 | except Exception as error: |
|
128 | print error |
|
|
128 | print(error) | |
|
129 | 129 | finally: |
|
130 | 130 | self.cleanup() |
|
131 | 131 | |
|
132 | print 'Clone time :{}'.format(self.clone_time) |

133 | print 'Push time :{}'.format(mean(self.push_times)) |

134 | print 'Pull time :{}'.format(mean(self.pull_times)) |

135 | print 'Empty pull time:{}'.format(mean(self.empty_pull_times)) |
|
|
132 | print('Clone time :{}'.format(self.clone_time)) | |
|
133 | print('Push time :{}'.format(mean(self.push_times))) | |
|
134 | print('Pull time :{}'.format(mean(self.pull_times))) | |
|
135 | print('Empty pull time:{}'.format(mean(self.empty_pull_times))) | |
|
136 | 136 | |
|
137 | 137 | return { |
|
138 | 138 | 'clone': self.clone_time, |
|
139 | 139 | 'push': mean(self.push_times), |
|
140 | 140 | 'pull': mean(self.pull_times), |
|
141 | 141 | 'empty_pull': mean(self.empty_pull_times), |
|
142 | 142 | } |
|
143 | 143 | |
|
144 | 144 | @keep_cwd |
|
145 | 145 | def test(self): |
|
146 | 146 | os.chdir(self.base_dir) |
|
147 | 147 | |
|
148 | 148 | self.orig_repo = os.path.join(self.base_dir, self.repo_name) |
|
149 | 149 | if not os.path.exists(self.orig_repo): |
|
150 | 150 | self.clone_repo(self.repo_url, default_only=True) |
|
151 | 151 | |
|
152 | 152 | upstream_url = self.create_repo(self.upstream_repo_name, self.repo_type) |
|
153 | 153 | |
|
154 | 154 | self.add_remote(self.orig_repo, upstream_url) |
|
155 | 155 | |
|
156 | 156 | self.pull_repo = os.path.join(self.base_dir, '%s_pull' % self.repo_name) |
|
157 | 157 | self.clone_repo(upstream_url, self.pull_repo) |
|
158 | 158 | |
|
159 | 159 | commits = self.get_commits(self.orig_repo) |
|
160 | 160 | self.last_commit = commits[-1] |
|
161 | 161 | if self.skip_commits: |
|
162 | 162 | self.push( |
|
163 | 163 | self.orig_repo, commits[self.skip_commits - 1], 'upstream') |
|
164 | 164 | commits = commits[self.skip_commits:self.max_commits] |
|
165 | 165 | |
|
166 | print 'Working with %d commits' % len(commits) |

167 | for i in xrange(self.n_commits - 1, len(commits), self.n_commits): |
|
|
166 | print('Working with %d commits' % len(commits)) | |
|
167 | for i in range(self.n_commits - 1, len(commits), self.n_commits): | |
|
168 | 168 | commit = commits[i] |
|
169 | print 'Processing commit %s (%d)' % (commit, i + 1) |
|
|
169 | print('Processing commit %s (%d)' % (commit, i + 1)) | |
|
170 | 170 | self.push_times.append( |
|
171 | 171 | self.push(self.orig_repo, commit, 'upstream')) |
|
172 | 172 | self.check_remote_last_commit_is(commit, upstream_url) |
|
173 | 173 | |
|
174 | 174 | self.pull_times.append(self.pull(self.pull_repo)) |
|
175 | 175 | self.check_local_last_commit_is(commit, self.pull_repo) |
|
176 | 176 | |
|
177 | 177 | self.empty_pull_times.append(self.pull(self.pull_repo)) |
|
178 | 178 | |
|
179 | 179 | self.cloned_repo = os.path.join(self.base_dir, |
|
180 | 180 | '%s_clone' % self.repo_name) |
|
181 | 181 | self.clone_time = self.clone_repo(upstream_url, self.cloned_repo) |
|
182 | 182 | |
|
183 | 183 | def cleanup(self): |
|
184 | 184 | try: |
|
185 | 185 | self.delete_repo(self.upstream_repo_name) |
|
186 | 186 | except api.ApiError: |
|
187 | 187 | # Continue in case we could not delete the repo. Maybe we did not |
|
188 | 188 | # create it in the first place. |
|
189 | 189 | pass |
|
190 | 190 | |
|
191 | 191 | shutil.rmtree(self.pull_repo, ignore_errors=True) |
|
192 | 192 | shutil.rmtree(self.cloned_repo, ignore_errors=True) |
|
193 | 193 | |
|
194 | 194 | if os.path.exists(self.orig_repo): |
|
195 | 195 | self.remove_remote(self.orig_repo) |
|
196 | 196 | |
|
197 | 197 | |
|
198 | 198 | class RhodeCodeMixin(object): |
|
199 | 199 | """Mixin providing the methods to create and delete repos in RhodeCode.""" |
|
200 | 200 | def __init__(self, api_key): |
|
201 | 201 | self.api = api.RCApi(api_key=api_key) |
|
202 | 202 | |
|
203 | 203 | def create_repo(self, repo_name, repo_type): |
|
204 | 204 | return self.api.create_repo(repo_name, repo_type, |
|
205 | 205 | 'Repo for performance testing')
|
206 | 206 | |
|
207 | 207 | def delete_repo(self, repo_name): |
|
208 | 208 | return self.api.delete_repo(repo_name) |
|
209 | 209 | |
|
210 | 210 | |
|
211 | 211 | class GitMixin(object): |
|
212 | 212 | """Mixin providing the git operations.""" |
|
213 | 213 | @timed |
|
214 | 214 | def clone_repo(self, repo_url, destination=None, default_only=False): |
|
215 | 215 | args = ['git', 'clone'] |
|
216 | 216 | if default_only: |
|
217 | 217 | args.extend(['--branch', 'master', '--single-branch']) |
|
218 | 218 | args.append(repo_url) |
|
219 | 219 | if destination: |
|
220 | 220 | args.append(destination) |
|
221 | 221 | execute(args) |
|
222 | 222 | |
|
223 | 223 | @keep_cwd |
|
224 | 224 | def add_remote(self, repo, remote_url, remote_name='upstream'): |
|
225 | 225 | self.remove_remote(repo, remote_name) |
|
226 | 226 | os.chdir(repo) |
|
227 | 227 | execute(['git', 'remote', 'add', remote_name, remote_url]) |
|
228 | 228 | |
|
229 | 229 | @keep_cwd |
|
230 | 230 | def remove_remote(self, repo, remote_name='upstream'): |
|
231 | 231 | os.chdir(repo) |
|
232 | 232 | remotes = execute(['git', 'remote']).split('\n') |
|
233 | 233 | if remote_name in remotes: |
|
234 | 234 | execute(['git', 'remote', 'remove', remote_name]) |
|
235 | 235 | |
|
236 | 236 | @keep_cwd |
|
237 | 237 | def get_commits(self, repo, branch='master'): |
|
238 | 238 | os.chdir(repo) |
|
239 | 239 | commits_list = execute( |
|
240 | 240 | ['git', 'log', '--first-parent', branch, '--pretty=%H']) |
|
241 | 241 | return commits_list.strip().split('\n')[::-1] |
|
242 | 242 | |
|
243 | 243 | @timed |
|
244 | 244 | def push(self, repo, commit, remote_name=None): |
|
245 | 245 | os.chdir(repo) |
|
246 | 246 | try: |
|
247 | 247 | execute(['git', 'reset', '--soft', commit]) |
|
248 | 248 | args = ['git', 'push'] |
|
249 | 249 | if remote_name: |
|
250 | 250 | args.append(remote_name) |
|
251 | 251 | execute(args) |
|
252 | 252 | finally: |
|
253 | 253 | execute(['git', 'reset', '--soft', 'HEAD@{1}']) |
|
254 | 254 | |
|
255 | 255 | @timed |
|
256 | 256 | def pull(self, repo): |
|
257 | 257 | os.chdir(repo) |
|
258 | 258 | execute(['git', 'pull']) |
|
259 | 259 | |
|
260 | 260 | def _remote_last_commit(self, repo_url): |
|
261 | 261 | output = execute(['git', 'ls-remote', repo_url, 'HEAD']) |
|
262 | 262 | return output.split()[0] |
|
263 | 263 | |
|
264 | 264 | def check_remote_last_commit_is(self, commit, repo_url): |
|
265 | 265 | last_remote_commit = self._remote_last_commit(repo_url) |
|
266 | 266 | if last_remote_commit != commit: |
|
267 | 267 | raise Exception('Push did not work, expected commit %s but got %s' % |
|
268 | 268 | (commit, last_remote_commit)) |
|
269 | 269 | |
|
270 | 270 | @keep_cwd |
|
271 | 271 | def _local_last_commit(self, repo): |
|
272 | 272 | os.chdir(repo) |
|
273 | 273 | return execute(['git', 'rev-parse', 'HEAD']).strip() |
|
274 | 274 | |
|
275 | 275 | def check_local_last_commit_is(self, commit, repo): |
|
276 | 276 | last_local_commit = self._local_last_commit(repo) |
|
277 | 277 | if last_local_commit != commit: |
|
278 | 278 | raise Exception('Pull did not work, expected commit %s but got %s' % |
|
279 | 279 | (commit, last_local_commit)) |
|
280 | 280 | |
|
281 | 281 | |
|
282 | 282 | class HgMixin(object): |
|
283 | 283 | """Mixin providing the mercurial operations.""" |
|
284 | 284 | @timed |
|
285 | 285 | def clone_repo(self, repo_url, destination=None, default_only=False): |
|
286 | 286 | args = ['hg', 'clone'] |
|
287 | 287 | if default_only: |
|
288 | 288 | args.extend(['--branch', 'default']) |
|
289 | 289 | args.append(repo_url) |
|
290 | 290 | if destination: |
|
291 | 291 | args.append(destination) |
|
292 | 292 | execute(args) |
|
293 | 293 | |
|
294 | 294 | @keep_cwd |
|
295 | 295 | def add_remote(self, repo, remote_url, remote_name='upstream'): |
|
296 | 296 | self.remove_remote(repo, remote_name) |
|
297 | 297 | os.chdir(repo) |
|
298 | 298 | hgrc = ConfigParser.RawConfigParser() |
|
299 | 299 | hgrc.read('.hg/hgrc') |
|
300 | 300 | hgrc.set('paths', remote_name, remote_url) |
|
301 | 301 | with open('.hg/hgrc', 'w') as f: |
|
302 | 302 | hgrc.write(f) |
|
303 | 303 | |
|
304 | 304 | @keep_cwd |
|
305 | 305 | def remove_remote(self, repo, remote_name='upstream'): |
|
306 | 306 | os.chdir(repo) |
|
307 | 307 | hgrc = ConfigParser.RawConfigParser() |
|
308 | 308 | hgrc.read('.hg/hgrc') |
|
309 | 309 | hgrc.remove_option('paths', remote_name) |
|
310 | 310 | with open('.hg/hgrc', 'w') as f: |
|
311 | 311 | hgrc.write(f) |
|
312 | 312 | |
|
313 | 313 | @keep_cwd |
|
314 | 314 | def get_commits(self, repo, branch='default'): |
|
315 | 315 | os.chdir(repo) |
|
316 | 316 | # See http://stackoverflow.com/questions/15376649/is-there-a-mercurial-equivalent-to-git-log-first-parent |
|
317 | 317 | commits_list = execute(['hg', 'log', '--branch', branch, '--template', |
|
318 | 318 | '{node}\n', '--follow-first']) |
|
319 | 319 | return commits_list.strip().split('\n')[::-1] |
|
320 | 320 | |
|
321 | 321 | @timed |
|
322 | 322 | def push(self, repo, commit, remote_name=None): |
|
323 | 323 | os.chdir(repo) |
|
324 | 324 | args = ['hg', 'push', '--rev', commit, '--new-branch'] |
|
325 | 325 | if remote_name: |
|
326 | 326 | args.append(remote_name) |
|
327 | 327 | execute(args) |
|
328 | 328 | |
|
329 | 329 | @timed |
|
330 | 330 | def pull(self, repo): |
|
331 | 331 | os.chdir(repo) |
|
332 | 332 | execute(['hg', '--config', 'alias.pull=pull', 'pull', '-u']) |
|
333 | 333 | |
|
334 | 334 | def _remote_last_commit(self, repo_url): |
|
335 | 335 | return execute(['hg', 'identify', repo_url])[:12] |
|
336 | 336 | |
|
337 | 337 | def check_remote_last_commit_is(self, commit, repo_url): |
|
338 | 338 | last_remote_commit = self._remote_last_commit(repo_url) |
|
339 | 339 | if not commit.startswith(last_remote_commit): |
|
340 | 340 | raise Exception('Push did not work, expected commit %s but got %s' % |
|
341 | 341 | (commit, last_remote_commit)) |
|
342 | 342 | |
|
343 | 343 | @keep_cwd |
|
344 | 344 | def _local_last_commit(self, repo): |
|
345 | 345 | os.chdir(repo) |
|
346 | 346 | return execute(['hg', 'identify'])[:12] |
|
347 | 347 | |
|
348 | 348 | def check_local_last_commit_is(self, commit, repo): |
|
349 | 349 | last_local_commit = self._local_last_commit(repo) |
|
350 | 350 | if not commit.startswith(last_local_commit): |
|
351 | 351 | raise Exception('Pull did not work, expected commit %s but got %s' % |
|
352 | 352 | (commit, last_local_commit)) |
|
353 | 353 | |
|
354 | 354 | |
|
355 | 355 | class GitTestPerformance(GitMixin, RhodeCodeMixin, TestPerformanceBase): |
|
356 | 356 | def __init__(self, base_dir, repo_url, n_commits, max_commits, skip_commits, |
|
357 | 357 | api_key): |
|
358 | 358 | TestPerformanceBase.__init__(self, base_dir, repo_url, n_commits, |
|
359 | 359 | max_commits, skip_commits) |
|
360 | 360 | RhodeCodeMixin.__init__(self, api_key) |
|
361 | 361 | self.repo_type = 'git' |
|
362 | 362 | |
|
363 | 363 | |
|
364 | 364 | class HgTestPerformance(HgMixin, RhodeCodeMixin, TestPerformanceBase): |
|
365 | 365 | def __init__(self, base_dir, repo_url, n_commits, max_commits, skip_commits, |
|
366 | 366 | api_key): |
|
367 | 367 | TestPerformanceBase.__init__(self, base_dir, repo_url, n_commits, |
|
368 | 368 | max_commits, skip_commits) |
|
369 | 369 | RhodeCodeMixin.__init__(self, api_key) |
|
370 | 370 | self.repo_type = 'hg' |
|
371 | 371 | |
|
372 | 372 | |
|
373 | 373 | def get_test(base_dir, repo_url, repo_type, step, max_commits, skip_commits, |
|
374 | 374 | api_key): |
|
375 | 375 | max_commits = min(10 * step, |
|
376 | 376 | int((max_commits - skip_commits) / step) * step) |
|
377 | 377 | max_commits += skip_commits |
|
378 | 378 | if repo_type == 'git': |
|
379 | 379 | return GitTestPerformance( |
|
380 | 380 | base_dir, repo_url, step, max_commits, skip_commits, api_key) |
|
381 | 381 | elif repo_type == 'hg': |
|
382 | 382 | return HgTestPerformance( |
|
383 | 383 | base_dir, repo_url, step, max_commits, skip_commits, api_key) |
|
384 | 384 | |
|
385 | 385 | |
|
386 | 386 | def main(argv): |
|
387 | 387 | parser = argparse.ArgumentParser( |
|
388 | 388 | description='Performance tests for push/pull/clone for git and ' + |
|
389 | 389 | 'mercurial repos.') |
|
390 | 390 | parser.add_argument( |
|
391 | 391 | '--tests', dest='tests', action='store', required=False, default='all', |
|
392 | 392 | help='The tests to run. Default: all. But could be any comma ' + |
|
393 | 393 | 'separated list with python, hg, kernel or git') |
|
394 | 394 | parser.add_argument( |
|
395 | 395 | '--sizes', dest='sizes', action='store', required=False, |
|
396 | 396 | default='1,10,100,1000,2500', |
|
397 | 397 | help='The sizes to use. Default: 1,10,100,1000,2500') |
|
398 | 398 | parser.add_argument( |
|
399 | 399 | '--dir', dest='dir', action='store', required=True, |
|
400 | 400 | help='The dir where to store the repos') |
|
401 | 401 | parser.add_argument( |
|
402 | 402 | '--api-key', dest='api_key', action='store', required=True, |
|
403 | 403 | help='The api key of RhodeCode') |
|
404 | 404 | options = parser.parse_args(argv[1:]) |
|
405 | print options |
|
|
405 | print(options) | |
|
406 | 406 | |
|
407 | 407 | test_config = { |
|
408 | 408 | 'python': { |
|
409 | 409 | 'url': 'https://hg.python.org/cpython/', |
|
410 | 410 | 'limit': 23322, |
|
411 | 411 | 'type': 'hg', |
|
412 | 412 | # Do not time the first commit, as it is HUGE! |
|
413 | 413 | 'skip': 1, |
|
414 | 414 | }, |
|
415 | 415 | 'hg': { |
|
416 | 416 | 'url': 'http://selenic.com/hg', |
|
417 | 417 | 'limit': 14396, |
|
418 | 418 | 'type': 'hg', |
|
419 | 419 | }, |
|
420 | 420 | 'kernel': { |
|
421 | 421 | 'url': 'https://github.com/torvalds/linux.git', |
|
422 | 422 | 'limit': 46271, |
|
423 | 423 | 'type': 'git', |
|
424 | 424 | }, |
|
425 | 425 | 'git': { |
|
426 | 426 | 'url': 'https://github.com/git/git.git', |
|
427 | 427 | 'limit': 13525, |
|
428 | 428 | 'type': 'git', |
|
429 | 429 | } |
|
430 | 430 | |
|
431 | 431 | } |
|
432 | 432 | |
|
433 | 433 | test_names = options.tests.split(',') |
|
434 | 434 | if test_names == ['all']: |
|
435 | 435 | test_names = test_config.keys() |
|
436 | 436 | if not set(test_names) <= set(test_config.keys()): |
|
437 | 437 | print('Invalid tests: only %s are valid but specified %s' %

438 | 438 | (test_config.keys(), test_names))
|
|
439 | 439 | return 1 |
|
440 | 440 | |
|
441 | 441 | sizes = options.sizes.split(',') |
|
442 | 442 | sizes = map(int, sizes) |
|
443 | 443 | |
|
444 | 444 | base_dir = options.dir |
|
445 | 445 | api_key = options.api_key |
|
446 | 446 | results = collections.defaultdict(dict) |
|
447 | 447 | for test_name, size in itertools.product(test_names, sizes): |
|
448 | 448 | test = get_test(base_dir, |
|
449 | 449 | test_config[test_name]['url'], |
|
450 | 450 | test_config[test_name]['type'], |
|
451 | 451 | size, |
|
452 | 452 | test_config[test_name]['limit'], |
|
453 | 453 | test_config[test_name].get('skip', 0), |
|
454 | 454 | api_key) |
|
455 | print '*' * 80 |

456 | print 'Running performance test: %s with size %d' % (test_name, size) |

457 | print '*' * 80 |
|
|
455 | print('*' * 80) | |
|
456 | print('Running performance test: %s with size %d' % (test_name, size)) | |
|
457 | print('*' * 80) | |
|
458 | 458 | results[test_name][size] = test.run() |
|
459 | 459 | pprint.pprint(dict(results)) |
|
460 | 460 | |
|
461 | 461 | |
|
462 | 462 | if __name__ == '__main__': |
|
463 | 463 | sys.exit(main(sys.argv)) |
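
A note on the measurement machinery above: the timed decorator discards the wrapped function's return value and returns the elapsed wall-clock time instead, which is why push and pull timings can be appended directly to the result lists; and because the return sits in a finally block, it also swallows any exception the operation raises. A small standalone illustration of the same pattern:

    import functools
    import time

    def timed(f):
        # Return elapsed seconds instead of f's result, as in the script above.
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            start_time = time.time()
            try:
                f(*args, **kwargs)
            finally:
                return time.time() - start_time
        return wrapped_f

    @timed
    def slow_operation():
        time.sleep(0.1)

    elapsed = slow_operation()  # roughly 0.1, never None
    print('took %.3f seconds' % elapsed)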
@@ -1,135 +1,135 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2018 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | """ |
|
22 | 22 | Utility to gather certain statistics about a process. |
|
23 | 23 | |
|
24 | 24 | Used to generate data about the memory consumption of the vcsserver. It is |
|
25 | 25 | quite generic and should work for every process. Use the parameter `--help` |
|
26 | 26 | to see all options. |
|
27 | 27 | |
|
28 | 28 | Example call:: |
|
29 | 29 | |
|
30 | 30 | python profile-mem.py --pid=89816 --ae --ae-key=YOUR_API_KEY |
|
31 | 31 | |
|
32 | 32 | """ |
|
33 | 33 | |
|
34 | 34 | |
|
35 | 35 | import argparse |
|
36 | 36 | import json |
|
37 | 37 | import sys |
|
38 | 38 | import time |
|
39 | 39 | |
|
40 | 40 | import datetime |
|
41 | 41 | import requests |
|
42 | 42 | import psutil |
|
43 | 43 | |
|
44 | 44 | import logging |
|
45 | 45 | import socket |
|
46 | 46 | logging.basicConfig(level=logging.DEBUG) |
|
47 | 47 | |
|
48 | 48 | |
|
49 | 49 | def profile(): |
|
50 | 50 | config = parse_options() |
|
51 | 51 | try: |
|
52 | 52 | process = psutil.Process(config.pid) |
|
53 | 53 | except psutil.NoSuchProcess: |
|
54 | print "Process {pid} does not exist!".format(pid=config.pid) |
|
|
54 | print("Process {pid} does not exist!".format(pid=config.pid)) | |
|
55 | 55 | sys.exit(1) |
|
56 | 56 | |
|
57 | 57 | while True: |
|
58 | 58 | stats = process_stats(process) |
|
59 | 59 | dump_stats(stats) |
|
60 | 60 | if config.appenlight: |
|
61 | 61 | client = AppenlightClient( |
|
62 | 62 | url=config.appenlight_url, |
|
63 | 63 | api_key=config.appenlight_api_key) |
|
64 | 64 | client.dump_stats(stats) |
|
65 | 65 | time.sleep(config.interval) |
|
66 | 66 | |
|
67 | 67 | |
|
68 | 68 | def parse_options(): |
|
69 | 69 | parser = argparse.ArgumentParser( |
|
70 | 70 | description=__doc__) |
|
71 | 71 | parser.add_argument( |
|
72 | 72 | '--pid', required=True, type=int, |
|
73 | 73 | help="Process ID to monitor.") |
|
74 | 74 | parser.add_argument( |
|
75 | 75 | '--interval', '-i', type=float, default=5, |
|
76 | 76 | help="Interval in secods.") |
|
77 | 77 | parser.add_argument( |
|
78 | 78 | '--appenlight', '--ae', action='store_true') |
|
79 | 79 | parser.add_argument( |
|
80 | 80 | '--appenlight-url', '--ae-url', |
|
81 | 81 | default='https://ae.rhodecode.com/api/logs', |
|
82 | 82 | help='URL of the Appenlight API endpoint, defaults to "%(default)s".') |
|
83 | 83 | parser.add_argument( |
|
84 | 84 | '--appenlight-api-key', '--ae-key', |
|
85 | 85 | help='API key to use when sending data to appenlight. This has to be ' |
|
86 | 86 | 'set if Appenlight is enabled.') |
|
87 | 87 | return parser.parse_args() |
|
88 | 88 | |
|
89 | 89 | |
|
90 | 90 | def process_stats(process): |
|
91 | 91 | mem = process.memory_info() |
|
92 | 92 | iso_now = datetime.datetime.utcnow().isoformat() |
|
93 | 93 | stats = [ |
|
94 | 94 | {'message': 'Memory stats of process {pid}'.format(pid=process.pid), |
|
95 | 95 | 'namespace': 'process.{pid}'.format(pid=process.pid), |
|
96 | 96 | 'server': socket.getfqdn(socket.gethostname()), |
|
97 | 97 | 'tags': [ |
|
98 | 98 | ['rss', mem.rss], |
|
99 | 99 | ['vms', mem.vms]], |
|
100 | 100 | 'date': iso_now, |
|
101 | 101 | }, |
|
102 | 102 | ] |
|
103 | 103 | return stats |
|
104 | 104 | |
|
105 | 105 | |
|
106 | 106 | def dump_stats(stats): |
|
107 | 107 | for sample in stats: |
|
108 | print json.dumps(sample) |
|
|
108 | print(json.dumps(sample)) | |
|
109 | 109 | |
|
110 | 110 | |
|
111 | 111 | class AppenlightClient(): |
|
112 | 112 | |
|
113 | 113 | url_template = '{url}?protocol_version=0.5' |
|
114 | 114 | |
|
115 | 115 | def __init__(self, url, api_key): |
|
116 | 116 | self.url = self.url_template.format(url=url) |
|
117 | 117 | self.api_key = api_key |
|
118 | 118 | |
|
119 | 119 | def dump_stats(self, stats): |
|
120 | 120 | response = requests.post( |
|
121 | 121 | self.url, |
|
122 | 122 | headers={ |
|
123 | 123 | 'X-appenlight-api-key': self.api_key}, |
|
124 | 124 | data=json.dumps(stats)) |
|
125 | 125 | if not response.status_code == 200: |
|
126 | 126 | logging.error( |
|
127 | 127 | 'Sending to appenlight failed\n%s\n%s', |
|
128 | 128 | response.headers, response.text) |
|
129 | 129 | |
|
130 | 130 | |
|
131 | 131 | if __name__ == '__main__': |
|
132 | 132 | try: |
|
133 | 133 | profile() |
|
134 | 134 | except KeyboardInterrupt: |
|
135 | 135 | pass |
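
The sampling in process_stats comes down to two psutil calls: resolving the pid to a psutil.Process and reading its resident/virtual memory counters, which become the rss and vms tags in the payload. A minimal standalone check, sampling the current process instead of a --pid argument (a sketch, assuming psutil is installed):

    import psutil

    # psutil.Process() with no argument defaults to the current process.
    process = psutil.Process()
    mem = process.memory_info()
    print('rss=%d bytes vms=%d bytes' % (mem.rss, mem.vms))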
@@ -1,155 +1,155 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2018 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | """ |
|
22 | 22 | This is a standalone script which will start VCS and RC. |
|
23 | 23 | |
|
24 | 24 | Performance numbers will be written on each interval to: |
|
25 | 25 | vcs_profileX.csv |
|
26 | 26 | rc_profileX.csv |
|
27 | 27 | |
|
28 | 28 | To stop the script, press Ctrl-C
|
29 | 29 | """ |
|
30 | 30 | |
|
31 | 31 | import datetime |
|
32 | 32 | import os |
|
33 | 33 | import psutil |
|
34 | 34 | import subprocess32 |
|
35 | 35 | import sys |
|
36 | 36 | import time |
|
37 | 37 | import traceback |
|
38 | 38 | import urllib |
|
39 | 39 | |
|
40 | 40 | PROFILING_INTERVAL = 5 |
|
41 | 41 | RC_WEBSITE = "http://localhost:5001/" |
|
42 | 42 | |
|
43 | 43 | |
|
44 | 44 | def get_file(prefix): |
|
45 | 45 | out_file = None |
|
46 | for i in xrange(100): |
|
|
46 | for i in range(100): | |
|
47 | 47 | file_path = "%s_profile%.3d.csv" % (prefix, i) |
|
48 | 48 | if os.path.exists(file_path): |
|
49 | 49 | continue |
|
50 | 50 | out_file = open(file_path, "w") |
|
51 | 51 | out_file.write("Time; CPU %; Memory (MB); Total FDs; Dulwich FDs; Threads\n") |
|
52 | 52 | break |
|
53 | 53 | return out_file |
|
54 | 54 | |
|
55 | 55 | |
|
56 | 56 | def dump_system(): |
|
57 | print "System Overview..." |

58 | print "\nCPU Count: %d (%d real)" % \ |

59 | (psutil.cpu_count(), psutil.cpu_count(logical=False)) |

60 | print "\nDisk:" |

61 | print psutil.disk_usage(os.sep) |

62 | print "\nMemory:" |

63 | print psutil.virtual_memory() |

64 | print "\nMemory (swap):" |

65 | print psutil.swap_memory() |
|
|
57 | print("System Overview...") | |
|
58 | print("\nCPU Count: %d (%d real)" % | |
|
59 | (psutil.cpu_count(), psutil.cpu_count(logical=False))) | |
|
60 | print("\nDisk:") | |
|
61 | print(psutil.disk_usage(os.sep)) | |
|
62 | print("\nMemory:") | |
|
63 | print(psutil.virtual_memory()) | |
|
64 | print("\nMemory (swap):") | |
|
65 | print(psutil.swap_memory()) | |
|
66 | 66 | |
|
67 | 67 | |
|
68 | 68 | def count_dulwich_fds(proc): |
|
69 | 69 | p = subprocess32.Popen(["lsof", "-p", proc.pid], stdout=subprocess32.PIPE) |
|
70 | 70 | out, err = p.communicate() |
|
71 | 71 | |
|
72 | 72 | count = 0 |
|
73 | 73 | for line in out.splitlines(): |
|
74 | 74 | content = line.split() |
|
75 | 75 | # http://git-scm.com/book/en/Git-Internals-Packfiles |
|
76 | 76 | if content[-1].endswith(".idx"): |
|
77 | 77 | count += 1 |
|
78 | 78 | |
|
79 | 79 | return count |
|
80 | 80 | |
|
81 | 81 | def dump_process(pid, out_file): |
|
82 | 82 | now = datetime.datetime.now() |
|
83 | 83 | cpu = pid.cpu_percent() |
|
84 | 84 | mem = pid.memory_info() |
|
85 | 85 | fds = pid.num_fds() |
|
86 | 86 | dulwich_fds = count_dulwich_fds(pid) |
|
87 | 87 | threads = pid.num_threads() |
|
88 | 88 | |
|
89 | 89 | content = [now.strftime('%m/%d/%y %H:%M:%S'), |
|
90 | 90 | cpu, |
|
91 | 91 | "%.2f" % (mem[0]/1024.0/1024.0), |
|
92 | 92 | fds, dulwich_fds, threads] |
|
93 | 93 | out_file.write("; ".join([str(item) for item in content])) |
|
94 | 94 | out_file.write("\n") |
|
95 | 95 | |
|
96 | 96 | |
|
97 | 97 | # Open output files |
|
98 | 98 | vcs_out = get_file("vcs") |
|
99 | 99 | if vcs_out is None: |
|
100 | print "Unable to enumerate output file for VCS" |
|
|
100 | print("Unable to enumerate output file for VCS") | |
|
101 | 101 | sys.exit(1) |
|
102 | 102 | rc_out = get_file("rc") |
|
103 | 103 | if rc_out is None: |
|
104 | print "Unable to enumerate output file for RC" |
|
|
104 | print("Unable to enumerate output file for RC") | |
|
105 | 105 | sys.exit(1) |
|
106 | 106 | |
|
107 | 107 | # Show system information |
|
108 | 108 | dump_system() |
|
109 | 109 | |
|
110 | print "\nStarting VCS..." |
|
|
110 | print("\nStarting VCS...") | |
|
111 | 111 | vcs = psutil.Popen(["vcsserver"]) |
|
112 | 112 | time.sleep(1) |
|
113 | 113 | if not vcs.is_running(): |
|
114 | print "VCS - Failed to start" |
|
|
114 | print("VCS - Failed to start") | |
|
115 | 115 | sys.exit(1) |
|
116 | print "VCS - Ok" |
|
|
116 | print("VCS - Ok") | |
|
117 | 117 | |
|
118 | print "\nStarting RhodeCode..." |
|
|
118 | print("\nStarting RhodeCode...") | |
|
119 | 119 | rc = psutil.Popen("RC_VCSSERVER_TEST_DISABLE=1 paster serve test.ini", |
|
120 | 120 | shell=True, stdin=subprocess32.PIPE) |
|
121 | 121 | time.sleep(1) |
|
122 | 122 | if not rc.is_running(): |
|
123 | print "RC - Failed to start" |
|
|
123 | print("RC - Failed to start") | |
|
124 | 124 | vcs.terminate() |
|
125 | 125 | sys.exit(1) |
|
126 | 126 | |
|
127 | 127 | # Send command to create the databases |
|
128 | 128 | rc.stdin.write("y\n") |
|
129 | 129 | |
|
130 | 130 | # Verify that the website is up |
|
131 | 131 | time.sleep(4) |
|
132 | 132 | try: |
|
133 | 133 | urllib.urlopen(RC_WEBSITE) |
|
134 | 134 | except IOError: |
|
135 | print "RC - Website not started" |
|
|
135 | print("RC - Website not started") | |
|
136 | 136 | vcs.terminate() |
|
137 | 137 | sys.exit(1) |
|
138 | print "RC - Ok" |
|
|
138 | print("RC - Ok") | |
|
139 | 139 | |
|
140 | print "\nProfiling...\n%s\n" % ("-"*80) |
|
|
140 | print("\nProfiling...\n%s\n" % ("-"*80)) | |
|
141 | 141 | while True: |
|
142 | 142 | try: |
|
143 | 143 | dump_process(vcs, vcs_out) |
|
144 | 144 | dump_process(rc, rc_out) |
|
145 | 145 | time.sleep(PROFILING_INTERVAL) |
|
146 | 146 | except Exception: |
|
147 | print traceback.format_exc() |
|
|
147 | print(traceback.format_exc()) | |
|
148 | 148 | break |
|
149 | 149 | |
|
150 | 150 | # Finalize the profiling |
|
151 | 151 | vcs_out.close() |
|
152 | 152 | rc_out.close() |
|
153 | 153 | |
|
154 | 154 | vcs.terminate() |
|
155 | 155 | rc.terminate() |
@@ -1,69 +1,69 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2018 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import timeit |
|
22 | 22 | |
|
23 | 23 | server = "localhost:5000" |
|
24 | 24 | |
|
25 | 25 | pages = [ |
|
26 | 26 | "cpython", |
|
27 | 27 | "cpython/annotate/74236c8bf064188516b32bf95016971227ec72a9/Makefile.pre.in", |
|
28 | 28 | "cpython/changelog", |
|
29 | 29 | "cpython/changeset/e0f681f4ade3af52915d5f32daac97ada580d71a", |
|
30 | 30 | "cpython/compare/tag@v3.4.1rc1...tag@v3.4.1?target_repo=cpython", |
|
31 | 31 | "cpython/files/tip/", |
|
32 | 32 | "cpython/files/74236c8bf064188516b32bf95016971227ec72a9/Grammar", |
|
33 | 33 | "", |
|
34 | 34 | "git", |
|
35 | 35 | "git/annotate/6c4ab27f2378ce67940b4496365043119d7ffff2/gitk-git/.gitignore", |
|
36 | 36 | "git/changelog", |
|
37 | 37 | "git/changeset/d299e9e550c1bf8640907fdba1f03cc585ee71df", |
|
38 | 38 | "git/compare/rev@1200...rev@1300?target_repo=git", |
|
39 | 39 | "git/files/tip/", |
|
40 | 40 | "git/files/6c4ab27f2378ce67940b4496365043119d7ffff2/.gitignore" |
|
41 | 41 | ] |
|
42 | 42 | |
|
43 | 43 | svn_pages = [ |
|
44 | 44 | "svn-apache", |
|
45 | 45 | "svn-apache/annotate/672129/cocoon/trunk/README.txt", |
|
46 | 46 | "svn-apache/changelog", |
|
47 | 47 | "svn-apache/changeset/1164362", |
|
48 | 48 | "svn-apache/compare/rev@1164350...rev@1164360?target_repo=svn-apache", |
|
49 | 49 | "svn-apache/compare/rev@1164300...rev@1164360?target_repo=svn-apache", |
|
50 | 50 | "svn-apache/files/tip/", |
|
51 | 51 | "svn-apache/files/1164363/cocoon/trunk/README.txt", |
|
52 | 52 | ] |
|
53 | 53 | |
|
54 | 54 | # Uncomment to check also svn performance |
|
55 | 55 | # pages = pages + svn_pages |
|
56 | 56 | |
|
57 | 57 | repeat = 10 |
|
58 | 58 | |
|
59 | print "Repeating each URL x%d\n" % repeat |
|
|
59 | print("Repeating each URL x%d\n" % repeat) | |
|
60 | 60 | for page in pages: |
|
61 | 61 | url = "http://%s/%s" % (server, page) |
|
62 | print url |
|
|
62 | print(url) | |
|
63 | 63 | |
|
64 | 64 | stmt = "urllib2.urlopen('%s', timeout=120)" % url |
|
65 | 65 | t = timeit.Timer(stmt=stmt, setup="import urllib2") |
|
66 | 66 | |
|
67 | 67 | result = t.repeat(repeat=repeat, number=1) |
|
68 | print "\t%.3f (min) - %.3f (max) - %.3f (avg)\n" % \ |
|
|
69 | (min(result), max(result), sum(result)/len(result)) | |
|
68 | print("\t%.3f (min) - %.3f (max) - %.3f (avg)\n" % | |
|
69 | (min(result), max(result), sum(result)/len(result))) |
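
The loop above builds a statement string for timeit.Timer and imports urllib2 in the setup argument. timeit also accepts a callable as the statement, which avoids quoting problems if a URL ever contains special characters; a sketch of the same measurement with a hypothetical URL, keeping the script's urllib2 idiom:

    import timeit
    import urllib2

    url = 'http://localhost:5000/cpython'  # hypothetical example page

    t = timeit.Timer(stmt=lambda: urllib2.urlopen(url, timeout=120))
    result = t.repeat(repeat=10, number=1)
    print('%.3f (min) - %.3f (max) - %.3f (avg)' % (
        min(result), max(result), sum(result) / len(result)))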
@@ -1,187 +1,187 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2018 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | """ |
|
22 | 22 | Test for crawling a project for memory usage |
|
23 | 23 | This should be run just as a regular script together
|
24 | 24 | with a watch script that will show memory usage. |
|
25 | 25 | |
|
26 | 26 | watch -n1 ./rhodecode/tests/mem_watch |
|
27 | 27 | """ |
|
28 | 28 | |
|
29 | 29 | |
|
30 | 30 | import cookielib |
|
31 | 31 | import urllib |
|
32 | 32 | import urllib2 |
|
33 | 33 | import time |
|
34 | 34 | import os |
|
35 | 35 | import sys |
|
36 | 36 | from os.path import join as jn |
|
37 | 37 | from os.path import dirname as dn |
|
38 | 38 | |
|
39 | 39 | from sqlalchemy.util import OrderedSet |
|
40 | 40 | |
|
41 | 41 | __here__ = os.path.abspath(__file__) |
|
42 | 42 | __root__ = dn(dn(dn(__here__))) |
|
43 | 43 | sys.path.append(__root__) |
|
44 | 44 | |
|
45 | 45 | from rhodecode.lib import vcs |
|
46 | 46 | from rhodecode.lib.vcs.exceptions import RepositoryError |
|
47 | 47 | |
|
48 | 48 | PASES = 3 |
|
49 | 49 | HOST = 'http://127.0.0.1' |
|
50 | 50 | PORT = 5001 |
|
51 | 51 | BASE_URI = '%s:%s/' % (HOST, PORT) |
|
52 | 52 | |
|
53 | 53 | if len(sys.argv) == 2: |
|
54 | 54 | BASE_URI = sys.argv[1] |
|
55 | 55 | |
|
56 | 56 | if not BASE_URI.endswith('/'): |
|
57 | 57 | BASE_URI += '/' |
|
58 | 58 | |
|
59 | print 'Crawling @ %s' % BASE_URI |
|
|
59 | print('Crawling @ %s' % BASE_URI) | |
|
60 | 60 | BASE_URI += '%s' |
|
61 | 61 | PROJECT_PATH = jn('/', 'home', 'marcink', 'repos') |
|
62 | 62 | PROJECTS = [ |
|
63 | 63 | #'linux-magx-pbranch', |
|
64 | 64 | 'CPython', |
|
65 | 65 | 'rhodecode_tip', |
|
66 | 66 | ] |
|
67 | 67 | |
|
68 | 68 | |
|
69 | 69 | cj = cookielib.FileCookieJar('/tmp/rc_test_cookie.txt') |
|
70 | 70 | o = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj)) |
|
71 | 71 | o.addheaders = [ |
|
72 | 72 | ('User-agent', 'rhodecode-crawler'), |
|
73 | 73 | ('Accept-Language', 'en - us, en;q = 0.5') |
|
74 | 74 | ] |
|
75 | 75 | |
|
76 | 76 | urllib2.install_opener(o) |
|
77 | 77 | |
|
78 | 78 | |
|
79 | 79 | def _get_repo(proj): |
|
80 | 80 | if isinstance(proj, basestring): |
|
81 | 81 | repo = vcs.get_repo(jn(PROJECT_PATH, proj)) |
|
82 | 82 | proj = proj |
|
83 | 83 | else: |
|
84 | 84 | repo = proj |
|
85 | 85 | proj = repo.name |
|
86 | 86 | |
|
87 | 87 | return repo, proj |
|
88 | 88 | |
|
89 | 89 | |
|
90 | 90 | def test_changelog_walk(proj, pages=100): |
|
91 | 91 | repo, proj = _get_repo(proj) |
|
92 | 92 | |
|
93 | 93 | total_time = 0 |
|
94 | 94 | for i in range(1, pages): |
|
95 | 95 | |
|
96 | 96 | page = '/'.join((proj, 'changelog',)) |
|
97 | 97 | |
|
98 | 98 | full_uri = (BASE_URI % page) + '?' + urllib.urlencode({'page': i}) |
|
99 | 99 | s = time.time() |
|
100 | 100 | f = o.open(full_uri) |
|
101 | 101 | |
|
102 | 102 | assert f.url == full_uri, 'URL:%s does not match %s' % (f.url, full_uri) |
|
103 | 103 | |
|
104 | 104 | size = len(f.read()) |
|
105 | 105 | e = time.time() - s |
|
106 | 106 | total_time += e |
|
107 | print 'visited %s size:%s req:%s ms' % (full_uri, size, e) |
|
|
107 | print('visited %s size:%s req:%s ms' % (full_uri, size, e)) | |
|
108 | 108 | |
|
109 | print 'total_time {}'.format(total_time) |

110 | print 'average on req {}'.format(total_time / float(pages)) |
|
|
109 | print('total_time {}'.format(total_time)) | |
|
110 | print('average on req {}'.format(total_time / float(pages))) | |
|
111 | 111 | |
|
112 | 112 | |
|
113 | 113 | def test_commit_walk(proj, limit=None): |
|
114 | 114 | repo, proj = _get_repo(proj) |
|
115 | 115 | |
|
116 | print 'processing', jn(PROJECT_PATH, proj) |
|
|
116 | print('processing', jn(PROJECT_PATH, proj)) | |
|
117 | 117 | total_time = 0 |
|
118 | 118 | |
|
119 | 119 | cnt = 0 |
|
120 | 120 | for i in repo: |
|
121 | 121 | cnt += 1 |
|
122 | 122 | raw_cs = '/'.join((proj, 'changeset', i.raw_id)) |
|
123 | 123 | if limit and limit == cnt: |
|
124 | 124 | break |
|
125 | 125 | |
|
126 | 126 | full_uri = (BASE_URI % raw_cs) |
|
127 | print '%s visiting %s\%s' % (cnt, full_uri, i) |
|
|
127 | print('%s visiting %s\%s' % (cnt, full_uri, i)) | |
|
128 | 128 | s = time.time() |
|
129 | 129 | f = o.open(full_uri) |
|
130 | 130 | size = len(f.read()) |
|
131 | 131 | e = time.time() - s |
|
132 | 132 | total_time += e |
|
133 | print '%s visited %s\%s size:%s req:%s ms' % (cnt, full_uri, i, size, e) |
|
|
133 | print('%s visited %s\%s size:%s req:%s ms' % (cnt, full_uri, i, size, e)) | |
|
134 | 134 | |
|
135 | print 'total_time {}'.format(total_time) |

136 | print 'average on req {}'.format(total_time / float(cnt)) |
|
|
135 | print('total_time {}'.format(total_time)) | |
|
136 | print('average on req {}'.format(total_time / float(cnt))) | |
|
137 | 137 | |
|
138 | 138 | |
|
139 | 139 | def test_files_walk(proj, limit=100): |
|
140 | 140 | repo, proj = _get_repo(proj) |
|
141 | 141 | |
|
142 | print 'processing {}'.format(jn(PROJECT_PATH, proj)) |
|
|
142 | print('processing {}'.format(jn(PROJECT_PATH, proj))) | |
|
143 | 143 | total_time = 0 |
|
144 | 144 | |
|
145 | 145 | paths_ = OrderedSet(['']) |
|
146 | 146 | try: |
|
147 | 147 | tip = repo.get_commit('tip') |
|
148 | 148 | for topnode, dirs, files in tip.walk('/'): |
|
149 | 149 | |
|
150 | 150 | for dir in dirs: |
|
151 | 151 | paths_.add(dir.path) |
|
152 | 152 | for f in dir: |
|
153 | 153 | paths_.add(f.path) |
|
154 | 154 | |
|
155 | 155 | for f in files: |
|
156 | 156 | paths_.add(f.path) |
|
157 | 157 | |
|
158 | 158 | except RepositoryError as e: |
|
159 | 159 | pass |
|
160 | 160 | |
|
161 | 161 | cnt = 0 |
|
162 | 162 | for f in paths_: |
|
163 | 163 | cnt += 1 |
|
164 | 164 | if limit and limit == cnt: |
|
165 | 165 | break |
|
166 | 166 | |
|
167 | 167 | file_path = '/'.join((proj, 'files', 'tip', f)) |
|
168 | 168 | full_uri = (BASE_URI % file_path) |
|
169 | print '%s visiting %s' % (cnt, full_uri) |
|
|
169 | print('%s visiting %s' % (cnt, full_uri)) | |
|
170 | 170 | s = time.time() |
|
171 | 171 | f = o.open(full_uri) |
|
172 | 172 | size = len(f.read()) |
|
173 | 173 | e = time.time() - s |
|
174 | 174 | total_time += e |
|
175 | print '%s visited OK size:%s req:%s ms' % (cnt, size, e) |
|
|
175 | print('%s visited OK size:%s req:%s ms' % (cnt, size, e)) | |
|
176 | 176 | |
|
177 | print 'total_time {}'.format(total_time) |

178 | print 'average on req {}'.format(total_time / float(cnt)) |
|
|
177 | print('total_time {}'.format(total_time)) | |
|
178 | print('average on req {}'.format(total_time / float(cnt))) | |
|
179 | 179 | |
|
180 | 180 | if __name__ == '__main__': |
|
181 | 181 | for path in PROJECTS: |
|
182 | 182 | repo = vcs.get_repo(jn(PROJECT_PATH, path)) |
|
183 | 183 | for i in range(PASES): |
|
184 | print 'PASS %s/%s' % (i, PASES) |
|
|
184 | print('PASS %s/%s' % (i, PASES)) | |
|
185 | 185 | test_changelog_walk(repo, pages=80) |
|
186 | 186 | test_commit_walk(repo, limit=100) |
|
187 | 187 | test_files_walk(repo, limit=100) |
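
For reference, the paginated URLs visited by test_changelog_walk are built by interpolating the page path into BASE_URI and appending a page query parameter; in isolation, with hypothetical values:

    import urllib

    BASE_URI = 'http://127.0.0.1:5001/%s'  # as configured above
    page = 'rhodecode_tip/changelog'       # hypothetical project page
    full_uri = (BASE_URI % page) + '?' + urllib.urlencode({'page': 3})
    # -> http://127.0.0.1:5001/rhodecode_tip/changelog?page=3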