@@ -1,219 +1,218 @@
#!/usr/bin/env python
# encoding: utf-8
# files controller for pylons
# Copyright (C) 2009-2010 Marcin Kuzminski <marcin@python-works.com>

# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License or (at your opinion) any later version of the license.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Created on April 21, 2010
files controller for pylons
@author: marcink
"""
from mercurial import archival
from pylons import request, response, session, tmpl_context as c, url
from pylons.i18n.translation import _
from pylons.controllers.util import redirect
from rhodecode.lib.auth import LoginRequired, HasRepoPermissionAnyDecorator
from rhodecode.lib.base import BaseController, render
from rhodecode.lib.utils import EmptyChangeset
from rhodecode.model.hg import HgModel
from vcs.exceptions import RepositoryError, ChangesetError
from vcs.nodes import FileNode
from vcs.utils import diffs as differ
import logging
import rhodecode.lib.helpers as h
import tempfile

log = logging.getLogger(__name__)

class FilesController(BaseController):

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    def __before__(self):
        super(FilesController, self).__before__()
        c.file_size_limit = 250 * 1024 #limit of file size to display

    def index(self, repo_name, revision, f_path):
        hg_model = HgModel()
-        repo = hg_model.get_repo(c.repo_name)
+        c.repo = hg_model.get_repo(c.repo_name)
        revision = request.POST.get('at_rev', None) or revision

        def get_next_rev(cur):
            max_rev = len(c.repo.revisions) - 1
            r = cur + 1
            if r > max_rev:
                r = max_rev
            return r

        def get_prev_rev(cur):
            r = cur - 1
            return r

        c.f_path = f_path


        try:
-            c.changeset = repo.get_changeset(revision)
+            c.changeset = c.repo.get_changeset(revision)
            cur_rev = c.changeset.revision
-            prev_rev = repo.get_changeset(get_prev_rev(cur_rev)).raw_id
-            next_rev = repo.get_changeset(get_next_rev(cur_rev)).raw_id
+            prev_rev = c.repo.get_changeset(get_prev_rev(cur_rev)).raw_id
+            next_rev = c.repo.get_changeset(get_next_rev(cur_rev)).raw_id

            c.url_prev = url('files_home', repo_name=c.repo_name,
                             revision=prev_rev, f_path=f_path)
            c.url_next = url('files_home', repo_name=c.repo_name,
                             revision=next_rev, f_path=f_path)

            try:
                c.files_list = c.changeset.get_node(f_path)
-                c.file_history = self._get_history(repo, c.files_list, f_path)
+                c.file_history = self._get_history(c.repo, c.files_list, f_path)

            except RepositoryError, e:
                h.flash(str(e), category='warning')
                redirect(h.url('files_home', repo_name=repo_name, revision=revision))

        except RepositoryError, e:
            h.flash(str(e), category='warning')
            redirect(h.url('files_home', repo_name=repo_name, revision='tip'))



        return render('files/files.html')

    def rawfile(self, repo_name, revision, f_path):
        hg_model = HgModel()
        c.repo = hg_model.get_repo(c.repo_name)
        file_node = c.repo.get_changeset(revision).get_node(f_path)
        response.content_type = file_node.mimetype
        response.content_disposition = 'attachment; filename=%s' \
            % f_path.split('/')[-1]
        return file_node.content

    def raw(self, repo_name, revision, f_path):
        hg_model = HgModel()
        c.repo = hg_model.get_repo(c.repo_name)
        file_node = c.repo.get_changeset(revision).get_node(f_path)
        response.content_type = 'text/plain'

        return file_node.content

    def annotate(self, repo_name, revision, f_path):
        hg_model = HgModel()
        c.repo = hg_model.get_repo(c.repo_name)
-        cs = c.repo.get_changeset(revision)
-        c.file = cs.get_node(f_path)
-
-        c.cur_rev = cs.raw_id
-        c.rev_nr = cs.revision
+        c.cs = c.repo.get_changeset(revision)
+        c.file = c.cs.get_node(f_path)
+        c.file_history = self._get_history(c.repo, c.file, f_path)
+
        c.f_path = f_path

        return render('files/files_annotate.html')

    def archivefile(self, repo_name, revision, fileformat):
        archive_specs = {
            '.tar.bz2': ('application/x-tar', 'tbz2'),
            '.tar.gz': ('application/x-tar', 'tgz'),
            '.zip': ('application/zip', 'zip'),
        }
        if not archive_specs.has_key(fileformat):
            return 'Unknown archive type %s' % fileformat

        def read_in_chunks(file_object, chunk_size=1024 * 40):
            """Lazy function (generator) to read a file piece by piece.
            Default chunk size: 40k."""
            while True:
                data = file_object.read(chunk_size)
                if not data:
                    break
                yield data

        archive = tempfile.TemporaryFile()
        repo = HgModel().get_repo(repo_name).repo
        fname = '%s-%s%s' % (repo_name, revision, fileformat)
        archival.archive(repo, archive, revision, archive_specs[fileformat][1],
                         prefix='%s-%s' % (repo_name, revision))
        response.content_type = archive_specs[fileformat][0]
        response.content_disposition = 'attachment; filename=%s' % fname
        archive.seek(0)
        return read_in_chunks(archive)

    def diff(self, repo_name, f_path):
        hg_model = HgModel()
        diff1 = request.GET.get('diff1')
        diff2 = request.GET.get('diff2')
        c.action = request.GET.get('diff')
        c.no_changes = diff1 == diff2
        c.f_path = f_path
        c.repo = hg_model.get_repo(c.repo_name)

        try:
            if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]:
                c.changeset_1 = c.repo.get_changeset(diff1)
                node1 = c.changeset_1.get_node(f_path)
            else:
                c.changeset_1 = EmptyChangeset()
                node1 = FileNode('.', '', changeset=c.changeset_1)

            if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]:
                c.changeset_2 = c.repo.get_changeset(diff2)
                node2 = c.changeset_2.get_node(f_path)
            else:
                c.changeset_2 = EmptyChangeset()
                node2 = FileNode('.', '', changeset=c.changeset_2)
        except RepositoryError:
            return redirect(url('files_home',
                                repo_name=c.repo_name, f_path=f_path))

        f_udiff = differ.get_udiff(node1, node2)
        diff = differ.DiffProcessor(f_udiff)

        if c.action == 'download':
            diff_name = '%s_vs_%s.diff' % (diff1, diff2)
            response.content_type = 'text/plain'
            response.content_disposition = 'attachment; filename=%s' \
                % diff_name
            return diff.raw_diff()

        elif c.action == 'raw':
            response.content_type = 'text/plain'
            return diff.raw_diff()

        elif c.action == 'diff':
            if node1.size > c.file_size_limit or node2.size > c.file_size_limit:
                c.cur_diff = _('Diff is to big to display')
            else:
                c.cur_diff = diff.as_html()
        else:
            #default option
            if node1.size > c.file_size_limit or node2.size > c.file_size_limit:
                c.cur_diff = _('Diff is to big to display')
            else:
                c.cur_diff = diff.as_html()

        if not c.cur_diff: c.no_changes = True
        return render('files/file_diff.html')

    def _get_history(self, repo, node, f_path):
        from vcs.nodes import NodeKind
        if not node.kind is NodeKind.FILE:
            return []
        changesets = node.history
        hist_l = []
        for chs in changesets:
            n_desc = 'r%s:%s' % (chs.revision, chs.short_id)
            hist_l.append((chs.raw_id, n_desc,))
        return hist_l
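
The `archivefile` action above writes the requested archive into a `tempfile.TemporaryFile` via mercurial's `archival.archive` and then returns the `read_in_chunks` generator, so the response body is streamed back in 40 kB pieces instead of being loaded into memory at once. A minimal, self-contained sketch of that streaming pattern (the dummy payload below merely stands in for the archive data):

    import tempfile

    def read_in_chunks(file_object, chunk_size=1024 * 40):
        # Lazily read a file piece by piece; 40k per chunk by default.
        while True:
            data = file_object.read(chunk_size)
            if not data:
                break
            yield data

    # Usage sketch: write some bytes, rewind, and stream them back out.
    archive = tempfile.TemporaryFile()
    archive.write(b'x' * (1024 * 100))   # stand-in for archival.archive() output
    archive.seek(0)
    total = sum(len(chunk) for chunk in read_in_chunks(archive))
    print(total)                          # 102400
    archive.close()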
@@ -1,336 +1,338 @@
from celery.decorators import task

from operator import itemgetter
from pylons.i18n.translation import _
from rhodecode.lib.celerylib import run_task, locked_task
from rhodecode.lib.helpers import person
from rhodecode.lib.smtp_mailer import SmtpMailer
from rhodecode.lib.utils import OrderedDict
from time import mktime
from vcs.backends.hg import MercurialRepository
from vcs.backends.git import GitRepository
import os
import traceback
from vcs.backends import get_repo
from vcs.utils.helpers import get_scm

try:
    import json
except ImportError:
    #python 2.5 compatibility
    import simplejson as json

try:
    from celeryconfig import PYLONS_CONFIG as config
    celery_on = True
except ImportError:
    #if celeryconfig is not present let's just load our pylons
    #config instead
    from pylons import config
    celery_on = False


__all__ = ['whoosh_index', 'get_commits_stats',
           'reset_user_password', 'send_email']

def get_session():
    if celery_on:
        from sqlalchemy import engine_from_config
        from sqlalchemy.orm import sessionmaker, scoped_session
        engine = engine_from_config(dict(config.items('app:main')), 'sqlalchemy.db1.')
        sa = scoped_session(sessionmaker(bind=engine))
    else:
        #If we don't use celery reuse our current application Session
        from rhodecode.model.meta import Session
        sa = Session()

    return sa

def get_hg_settings():
    from rhodecode.model.db import RhodeCodeSettings
    sa = get_session()
    ret = sa.query(RhodeCodeSettings).all()

    if not ret:
        raise Exception('Could not get application settings !')
    settings = {}
    for each in ret:
        settings['rhodecode_' + each.app_settings_name] = each.app_settings_value

    return settings

def get_hg_ui_settings():
    from rhodecode.model.db import RhodeCodeUi
    sa = get_session()
    ret = sa.query(RhodeCodeUi).all()

    if not ret:
        raise Exception('Could not get application ui settings !')
    settings = {}
    for each in ret:
        k = each.ui_key
        v = each.ui_value
        if k == '/':
            k = 'root_path'

        if k.find('.') != -1:
            k = k.replace('.', '_')

        if each.ui_section == 'hooks':
            v = each.ui_active

        settings[each.ui_section + '_' + k] = v

    return settings

@task
@locked_task
def whoosh_index(repo_location, full_index):
    log = whoosh_index.get_logger()
    from rhodecode.lib.indexers.daemon import WhooshIndexingDaemon
-    WhooshIndexingDaemon(repo_location=repo_location).run(full_index=full_index)
+    index_location = ''
+    WhooshIndexingDaemon(index_location=index_location,
+                         repo_location=repo_location).run(full_index=full_index)

@task
@locked_task
def get_commits_stats(repo_name, ts_min_y, ts_max_y):
    from rhodecode.model.db import Statistics, Repository
    log = get_commits_stats.get_logger()
    author_key_cleaner = lambda k: person(k).replace('"', "") #for js data compatibilty

    commits_by_day_author_aggregate = {}
    commits_by_day_aggregate = {}
    repos_path = get_hg_ui_settings()['paths_root_path']
    p = os.path.join(repos_path, repo_name)
    repo = get_repo(p)

    skip_date_limit = True
    parse_limit = 250 #limit for single task changeset parsing optimal for
    last_rev = 0
    last_cs = None
    timegetter = itemgetter('time')

    sa = get_session()

    dbrepo = sa.query(Repository)\
        .filter(Repository.repo_name == repo_name).scalar()
    cur_stats = sa.query(Statistics)\
        .filter(Statistics.repository == dbrepo).scalar()
    if cur_stats:
        last_rev = cur_stats.stat_on_revision
    if not repo.revisions:
        return True

    if last_rev == repo.revisions[-1] and len(repo.revisions) > 1:
        #pass silently without any work if we're not on first revision or current
        #state of parsing revision(from db marker) is the last revision
        return True

    if cur_stats:
        commits_by_day_aggregate = OrderedDict(
            json.loads(
                cur_stats.commit_activity_combined))
        commits_by_day_author_aggregate = json.loads(cur_stats.commit_activity)

    log.debug('starting parsing %s', parse_limit)
    lmktime = mktime

    for cnt, rev in enumerate(repo.revisions[last_rev:]):
        last_cs = cs = repo.get_changeset(rev)
        k = '%s-%s-%s' % (cs.date.timetuple()[0], cs.date.timetuple()[1],
                          cs.date.timetuple()[2])
        timetupple = [int(x) for x in k.split('-')]
        timetupple.extend([0 for _ in xrange(6)])
        k = lmktime(timetupple)
        if commits_by_day_author_aggregate.has_key(author_key_cleaner(cs.author)):
            try:
                l = [timegetter(x) for x in commits_by_day_author_aggregate\
                        [author_key_cleaner(cs.author)]['data']]
                time_pos = l.index(k)
            except ValueError:
                time_pos = False

            if time_pos >= 0 and time_pos is not False:

                datadict = commits_by_day_author_aggregate\
                    [author_key_cleaner(cs.author)]['data'][time_pos]

                datadict["commits"] += 1
                datadict["added"] += len(cs.added)
                datadict["changed"] += len(cs.changed)
                datadict["removed"] += len(cs.removed)

            else:
                if k >= ts_min_y and k <= ts_max_y or skip_date_limit:

                    datadict = {"time":k,
                                "commits":1,
                                "added":len(cs.added),
                                "changed":len(cs.changed),
                                "removed":len(cs.removed),
                                }
                    commits_by_day_author_aggregate\
                        [author_key_cleaner(cs.author)]['data'].append(datadict)

        else:
            if k >= ts_min_y and k <= ts_max_y or skip_date_limit:
                commits_by_day_author_aggregate[author_key_cleaner(cs.author)] = {
                    "label":author_key_cleaner(cs.author),
                    "data":[{"time":k,
                             "commits":1,
                             "added":len(cs.added),
                             "changed":len(cs.changed),
                             "removed":len(cs.removed),
                             }],
                    "schema":["commits"],
                    }

        #gather all data by day
        if commits_by_day_aggregate.has_key(k):
            commits_by_day_aggregate[k] += 1
        else:
            commits_by_day_aggregate[k] = 1

        if cnt >= parse_limit:
            #don't fetch to much data since we can freeze application
            break
    overview_data = []
    for k, v in commits_by_day_aggregate.items():
        overview_data.append([k, v])
    overview_data = sorted(overview_data, key=itemgetter(0))
    if not commits_by_day_author_aggregate:
        commits_by_day_author_aggregate[author_key_cleaner(repo.contact)] = {
            "label":author_key_cleaner(repo.contact),
            "data":[0, 1],
            "schema":["commits"],
            }

    stats = cur_stats if cur_stats else Statistics()
    stats.commit_activity = json.dumps(commits_by_day_author_aggregate)
    stats.commit_activity_combined = json.dumps(overview_data)

    log.debug('last revison %s', last_rev)
    leftovers = len(repo.revisions[last_rev:])
    log.debug('revisions to parse %s', leftovers)

    if last_rev == 0 or leftovers < parse_limit:
        stats.languages = json.dumps(__get_codes_stats(repo_name))

    stats.repository = dbrepo
    stats.stat_on_revision = last_cs.revision

    try:
        sa.add(stats)
        sa.commit()
    except:
        log.error(traceback.format_exc())
        sa.rollback()
        return False
    if len(repo.revisions) > 1:
        run_task(get_commits_stats, repo_name, ts_min_y, ts_max_y)

    return True

@task
def reset_user_password(user_email):
    log = reset_user_password.get_logger()
    from rhodecode.lib import auth
    from rhodecode.model.db import User

    try:
        try:
            sa = get_session()
            user = sa.query(User).filter(User.email == user_email).scalar()
            new_passwd = auth.PasswordGenerator().gen_password(8,
                            auth.PasswordGenerator.ALPHABETS_BIG_SMALL)
            if user:
                user.password = auth.get_crypt_password(new_passwd)
                sa.add(user)
                sa.commit()
                log.info('change password for %s', user_email)
            if new_passwd is None:
                raise Exception('unable to generate new password')

        except:
            log.error(traceback.format_exc())
            sa.rollback()

        run_task(send_email, user_email,
                 "Your new rhodecode password",
                 'Your new rhodecode password:%s' % (new_passwd))
        log.info('send new password mail to %s', user_email)


    except:
        log.error('Failed to update user password')
        log.error(traceback.format_exc())
    return True

@task
def send_email(recipients, subject, body):
    log = send_email.get_logger()
    email_config = dict(config.items('DEFAULT'))
    mail_from = email_config.get('app_email_from')
    user = email_config.get('smtp_username')
    passwd = email_config.get('smtp_password')
    mail_server = email_config.get('smtp_server')
    mail_port = email_config.get('smtp_port')
    tls = email_config.get('smtp_use_tls')
    ssl = False

    try:
        m = SmtpMailer(mail_from, user, passwd, mail_server,
                       mail_port, ssl, tls)
        m.send(recipients, subject, body)
    except:
        log.error('Mail sending failed')
        log.error(traceback.format_exc())
        return False
    return True

@task
def create_repo_fork(form_data, cur_user):
    from rhodecode.model.repo import RepoModel
    from vcs import get_backend
    log = create_repo_fork.get_logger()
    repo_model = RepoModel(get_session())
    repo_model.create(form_data, cur_user, just_db=True, fork=True)
    repo_name = form_data['repo_name']
    repos_path = get_hg_ui_settings()['paths_root_path']
    repo_path = os.path.join(repos_path, repo_name)
    repo_fork_path = os.path.join(repos_path, form_data['fork_name'])
    alias = form_data['repo_type']

    log.info('creating repo fork %s as %s', repo_name, repo_path)
    backend = get_backend(alias)
    backend(str(repo_fork_path), create=True, src_url=str(repo_path))

def __get_codes_stats(repo_name):
    LANGUAGES_EXTENSIONS = ['action', 'adp', 'ashx', 'asmx',
        'aspx', 'asx', 'axd', 'c', 'cfg', 'cfm', 'cpp', 'cs', 'diff', 'do', 'el',
        'erl', 'h', 'java', 'js', 'jsp', 'jspx', 'lisp', 'lua', 'm', 'mako', 'ml',
        'pas', 'patch', 'php', 'php3', 'php4', 'phtml', 'pm', 'py', 'rb', 'rst',
        's', 'sh', 'tpl', 'txt', 'vim', 'wss', 'xhtml', 'xml', 'xsl', 'xslt', 'yaws']


    repos_path = get_hg_ui_settings()['paths_root_path']
    p = os.path.join(repos_path, repo_name)
    repo = get_repo(p)
    tip = repo.get_changeset()
    code_stats = {}

    def aggregate(cs):
        for f in cs[2]:
            k = f.mimetype
            if f.extension in LANGUAGES_EXTENSIONS:
                if code_stats.has_key(k):
                    code_stats[k] += 1
                else:
                    code_stats[k] = 1

    map(aggregate, tip.walk('/'))

    return code_stats or {}
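
In `get_commits_stats` above, each changeset is bucketed by calendar day: the commit date is flattened to a 'year-month-day' string, padded back out to a nine-field time tuple, and passed through `time.mktime`, so every commit from the same day maps to the same numeric key. A small sketch of that keying step with an illustrative date (the value below is made up, not taken from any changeset):

    from datetime import datetime
    from time import mktime

    commit_date = datetime(2010, 4, 21, 15, 30)  # illustrative commit timestamp
    tt = commit_date.timetuple()
    k = '%s-%s-%s' % (tt[0], tt[1], tt[2])       # '2010-4-21' -- time of day dropped
    day_fields = [int(x) for x in k.split('-')]
    day_fields.extend([0 for _ in range(6)])     # pad back out to a 9-field time tuple
    day_key = mktime(tuple(day_fields))          # identical key for every commit that day
    print(day_key)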
@@ -1,445 +1,445 | |||||
1 | """Helper functions |
|
1 | """Helper functions | |
2 |
|
2 | |||
3 | Consists of functions to typically be used within templates, but also |
|
3 | Consists of functions to typically be used within templates, but also | |
4 | available to Controllers. This module is available to both as 'h'. |
|
4 | available to Controllers. This module is available to both as 'h'. | |
5 | """ |
|
5 | """ | |
6 | from pygments.formatters import HtmlFormatter |
|
6 | from pygments.formatters import HtmlFormatter | |
7 | from pygments import highlight as code_highlight |
|
7 | from pygments import highlight as code_highlight | |
8 | from pylons import url, app_globals as g |
|
8 | from pylons import url, app_globals as g | |
9 | from pylons.i18n.translation import _, ungettext |
|
9 | from pylons.i18n.translation import _, ungettext | |
10 | from vcs.utils.annotate import annotate_highlight |
|
10 | from vcs.utils.annotate import annotate_highlight | |
11 | from webhelpers.html import literal, HTML, escape |
|
11 | from webhelpers.html import literal, HTML, escape | |
12 | from webhelpers.html.tools import * |
|
12 | from webhelpers.html.tools import * | |
13 | from webhelpers.html.builder import make_tag |
|
13 | from webhelpers.html.builder import make_tag | |
14 | from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \ |
|
14 | from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \ | |
15 | end_form, file, form, hidden, image, javascript_link, link_to, link_to_if, \ |
|
15 | end_form, file, form, hidden, image, javascript_link, link_to, link_to_if, \ | |
16 | link_to_unless, ol, required_legend, select, stylesheet_link, submit, text, \ |
|
16 | link_to_unless, ol, required_legend, select, stylesheet_link, submit, text, \ | |
17 | password, textarea, title, ul, xml_declaration, radio |
|
17 | password, textarea, title, ul, xml_declaration, radio | |
18 | from webhelpers.html.tools import auto_link, button_to, highlight, js_obfuscate, \ |
|
18 | from webhelpers.html.tools import auto_link, button_to, highlight, js_obfuscate, \ | |
19 | mail_to, strip_links, strip_tags, tag_re |
|
19 | mail_to, strip_links, strip_tags, tag_re | |
20 | from webhelpers.number import format_byte_size, format_bit_size |
|
20 | from webhelpers.number import format_byte_size, format_bit_size | |
21 | from webhelpers.pylonslib import Flash as _Flash |
|
21 | from webhelpers.pylonslib import Flash as _Flash | |
22 | from webhelpers.pylonslib.secure_form import secure_form |
|
22 | from webhelpers.pylonslib.secure_form import secure_form | |
23 | from webhelpers.text import chop_at, collapse, convert_accented_entities, \ |
|
23 | from webhelpers.text import chop_at, collapse, convert_accented_entities, \ | |
24 | convert_misc_entities, lchop, plural, rchop, remove_formatting, \ |
|
24 | convert_misc_entities, lchop, plural, rchop, remove_formatting, \ | |
25 | replace_whitespace, urlify, truncate, wrap_paragraphs |
|
25 | replace_whitespace, urlify, truncate, wrap_paragraphs | |
26 | from webhelpers.date import time_ago_in_words |
|
26 | from webhelpers.date import time_ago_in_words | |
27 |
|
27 | |||
28 | #Custom helpers here :) |
|
28 | #Custom helpers here :) | |
29 | class _Link(object): |
|
29 | class _Link(object): | |
30 | ''' |
|
30 | ''' | |
31 | Make a url based on label and url with help of url_for |
|
31 | Make a url based on label and url with help of url_for | |
32 | :param label:name of link if not defined url is used |
|
32 | :param label:name of link if not defined url is used | |
33 | :param url: the url for link |
|
33 | :param url: the url for link | |
34 | ''' |
|
34 | ''' | |
35 |
|
35 | |||
36 | def __call__(self, label='', *url_, **urlargs): |
|
36 | def __call__(self, label='', *url_, **urlargs): | |
37 | if label is None or '': |
|
37 | if label is None or '': | |
38 | label = url |
|
38 | label = url | |
39 | link_fn = link_to(label, url(*url_, **urlargs)) |
|
39 | link_fn = link_to(label, url(*url_, **urlargs)) | |
40 | return link_fn |
|
40 | return link_fn | |
41 |
|
41 | |||
42 | link = _Link() |
|
42 | link = _Link() | |
43 |
|
43 | |||
44 | class _GetError(object): |
|
44 | class _GetError(object): | |
45 |
|
45 | |||
46 | def __call__(self, field_name, form_errors): |
|
46 | def __call__(self, field_name, form_errors): | |
47 | tmpl = """<span class="error_msg">%s</span>""" |
|
47 | tmpl = """<span class="error_msg">%s</span>""" | |
48 | if form_errors and form_errors.has_key(field_name): |
|
48 | if form_errors and form_errors.has_key(field_name): | |
49 | return literal(tmpl % form_errors.get(field_name)) |
|
49 | return literal(tmpl % form_errors.get(field_name)) | |
50 |
|
50 | |||
51 | get_error = _GetError() |
|
51 | get_error = _GetError() | |
52 |
|
52 | |||
53 | def recursive_replace(str, replace=' '): |
|
53 | def recursive_replace(str, replace=' '): | |
54 | """ |
|
54 | """ | |
55 | Recursive replace of given sign to just one instance |
|
55 | Recursive replace of given sign to just one instance | |
56 | :param str: given string |
|
56 | :param str: given string | |
57 | :param replace:char to find and replace multiple instances |
|
57 | :param replace:char to find and replace multiple instances | |
58 |
|
58 | |||
59 | Examples:: |
|
59 | Examples:: | |
60 | >>> recursive_replace("Mighty---Mighty-Bo--sstones",'-') |
|
60 | >>> recursive_replace("Mighty---Mighty-Bo--sstones",'-') | |
61 | 'Mighty-Mighty-Bo-sstones' |
|
61 | 'Mighty-Mighty-Bo-sstones' | |
62 | """ |
|
62 | """ | |
63 |
|
63 | |||
64 | if str.find(replace * 2) == -1: |
|
64 | if str.find(replace * 2) == -1: | |
65 | return str |
|
65 | return str | |
66 | else: |
|
66 | else: | |
67 | str = str.replace(replace * 2, replace) |
|
67 | str = str.replace(replace * 2, replace) | |
68 | return recursive_replace(str, replace) |
|
68 | return recursive_replace(str, replace) | |
69 |
|
69 | |||
70 | class _ToolTip(object): |
|
70 | class _ToolTip(object): | |
71 |
|
71 | |||
72 | def __call__(self, tooltip_title, trim_at=50): |
|
72 | def __call__(self, tooltip_title, trim_at=50): | |
73 | """ |
|
73 | """ | |
74 | Special function just to wrap our text into nice formatted autowrapped |
|
74 | Special function just to wrap our text into nice formatted autowrapped | |
75 | text |
|
75 | text | |
76 | :param tooltip_title: |
|
76 | :param tooltip_title: | |
77 | """ |
|
77 | """ | |
78 |
|
78 | |||
79 | return wrap_paragraphs(escape(tooltip_title), trim_at)\ |
|
79 | return wrap_paragraphs(escape(tooltip_title), trim_at)\ | |
80 | .replace('\n', '<br/>') |
|
80 | .replace('\n', '<br/>') | |
81 |
|
81 | |||
82 | def activate(self): |
|
82 | def activate(self): | |
83 | """ |
|
83 | """ | |
84 | Adds tooltip mechanism to the given Html all tooltips have to have |
|
84 | Adds tooltip mechanism to the given Html all tooltips have to have | |
85 | set class tooltip and set attribute tooltip_title. |
|
85 | set class tooltip and set attribute tooltip_title. | |
86 | Then a tooltip will be generated based on that |
|
86 | Then a tooltip will be generated based on that | |
87 | All with yui js tooltip |
|
87 | All with yui js tooltip | |
88 | """ |
|
88 | """ | |
89 |
|
89 | |||
90 | js = ''' |
|
90 | js = ''' | |
91 | YAHOO.util.Event.onDOMReady(function(){ |
|
91 | YAHOO.util.Event.onDOMReady(function(){ | |
92 | function toolTipsId(){ |
|
92 | function toolTipsId(){ | |
93 | var ids = []; |
|
93 | var ids = []; | |
94 | var tts = YAHOO.util.Dom.getElementsByClassName('tooltip'); |
|
94 | var tts = YAHOO.util.Dom.getElementsByClassName('tooltip'); | |
95 |
|
95 | |||
96 | for (var i = 0; i < tts.length; i++) { |
|
96 | for (var i = 0; i < tts.length; i++) { | |
97 | //if element doesn not have and id autgenerate one for tooltip |
|
97 | //if element doesn not have and id autgenerate one for tooltip | |
98 |
|
98 | |||
99 | if (!tts[i].id){ |
|
99 | if (!tts[i].id){ | |
100 | tts[i].id='tt'+i*100; |
|
100 | tts[i].id='tt'+i*100; | |
101 | } |
|
101 | } | |
102 | ids.push(tts[i].id); |
|
102 | ids.push(tts[i].id); | |
103 | } |
|
103 | } | |
104 | return ids |
|
104 | return ids | |
105 | }; |
|
105 | }; | |
106 | var myToolTips = new YAHOO.widget.Tooltip("tooltip", { |
|
106 | var myToolTips = new YAHOO.widget.Tooltip("tooltip", { | |
107 | context: toolTipsId(), |
|
107 | context: toolTipsId(), | |
108 | monitorresize:false, |
|
108 | monitorresize:false, | |
109 | xyoffset :[0,0], |
|
109 | xyoffset :[0,0], | |
110 | autodismissdelay:300000, |
|
110 | autodismissdelay:300000, | |
111 | hidedelay:5, |
|
111 | hidedelay:5, | |
112 | showdelay:20, |
|
112 | showdelay:20, | |
113 | }); |
|
113 | }); | |
114 |
|
114 | |||
115 | //Mouse Over event disabled for new repositories since they dont |
|
115 | //Mouse Over event disabled for new repositories since they dont | |
116 | //have last commit message |
|
116 | //have last commit message | |
117 | myToolTips.contextMouseOverEvent.subscribe( |
|
117 | myToolTips.contextMouseOverEvent.subscribe( | |
118 | function(type, args) { |
|
118 | function(type, args) { | |
119 | var context = args[0]; |
|
119 | var context = args[0]; | |
120 | var txt = context.getAttribute('tooltip_title'); |
|
120 | var txt = context.getAttribute('tooltip_title'); | |
121 | if(txt){ |
|
121 | if(txt){ | |
122 | return true; |
|
122 | return true; | |
123 | } |
|
123 | } | |
124 | else{ |
|
124 | else{ | |
125 | return false; |
|
125 | return false; | |
126 | } |
|
126 | } | |
127 | }); |
|
127 | }); | |
128 |
|
128 | |||
129 |
|
129 | |||
130 | // Set the text for the tooltip just before we display it. Lazy method |
|
130 | // Set the text for the tooltip just before we display it. Lazy method | |
131 | myToolTips.contextTriggerEvent.subscribe( |
|
131 | myToolTips.contextTriggerEvent.subscribe( | |
132 | function(type, args) { |
|
132 | function(type, args) { | |
133 |
|
133 | |||
134 |
|
134 | |||
135 | var context = args[0]; |
|
135 | var context = args[0]; | |
136 |
|
136 | |||
137 | var txt = context.getAttribute('tooltip_title'); |
|
137 | var txt = context.getAttribute('tooltip_title'); | |
138 | this.cfg.setProperty("text", txt); |
|
138 | this.cfg.setProperty("text", txt); | |
139 |
|
139 | |||
140 |
|
140 | |||
141 | // positioning of tooltip |
|
141 | // positioning of tooltip | |
142 | var tt_w = this.element.clientWidth; |
|
142 | var tt_w = this.element.clientWidth; | |
143 | var tt_h = this.element.clientHeight; |
|
143 | var tt_h = this.element.clientHeight; | |
144 |
|
144 | |||
145 | var context_w = context.offsetWidth; |
|
145 | var context_w = context.offsetWidth; | |
146 | var context_h = context.offsetHeight; |
|
146 | var context_h = context.offsetHeight; | |
147 |
|
147 | |||
148 | var pos_x = YAHOO.util.Dom.getX(context); |
|
148 | var pos_x = YAHOO.util.Dom.getX(context); | |
149 | var pos_y = YAHOO.util.Dom.getY(context); |
|
149 | var pos_y = YAHOO.util.Dom.getY(context); | |
150 |
|
150 | |||
151 | var display_strategy = 'top'; |
|
151 | var display_strategy = 'top'; | |
152 | var xy_pos = [0,0]; |
|
152 | var xy_pos = [0,0]; | |
153 | switch (display_strategy){ |
|
153 | switch (display_strategy){ | |
154 |
|
154 | |||
155 | case 'top': |
|
155 | case 'top': | |
156 | var cur_x = (pos_x+context_w/2)-(tt_w/2); |
|
156 | var cur_x = (pos_x+context_w/2)-(tt_w/2); | |
157 | var cur_y = pos_y-tt_h-4; |
|
157 | var cur_y = pos_y-tt_h-4; | |
158 | xy_pos = [cur_x,cur_y]; |
|
158 | xy_pos = [cur_x,cur_y]; | |
159 | break; |
|
159 | break; | |
160 | case 'bottom': |
|
160 | case 'bottom': | |
161 | var cur_x = (pos_x+context_w/2)-(tt_w/2); |
|
161 | var cur_x = (pos_x+context_w/2)-(tt_w/2); | |
162 | var cur_y = pos_y+context_h+4; |
|
162 | var cur_y = pos_y+context_h+4; | |
163 | xy_pos = [cur_x,cur_y]; |
|
163 | xy_pos = [cur_x,cur_y]; | |
164 | break; |
|
164 | break; | |
165 | case 'left': |
|
165 | case 'left': | |
166 | var cur_x = (pos_x-tt_w-4); |
|
166 | var cur_x = (pos_x-tt_w-4); | |
167 | var cur_y = pos_y-((tt_h/2)-context_h/2); |
|
167 | var cur_y = pos_y-((tt_h/2)-context_h/2); | |
168 | xy_pos = [cur_x,cur_y]; |
|
168 | xy_pos = [cur_x,cur_y]; | |
169 | break; |
|
169 | break; | |
170 | case 'right': |
|
170 | case 'right': | |
171 | var cur_x = (pos_x+context_w+4); |
|
171 | var cur_x = (pos_x+context_w+4); | |
172 | var cur_y = pos_y-((tt_h/2)-context_h/2); |
|
172 | var cur_y = pos_y-((tt_h/2)-context_h/2); | |
173 | xy_pos = [cur_x,cur_y]; |
|
173 | xy_pos = [cur_x,cur_y]; | |
174 | break; |
|
174 | break; | |
175 | default: |
|
175 | default: | |
176 | var cur_x = (pos_x+context_w/2)-(tt_w/2); |
|
176 | var cur_x = (pos_x+context_w/2)-(tt_w/2); | |
177 | var cur_y = pos_y-tt_h-4; |
|
177 | var cur_y = pos_y-tt_h-4; | |
178 | xy_pos = [cur_x,cur_y]; |
|
178 | xy_pos = [cur_x,cur_y]; | |
179 | break; |
|
179 | break; | |
180 |
|
180 | |||
181 | } |
|
181 | } | |
182 |
|
182 | |||
183 | this.cfg.setProperty("xy",xy_pos); |
|
183 | this.cfg.setProperty("xy",xy_pos); | |
184 |
|
184 | |||
185 | }); |
|
185 | }); | |
186 |
|
186 | |||
187 | //Mouse out |
|
187 | //Mouse out | |
188 | myToolTips.contextMouseOutEvent.subscribe( |
|
188 | myToolTips.contextMouseOutEvent.subscribe( | |
189 | function(type, args) { |
|
189 | function(type, args) { | |
190 | var context = args[0]; |
|
190 | var context = args[0]; | |
191 |
|
191 | |||
192 | }); |
|
192 | }); | |
193 | }); |
|
193 | }); | |
194 | ''' |
|
194 | ''' | |
195 | return literal(js) |
|
195 | return literal(js) | |
196 |
|
196 | |||
197 | tooltip = _ToolTip() |
|
197 | tooltip = _ToolTip() | |
198 |
|
198 | |||
199 | class _FilesBreadCrumbs(object): |
|
199 | class _FilesBreadCrumbs(object): | |
200 |
|
200 | |||
201 | def __call__(self, repo_name, rev, paths): |
|
201 | def __call__(self, repo_name, rev, paths): | |
202 | url_l = [link_to(repo_name, url('files_home', |
|
202 | url_l = [link_to(repo_name, url('files_home', | |
203 | repo_name=repo_name, |
|
203 | repo_name=repo_name, | |
204 | revision=rev, f_path=''))] |
|
204 | revision=rev, f_path=''))] | |
205 | paths_l = paths.split('/') |
|
205 | paths_l = paths.split('/') | |
206 |
|
206 | |||
207 | for cnt, p in enumerate(paths_l, 1): |
|
207 | for cnt, p in enumerate(paths_l, 1): | |
208 | if p != '': |
|
208 | if p != '': | |
209 | url_l.append(link_to(p, url('files_home', |
|
209 | url_l.append(link_to(p, url('files_home', | |
210 | repo_name=repo_name, |
|
210 | repo_name=repo_name, | |
211 | revision=rev, |
|
211 | revision=rev, | |
212 | f_path='/'.join(paths_l[:cnt])))) |
|
212 | f_path='/'.join(paths_l[:cnt])))) | |
213 |
|
213 | |||
214 | return literal('/'.join(url_l)) |
|
214 | return literal('/'.join(url_l)) | |
215 |
|
215 | |||
216 | files_breadcrumbs = _FilesBreadCrumbs() |
|
216 | files_breadcrumbs = _FilesBreadCrumbs() | |
class CodeHtmlFormatter(HtmlFormatter):

    def wrap(self, source, outfile):
        return self._wrap_div(self._wrap_pre(self._wrap_code(source)))

    def _wrap_code(self, source):
        for cnt, it in enumerate(source, 1):
            i, t = it
            t = '<div id="#S-%s">%s</div>' % (cnt, t)
            yield i, t

def pygmentize(filenode, **kwargs):
    """
    pygmentize function using pygments
    :param filenode:
    """
    return literal(code_highlight(filenode.content,
                                  filenode.lexer, CodeHtmlFormatter(**kwargs)))

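CodeHtmlFormatter only changes how Pygments wraps its output, so that each rendered source line sits in its own div and can be anchored. For orientation, a self-contained sketch of how such a formatter is driven through the stock Pygments API (the sample code string is made up):

# illustrative only: how a formatter like the one above is exercised
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

code = "def hello():\n    return 'world'\n"   # sample input, not from the repo
html = highlight(code, PythonLexer(), HtmlFormatter(cssclass='code-highlight'))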
def pygmentize_annotation(filenode, **kwargs):
    """
    pygmentize function for annotation
    :param filenode:
    """

    color_dict = {}
    def gen_color():
        """generator for getting 10k evenly distributed colors using hsv color
        and the golden ratio.
        """
        import colorsys
        n = 10000
        golden_ratio = 0.618033988749895
        h = 0.22717784590367374
        #generate 10k nice web friendly colors in the same order
        for c in xrange(n):
            h += golden_ratio
            h %= 1
            HSV_tuple = [h, 0.95, 0.95]
            RGB_tuple = colorsys.hsv_to_rgb(*HSV_tuple)
            yield map(lambda x: str(int(x * 256)), RGB_tuple)

    cgenerator = gen_color()

    def get_color_string(cs):
        if color_dict.has_key(cs):
            col = color_dict[cs]
        else:
            col = color_dict[cs] = cgenerator.next()
        return "color: rgb(%s) !important;" % (', '.join(col))

    def url_func(changeset):
        tooltip_html = "<div style='font-size:0.8em'><b>Author:</b>" + \
            " %s<br/><b>Date:</b> %s<br/><b>Message:</b> %s<br/></div>"

        tooltip_html = tooltip_html % (changeset.author,
                                       changeset.date,
                                       tooltip(changeset.message))
        lnk_format = '%5s:%s' % ('r%s' % changeset.revision,
                                 short_id(changeset.raw_id))
        uri = link_to(
                lnk_format,
                url('changeset_home', repo_name=changeset.repository.name,
                    revision=changeset.raw_id),
                style=get_color_string(changeset.raw_id),
                class_='tooltip',
                tooltip_title=tooltip_html
              )

        uri += '\n'
        return uri

    return literal(annotate_highlight(filenode, url_func, **kwargs))

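The annotation colouring above uses a well-known trick: stepping the hue by the golden-ratio conjugate spreads colours evenly around the hue wheel without precomputing a palette, while the closure-level dict memoizes one colour per changeset id. A standalone sketch of that walk (pure stdlib; the values and names here are illustrative):

import colorsys

def golden_hue_colors(count, h=0.22717784590367374, ratio=0.618033988749895):
    """Yield `count` RGB triples (0-255) spread around the hue wheel."""
    for _ in xrange(count):
        h = (h + ratio) % 1.0
        r, g, b = colorsys.hsv_to_rgb(h, 0.95, 0.95)
        yield tuple(int(round(c * 255)) for c in (r, g, b))

# one stable colour per changeset id, handed out on first sight
palette = golden_hue_colors(10000)
seen = {}
def color_for(raw_id):
    if raw_id not in seen:
        seen[raw_id] = palette.next()
    return seen[raw_id]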
def repo_name_slug(value):
    """Return a slug of the repository name.
    This function is called on each creation/modification
    of a repository to prevent bad names.
    """
    slug = remove_formatting(value)
    slug = strip_tags(slug)

    for c in """=[]\;'"<>,/~!@#$%^&*()+{}|: """:
        slug = slug.replace(c, '-')
    slug = recursive_replace(slug, '-')
    slug = collapse(slug, '-')
    return slug

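repo_name_slug delegates part of the work to the webhelpers text utilities; the core idea is "replace anything awkward with a dash, then squash runs of dashes". A rough self-contained approximation (it skips remove_formatting/strip_tags, so treat it as a sketch, not the real helper):

import re

def rough_slug(value):
    # replace characters that are awkward in URLs or on the filesystem
    for c in """=[]\;'"<>,/~!@#$%^&*()+{}|: """:
        value = value.replace(c, '-')
    # collapse runs of dashes, mirroring recursive_replace + collapse
    return re.sub('-+', '-', value).strip('-')

# rough_slug('My Repo (2010)!') -> 'My-Repo-2010'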
def get_changeset_safe(repo, rev):
    from vcs.backends.base import BaseRepository
    from vcs.exceptions import RepositoryError
    if not isinstance(repo, BaseRepository):
        raise Exception('You must pass a Repository '
                        'object as the first argument, got %s' % type(repo))

    try:
        cs = repo.get_changeset(rev)
    except RepositoryError:
        from rhodecode.lib.utils import EmptyChangeset
        cs = EmptyChangeset()
    return cs


flash = _Flash()

#==============================================================================
# MERCURIAL FILTERS available via h.
#==============================================================================
from mercurial import util
from mercurial.templatefilters import person as _person


def _age(curdate):
    """turns a datetime into an age string."""

    if not curdate:
        return ''

    from datetime import timedelta, datetime

    agescales = [("year", 3600 * 24 * 365),
                 ("month", 3600 * 24 * 30),
                 ("day", 3600 * 24),
                 ("hour", 3600),
                 ("minute", 60),
                 ("second", 1), ]

    age = datetime.now() - curdate
    age_seconds = (age.days * agescales[2][1]) + age.seconds
    pos = 1
    for scale in agescales:
        if scale[1] <= age_seconds:
            if pos == 6: pos = 5
            return time_ago_in_words(curdate, agescales[pos][0])
        pos += 1

age = lambda x: _age(x)
capitalize = lambda x: x.capitalize()
email = util.email
email_or_none = lambda x: util.email(x) if util.email(x) != x else None
person = lambda x: _person(x)
short_id = lambda x: x[:12]

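_age walks agescales from the largest unit down and reports the age one granularity finer than the first scale that fits (hence the pos offset), clamping at the smallest unit. A small sketch of just that scale selection, with the scales copied from above and the actual wording left to time_ago_in_words:

AGESCALES = [("year", 3600 * 24 * 365), ("month", 3600 * 24 * 30),
             ("day", 3600 * 24), ("hour", 3600), ("minute", 60), ("second", 1)]

def age_unit(age_seconds):
    """Return the unit _age would hand to time_ago_in_words."""
    pos = 1
    for name, seconds in AGESCALES:
        if seconds <= age_seconds:
            return AGESCALES[min(pos, len(AGESCALES) - 1)][0]
        pos += 1
    return AGESCALES[-1][0]

# age_unit(3600 * 24 * 40) -> 'day'    (older than a month, shown in days)
# age_unit(90)             -> 'second'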
def action_parser(user_log):
    """
    This helper maps the stored action string into translated,
    fancy names with icons and links.

    @param user_log:
    """
    action = user_log.action
    action_params = None
    cs_links = ''

    x = action.split(':')

    if len(x) > 1:
        action, action_params = x

    if action == 'push':
        revs_limit = 5
        revs = action_params.split(',')
        cs_links = " " + ', '.join([link(rev,
            url('changeset_home',
                repo_name=user_log.repository.repo_name,
                revision=rev)) for rev in revs[:revs_limit]])
        if len(revs) > revs_limit:
            html_tmpl = '<span title="%s"> %s </span>'
            cs_links += html_tmpl % (', '.join(r for r in revs[revs_limit:]),
                _('and %s more revisions') % (len(revs) - revs_limit))

    map = {'user_deleted_repo':_('User deleted repository'),
           'user_created_repo':_('User created repository'),
           'user_forked_repo':_('User forked repository'),
           'user_updated_repo':_('User updated repository'),
           'admin_deleted_repo':_('Admin deleted repository'),
           'admin_created_repo':_('Admin created repository'),
           'admin_forked_repo':_('Admin forked repository'),
           'admin_updated_repo':_('Admin updated repository'),
           'push':_('Pushed') + literal(cs_links),
           'pull':_('Pulled'), }

    print action, action_params
    return map.get(action, action)

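The journal stores actions as a compact 'verb:payload' string, e.g. 'push:abc123,def456', and action_parser splits the payload back out before rendering. A tiny sketch of that round trip (the sample revision ids are made up):

def split_action(action):
    """Split 'push:rev1,rev2' into ('push', ['rev1', 'rev2'])."""
    verb, _, payload = action.partition(':')
    return verb, payload.split(',') if payload else []

# split_action('push:abc123,def456') -> ('push', ['abc123', 'def456'])
# split_action('pull')               -> ('pull', [])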
#==============================================================================
# PERMS
#==============================================================================
from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
    HasRepoPermissionAny, HasRepoPermissionAll

#==============================================================================
# GRAVATAR URL
#==============================================================================
import hashlib
import urllib
from pylons import request

def gravatar_url(email_address, size=30):
    ssl_enabled = 'https' == request.environ.get('HTTP_X_URL_SCHEME')
    default = 'identicon'
    baseurl_nossl = "http://www.gravatar.com/avatar/"
    baseurl_ssl = "https://secure.gravatar.com/avatar/"
    baseurl = baseurl_ssl if ssl_enabled else baseurl_nossl

    # construct the url
    gravatar_url = baseurl + hashlib.md5(email_address.lower()).hexdigest() + "?"
    gravatar_url += urllib.urlencode({'d': default, 's': str(size)})

    return gravatar_url

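A Gravatar URL is just the MD5 hex digest of the lower-cased e-mail address plus query parameters for the default image and size, so the helper can be exercised on its own. A quick sketch with the request/scheme handling skipped (the address is a placeholder):

import hashlib
import urllib

def simple_gravatar(email_address, size=30, default='identicon'):
    digest = hashlib.md5(email_address.strip().lower()).hexdigest()
    query = urllib.urlencode({'d': default, 's': str(size)})
    return "https://secure.gravatar.com/avatar/%s?%s" % (digest, query)

# simple_gravatar('user@example.com', size=48)
# -> 'https://secure.gravatar.com/avatar/<md5-hex>?...' (parameter order may vary)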
def safe_unicode(_str):
    """safe unicode function. In case of a UnicodeDecodeError we try to return
    unicode with errors replaced; if this fails we return unicode with
    string_escape decoding """

    try:
        u_str = unicode(_str)
    except UnicodeDecodeError:
        try:
            u_str = unicode(_str, 'utf-8', 'replace')
        except UnicodeDecodeError:
            #in case we still have a decode error, represent it as a byte string
            u_str = unicode(str(_str).encode('string_escape'))

    return u_str
@@ -1,221 +1,217 @@
#!/usr/bin/env python
# encoding: utf-8
# whoosh indexer daemon for rhodecode
# Copyright (C) 2009-2010 Marcin Kuzminski <marcin@python-works.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License or (at your option) any later version of the license.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Created on Jan 26, 2010

@author: marcink
A daemon will read from the task table and run tasks
"""
import sys
import os
from os.path import dirname as dn
from os.path import join as jn

#to get the rhodecode import
project_path = dn(dn(dn(dn(os.path.realpath(__file__)))))
sys.path.append(project_path)


from rhodecode.model.hg import HgModel
from rhodecode.lib.helpers import safe_unicode
from whoosh.index import create_in, open_dir
from shutil import rmtree
from rhodecode.lib.indexers import INDEX_EXTENSIONS, SCHEMA, IDX_NAME

from time import mktime
from vcs.exceptions import ChangesetError, RepositoryError

import logging

log = logging.getLogger('whooshIndexer')
# create logger
log.setLevel(logging.DEBUG)
log.propagate = False
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)

# create formatter
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")

# add formatter to ch
ch.setFormatter(formatter)

# add ch to logger
log.addHandler(ch)

def get_repos_location():
    return HgModel.get_repos_location()


class WhooshIndexingDaemon(object):
    """
    Daemon for atomic jobs
    """

    def __init__(self, indexname='HG_INDEX', index_location=None,
                 repo_location=None):
        self.indexname = indexname

        self.index_location = index_location
        if not index_location:
            raise Exception('You have to provide index location')

        self.repo_location = repo_location
        if not repo_location:
            raise Exception('You have to provide repositories location')

        self.repo_paths = HgModel.repo_scan('/', self.repo_location, None, True)
        self.initial = False
        if not os.path.isdir(self.index_location):
            os.mkdir(self.index_location)
            log.info('Cannot run incremental index since it does not'
                     ' yet exist - running full build')
            self.initial = True

    def get_paths(self, repo):
        """
        recursive walk from the root dir; return a set of all paths in that dir,
        based on the repository walk function
        """
        index_paths_ = set()
        try:
            for topnode, dirs, files in repo.walk('/', 'tip'):
                for f in files:
                    index_paths_.add(jn(repo.path, f.path))
                for dir in dirs:
                    for f in files:
                        index_paths_.add(jn(repo.path, f.path))

        except RepositoryError:
            pass
        return index_paths_

    def get_node(self, repo, path):
        n_path = path[len(repo.path) + 1:]
        node = repo.get_changeset().get_node(n_path)
        return node

    def get_node_mtime(self, node):
        return mktime(node.last_changeset.date.timetuple())

    def add_doc(self, writer, path, repo):
        """Adding doc to writer"""
        node = self.get_node(repo, path)

        #we only index the content of chosen files
        if node.extension in INDEX_EXTENSIONS:
            log.debug(' >> %s [WITH CONTENT]' % path)
            u_content = node.content
        else:
            log.debug(' >> %s' % path)
            #just index the file name without its content
            u_content = u''

        writer.add_document(owner=unicode(repo.contact),
                            repository=safe_unicode(repo.name),
                            path=safe_unicode(path),
                            content=u_content,
                            modtime=self.get_node_mtime(node),
                            extension=node.extension)


    def build_index(self):
        if os.path.exists(self.index_location):
            log.debug('removing previous index')
            rmtree(self.index_location)

        if not os.path.exists(self.index_location):
            os.mkdir(self.index_location)

        idx = create_in(self.index_location, SCHEMA, indexname=IDX_NAME)
        writer = idx.writer()

        for cnt, repo in enumerate(self.repo_paths.values()):
            log.debug('building index @ %s' % repo.path)

            for idx_path in self.get_paths(repo):
                self.add_doc(writer, idx_path, repo)

        log.debug('>> COMMITTING CHANGES <<')
        writer.commit(merge=True)
        log.debug('>>> FINISHED BUILDING INDEX <<<')

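build_index relies on the SCHEMA and IDX_NAME constants from rhodecode.lib.indexers, which this changeset does not show. A minimal, self-contained Whoosh equivalent of the create-then-commit flow could look like the following sketch (the field names and paths are assumptions, not the real RhodeCode schema):

import os
from whoosh.fields import Schema, TEXT, ID
from whoosh.index import create_in

schema = Schema(path=ID(stored=True, unique=True),
                repository=ID(stored=True),
                content=TEXT)

index_dir = '/tmp/whoosh-demo'   # throwaway location for the sketch
if not os.path.exists(index_dir):
    os.mkdir(index_dir)

ix = create_in(index_dir, schema, indexname='DEMO')
writer = ix.writer()
writer.add_document(path=u'docs/readme.rst',
                    repository=u'demo-repo',
                    content=u'hello whoosh')
writer.commit(merge=True)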
    def update_index(self):
        log.debug('STARTING INCREMENTAL INDEXING UPDATE')

        idx = open_dir(self.index_location, indexname=self.indexname)
        # The set of all paths in the index
        indexed_paths = set()
        # The set of all paths we need to re-index
        to_index = set()

        reader = idx.reader()
        writer = idx.writer()

        # Loop over the stored fields in the index
        for fields in reader.all_stored_fields():
            indexed_path = fields['path']
            indexed_paths.add(indexed_path)

            repo = self.repo_paths[fields['repository']]

            try:
                node = self.get_node(repo, indexed_path)
            except ChangesetError:
                # This file was deleted since it was indexed
                log.debug('removing from index %s' % indexed_path)
                writer.delete_by_term('path', indexed_path)

            else:
                # Check if this file was changed since it was indexed
                indexed_time = fields['modtime']
                mtime = self.get_node_mtime(node)
                if mtime > indexed_time:
                    # The file has changed, delete it and add it to the list of
                    # files to reindex
                    log.debug('adding to reindex list %s' % indexed_path)
                    writer.delete_by_term('path', indexed_path)
                    to_index.add(indexed_path)

        # Loop over the files in the filesystem
        # Assume we have a function that gathers the filenames of the
        # documents to be indexed
        for repo in self.repo_paths.values():
            for path in self.get_paths(repo):
                if path in to_index or path not in indexed_paths:
                    # This is either a file that's changed, or a new file
                    # that wasn't indexed before. So index it!
                    self.add_doc(writer, path, repo)
                    log.debug('re-indexing %s' % path)

        log.debug('>> COMMITTING CHANGES <<')
        writer.commit(merge=True)
        log.debug('>>> FINISHED REBUILDING INDEX <<<')

    def run(self, full_index=False):
        """Run daemon"""
        if full_index or self.initial:
            self.build_index()
        else:
            self.update_index()
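Driving the daemon is then a matter of instantiating it with an index directory and the repositories root, then choosing full or incremental mode; the very first run falls back to a full build automatically. A usage sketch (both paths are placeholders, not project defaults):

# illustrative invocation; paths are placeholders
daemon = WhooshIndexingDaemon(index_location='/var/cache/rhodecode-index',
                              repo_location='/srv/repos')
daemon.run(full_index=False)   # incremental if an index already exists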