fixed annotation bug, added history to annotation....
marcink
r662:373ee703 beta
@@ -1,219 +1,218
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3 # files controller for pylons
4 4 # Copyright (C) 2009-2010 Marcin Kuzminski <marcin@python-works.com>
5 5
6 6 # This program is free software; you can redistribute it and/or
7 7 # modify it under the terms of the GNU General Public License
8 8 # as published by the Free Software Foundation; version 2
9 9 # of the License or (at your option) any later version of the license.
10 10 #
11 11 # This program is distributed in the hope that it will be useful,
12 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 14 # GNU General Public License for more details.
15 15 #
16 16 # You should have received a copy of the GNU General Public License
17 17 # along with this program; if not, write to the Free Software
18 18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
19 19 # MA 02110-1301, USA.
20 20 """
21 21 Created on April 21, 2010
22 22 files controller for pylons
23 23 @author: marcink
24 24 """
25 25 from mercurial import archival
26 26 from pylons import request, response, session, tmpl_context as c, url
27 27 from pylons.i18n.translation import _
28 28 from pylons.controllers.util import redirect
29 29 from rhodecode.lib.auth import LoginRequired, HasRepoPermissionAnyDecorator
30 30 from rhodecode.lib.base import BaseController, render
31 31 from rhodecode.lib.utils import EmptyChangeset
32 32 from rhodecode.model.hg import HgModel
33 33 from vcs.exceptions import RepositoryError, ChangesetError
34 34 from vcs.nodes import FileNode
35 35 from vcs.utils import diffs as differ
36 36 import logging
37 37 import rhodecode.lib.helpers as h
38 38 import tempfile
39 39
40 40 log = logging.getLogger(__name__)
41 41
42 42 class FilesController(BaseController):
43 43
44 44 @LoginRequired()
45 45 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
46 46 'repository.admin')
47 47 def __before__(self):
48 48 super(FilesController, self).__before__()
49 49 c.file_size_limit = 250 * 1024 #limit of file size to display
50 50
51 51 def index(self, repo_name, revision, f_path):
52 52 hg_model = HgModel()
53 c.repo = repo = hg_model.get_repo(c.repo_name)
53 c.repo = hg_model.get_repo(c.repo_name)
54 54 revision = request.POST.get('at_rev', None) or revision
55 55
56 56 def get_next_rev(cur):
57 57 max_rev = len(c.repo.revisions) - 1
58 58 r = cur + 1
59 59 if r > max_rev:
60 60 r = max_rev
61 61 return r
62 62
63 63 def get_prev_rev(cur):
64 64 r = cur - 1
65 65 return r
66 66
67 67 c.f_path = f_path
68 68
69 69
70 70 try:
71 c.changeset = repo.get_changeset(revision)
71 c.changeset = c.repo.get_changeset(revision)
72 72 cur_rev = c.changeset.revision
73 prev_rev = repo.get_changeset(get_prev_rev(cur_rev)).raw_id
74 next_rev = repo.get_changeset(get_next_rev(cur_rev)).raw_id
73 prev_rev = c.repo.get_changeset(get_prev_rev(cur_rev)).raw_id
74 next_rev = c.repo.get_changeset(get_next_rev(cur_rev)).raw_id
75 75
76 76 c.url_prev = url('files_home', repo_name=c.repo_name,
77 77 revision=prev_rev, f_path=f_path)
78 78 c.url_next = url('files_home', repo_name=c.repo_name,
79 79 revision=next_rev, f_path=f_path)
80 80
81 81 try:
82 82 c.files_list = c.changeset.get_node(f_path)
83 c.file_history = self._get_history(repo, c.files_list, f_path)
83 c.file_history = self._get_history(c.repo, c.files_list, f_path)
84 84
85 85 except RepositoryError, e:
86 86 h.flash(str(e), category='warning')
87 87 redirect(h.url('files_home', repo_name=repo_name, revision=revision))
88 88
89 89 except RepositoryError, e:
90 90 h.flash(str(e), category='warning')
91 91 redirect(h.url('files_home', repo_name=repo_name, revision='tip'))
92 92
93 93
94 94
95 95 return render('files/files.html')
96 96
97 97 def rawfile(self, repo_name, revision, f_path):
98 98 hg_model = HgModel()
99 99 c.repo = hg_model.get_repo(c.repo_name)
100 100 file_node = c.repo.get_changeset(revision).get_node(f_path)
101 101 response.content_type = file_node.mimetype
102 102 response.content_disposition = 'attachment; filename=%s' \
103 103 % f_path.split('/')[-1]
104 104 return file_node.content
105 105
106 106 def raw(self, repo_name, revision, f_path):
107 107 hg_model = HgModel()
108 108 c.repo = hg_model.get_repo(c.repo_name)
109 109 file_node = c.repo.get_changeset(revision).get_node(f_path)
110 110 response.content_type = 'text/plain'
111 111
112 112 return file_node.content
113 113
114 114 def annotate(self, repo_name, revision, f_path):
115 115 hg_model = HgModel()
116 116 c.repo = hg_model.get_repo(c.repo_name)
117 cs = c.repo.get_changeset(revision)
118 c.file = cs.get_node(f_path)
119 c.file_msg = cs.get_file_message(f_path)
120 c.cur_rev = cs.raw_id
121 c.rev_nr = cs.revision
117 c.cs = c.repo.get_changeset(revision)
118 c.file = c.cs.get_node(f_path)
119 c.file_history = self._get_history(c.repo, c.file, f_path)
120
122 121 c.f_path = f_path
123 122
124 123 return render('files/files_annotate.html')
125 124
126 125 def archivefile(self, repo_name, revision, fileformat):
127 126 archive_specs = {
128 127 '.tar.bz2': ('application/x-tar', 'tbz2'),
129 128 '.tar.gz': ('application/x-tar', 'tgz'),
130 129 '.zip': ('application/zip', 'zip'),
131 130 }
132 131 if not archive_specs.has_key(fileformat):
133 132 return 'Unknown archive type %s' % fileformat
134 133
135 134 def read_in_chunks(file_object, chunk_size=1024 * 40):
136 135 """Lazy function (generator) to read a file piece by piece.
137 136 Default chunk size: 40k."""
138 137 while True:
139 138 data = file_object.read(chunk_size)
140 139 if not data:
141 140 break
142 141 yield data
143 142
144 143 archive = tempfile.TemporaryFile()
145 144 repo = HgModel().get_repo(repo_name).repo
146 145 fname = '%s-%s%s' % (repo_name, revision, fileformat)
147 146 archival.archive(repo, archive, revision, archive_specs[fileformat][1],
148 147 prefix='%s-%s' % (repo_name, revision))
149 148 response.content_type = archive_specs[fileformat][0]
150 149 response.content_disposition = 'attachment; filename=%s' % fname
151 150 archive.seek(0)
152 151 return read_in_chunks(archive)
153 152
154 153 def diff(self, repo_name, f_path):
155 154 hg_model = HgModel()
156 155 diff1 = request.GET.get('diff1')
157 156 diff2 = request.GET.get('diff2')
158 157 c.action = request.GET.get('diff')
159 158 c.no_changes = diff1 == diff2
160 159 c.f_path = f_path
161 160 c.repo = hg_model.get_repo(c.repo_name)
162 161
163 162 try:
164 163 if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]:
165 164 c.changeset_1 = c.repo.get_changeset(diff1)
166 165 node1 = c.changeset_1.get_node(f_path)
167 166 else:
168 167 c.changeset_1 = EmptyChangeset()
169 168 node1 = FileNode('.', '', changeset=c.changeset_1)
170 169
171 170 if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]:
172 171 c.changeset_2 = c.repo.get_changeset(diff2)
173 172 node2 = c.changeset_2.get_node(f_path)
174 173 else:
175 174 c.changeset_2 = EmptyChangeset()
176 175 node2 = FileNode('.', '', changeset=c.changeset_2)
177 176 except RepositoryError:
178 177 return redirect(url('files_home',
179 178 repo_name=c.repo_name, f_path=f_path))
180 179
181 180 f_udiff = differ.get_udiff(node1, node2)
182 181 diff = differ.DiffProcessor(f_udiff)
183 182
184 183 if c.action == 'download':
185 184 diff_name = '%s_vs_%s.diff' % (diff1, diff2)
186 185 response.content_type = 'text/plain'
187 186 response.content_disposition = 'attachment; filename=%s' \
188 187 % diff_name
189 188 return diff.raw_diff()
190 189
191 190 elif c.action == 'raw':
192 191 response.content_type = 'text/plain'
193 192 return diff.raw_diff()
194 193
195 194 elif c.action == 'diff':
196 195 if node1.size > c.file_size_limit or node2.size > c.file_size_limit:
197 196 c.cur_diff = _('Diff is too big to display')
198 197 else:
199 198 c.cur_diff = diff.as_html()
200 199 else:
201 200 #default option
202 201 if node1.size > c.file_size_limit or node2.size > c.file_size_limit:
203 202 c.cur_diff = _('Diff is too big to display')
204 203 else:
205 204 c.cur_diff = diff.as_html()
206 205
207 206 if not c.cur_diff: c.no_changes = True
208 207 return render('files/file_diff.html')
209 208
210 209 def _get_history(self, repo, node, f_path):
211 210 from vcs.nodes import NodeKind
212 211 if not node.kind is NodeKind.FILE:
213 212 return []
214 213 changesets = node.history
215 214 hist_l = []
216 215 for chs in changesets:
217 216 n_desc = 'r%s:%s' % (chs.revision, chs.short_id)
218 217 hist_l.append((chs.raw_id, n_desc,))
219 218 return hist_l
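
For context on the annotation-history change above: both index and annotate now go through the private _get_history helper, which flattens a file node's changeset history into (raw_id, label) pairs for the history selector. A minimal, self-contained sketch of that transformation follows; the Fake* classes are hypothetical stand-ins for illustration only, not part of RhodeCode or the vcs library.

# Sketch only: mimic the (raw_id, 'r<rev>:<short_id>') pairs built by _get_history.
class FakeChangeset(object):
    def __init__(self, revision, raw_id):
        self.revision = revision
        self.raw_id = raw_id
        self.short_id = raw_id[:12]

class FakeFileNode(object):
    def __init__(self, history):
        self.history = history  # list of changeset-like objects

def build_history_entries(node):
    # mirrors the loop in FilesController._get_history
    return [(cs.raw_id, 'r%s:%s' % (cs.revision, cs.short_id)) for cs in node.history]

node = FakeFileNode([FakeChangeset(662, '373ee703aaaabbbbccccdddd'),
                     FakeChangeset(661, '1234567890abcdef12345678')])
print(build_history_entries(node))
# -> [('373ee703aaaabbbbccccdddd', 'r662:373ee703aaaa'),
#     ('1234567890abcdef12345678', 'r661:1234567890ab')]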
@@ -1,336 +1,338
1 1 from celery.decorators import task
2 2
3 3 from operator import itemgetter
4 4 from pylons.i18n.translation import _
5 5 from rhodecode.lib.celerylib import run_task, locked_task
6 6 from rhodecode.lib.helpers import person
7 7 from rhodecode.lib.smtp_mailer import SmtpMailer
8 8 from rhodecode.lib.utils import OrderedDict
9 9 from time import mktime
10 10 from vcs.backends.hg import MercurialRepository
11 11 from vcs.backends.git import GitRepository
12 12 import os
13 13 import traceback
14 14 from vcs.backends import get_repo
15 15 from vcs.utils.helpers import get_scm
16 16
17 17 try:
18 18 import json
19 19 except ImportError:
20 20 #python 2.5 compatibility
21 21 import simplejson as json
22 22
23 23 try:
24 24 from celeryconfig import PYLONS_CONFIG as config
25 25 celery_on = True
26 26 except ImportError:
27 27 #if celeryconfig is not present let's just load our pylons
28 28 #config instead
29 29 from pylons import config
30 30 celery_on = False
31 31
32 32
33 33 __all__ = ['whoosh_index', 'get_commits_stats',
34 34 'reset_user_password', 'send_email']
35 35
36 36 def get_session():
37 37 if celery_on:
38 38 from sqlalchemy import engine_from_config
39 39 from sqlalchemy.orm import sessionmaker, scoped_session
40 40 engine = engine_from_config(dict(config.items('app:main')), 'sqlalchemy.db1.')
41 41 sa = scoped_session(sessionmaker(bind=engine))
42 42 else:
43 43 #If we don't use celery reuse our current application Session
44 44 from rhodecode.model.meta import Session
45 45 sa = Session()
46 46
47 47 return sa
48 48
49 49 def get_hg_settings():
50 50 from rhodecode.model.db import RhodeCodeSettings
51 51 sa = get_session()
52 52 ret = sa.query(RhodeCodeSettings).all()
53 53
54 54 if not ret:
55 55 raise Exception('Could not get application settings !')
56 56 settings = {}
57 57 for each in ret:
58 58 settings['rhodecode_' + each.app_settings_name] = each.app_settings_value
59 59
60 60 return settings
61 61
62 62 def get_hg_ui_settings():
63 63 from rhodecode.model.db import RhodeCodeUi
64 64 sa = get_session()
65 65 ret = sa.query(RhodeCodeUi).all()
66 66
67 67 if not ret:
68 68 raise Exception('Could not get application ui settings !')
69 69 settings = {}
70 70 for each in ret:
71 71 k = each.ui_key
72 72 v = each.ui_value
73 73 if k == '/':
74 74 k = 'root_path'
75 75
76 76 if k.find('.') != -1:
77 77 k = k.replace('.', '_')
78 78
79 79 if each.ui_section == 'hooks':
80 80 v = each.ui_active
81 81
82 82 settings[each.ui_section + '_' + k] = v
83 83
84 84 return settings
85 85
86 86 @task
87 87 @locked_task
88 88 def whoosh_index(repo_location, full_index):
89 89 log = whoosh_index.get_logger()
90 90 from rhodecode.lib.indexers.daemon import WhooshIndexingDaemon
91 WhooshIndexingDaemon(repo_location=repo_location).run(full_index=full_index)
91 index_location = ''
92 WhooshIndexingDaemon(index_location=index_location,
93 repo_location=repo_location).run(full_index=full_index)
92 94
93 95 @task
94 96 @locked_task
95 97 def get_commits_stats(repo_name, ts_min_y, ts_max_y):
96 98 from rhodecode.model.db import Statistics, Repository
97 99 log = get_commits_stats.get_logger()
98 100 author_key_cleaner = lambda k: person(k).replace('"', "") #for js data compatibility
99 101
100 102 commits_by_day_author_aggregate = {}
101 103 commits_by_day_aggregate = {}
102 104 repos_path = get_hg_ui_settings()['paths_root_path']
103 105 p = os.path.join(repos_path, repo_name)
104 106 repo = get_repo(p)
105 107
106 108 skip_date_limit = True
107 109 parse_limit = 250 #limit for single task changeset parsing; keeps one run from freezing the app
108 110 last_rev = 0
109 111 last_cs = None
110 112 timegetter = itemgetter('time')
111 113
112 114 sa = get_session()
113 115
114 116 dbrepo = sa.query(Repository)\
115 117 .filter(Repository.repo_name == repo_name).scalar()
116 118 cur_stats = sa.query(Statistics)\
117 119 .filter(Statistics.repository == dbrepo).scalar()
118 120 if cur_stats:
119 121 last_rev = cur_stats.stat_on_revision
120 122 if not repo.revisions:
121 123 return True
122 124
123 125 if last_rev == repo.revisions[-1] and len(repo.revisions) > 1:
124 126 #pass silently without any work if the last parsed revision
125 127 #(the db marker) is already the newest revision in the repo
126 128 return True
127 129
128 130 if cur_stats:
129 131 commits_by_day_aggregate = OrderedDict(
130 132 json.loads(
131 133 cur_stats.commit_activity_combined))
132 134 commits_by_day_author_aggregate = json.loads(cur_stats.commit_activity)
133 135
134 136 log.debug('starting parsing %s', parse_limit)
135 137 lmktime = mktime
136 138
137 139 for cnt, rev in enumerate(repo.revisions[last_rev:]):
138 140 last_cs = cs = repo.get_changeset(rev)
139 141 k = '%s-%s-%s' % (cs.date.timetuple()[0], cs.date.timetuple()[1],
140 142 cs.date.timetuple()[2])
141 143 timetupple = [int(x) for x in k.split('-')]
142 144 timetupple.extend([0 for _ in xrange(6)])
143 145 k = lmktime(timetupple)
144 146 if commits_by_day_author_aggregate.has_key(author_key_cleaner(cs.author)):
145 147 try:
146 148 l = [timegetter(x) for x in commits_by_day_author_aggregate\
147 149 [author_key_cleaner(cs.author)]['data']]
148 150 time_pos = l.index(k)
149 151 except ValueError:
150 152 time_pos = False
151 153
152 154 if time_pos >= 0 and time_pos is not False:
153 155
154 156 datadict = commits_by_day_author_aggregate\
155 157 [author_key_cleaner(cs.author)]['data'][time_pos]
156 158
157 159 datadict["commits"] += 1
158 160 datadict["added"] += len(cs.added)
159 161 datadict["changed"] += len(cs.changed)
160 162 datadict["removed"] += len(cs.removed)
161 163
162 164 else:
163 165 if k >= ts_min_y and k <= ts_max_y or skip_date_limit:
164 166
165 167 datadict = {"time":k,
166 168 "commits":1,
167 169 "added":len(cs.added),
168 170 "changed":len(cs.changed),
169 171 "removed":len(cs.removed),
170 172 }
171 173 commits_by_day_author_aggregate\
172 174 [author_key_cleaner(cs.author)]['data'].append(datadict)
173 175
174 176 else:
175 177 if k >= ts_min_y and k <= ts_max_y or skip_date_limit:
176 178 commits_by_day_author_aggregate[author_key_cleaner(cs.author)] = {
177 179 "label":author_key_cleaner(cs.author),
178 180 "data":[{"time":k,
179 181 "commits":1,
180 182 "added":len(cs.added),
181 183 "changed":len(cs.changed),
182 184 "removed":len(cs.removed),
183 185 }],
184 186 "schema":["commits"],
185 187 }
186 188
187 189 #gather all data by day
188 190 if commits_by_day_aggregate.has_key(k):
189 191 commits_by_day_aggregate[k] += 1
190 192 else:
191 193 commits_by_day_aggregate[k] = 1
192 194
193 195 if cnt >= parse_limit:
194 196 #don't fetch too much data since we can freeze the application
195 197 break
196 198 overview_data = []
197 199 for k, v in commits_by_day_aggregate.items():
198 200 overview_data.append([k, v])
199 201 overview_data = sorted(overview_data, key=itemgetter(0))
200 202 if not commits_by_day_author_aggregate:
201 203 commits_by_day_author_aggregate[author_key_cleaner(repo.contact)] = {
202 204 "label":author_key_cleaner(repo.contact),
203 205 "data":[0, 1],
204 206 "schema":["commits"],
205 207 }
206 208
207 209 stats = cur_stats if cur_stats else Statistics()
208 210 stats.commit_activity = json.dumps(commits_by_day_author_aggregate)
209 211 stats.commit_activity_combined = json.dumps(overview_data)
210 212
211 213 log.debug('last revision %s', last_rev)
212 214 leftovers = len(repo.revisions[last_rev:])
213 215 log.debug('revisions to parse %s', leftovers)
214 216
215 217 if last_rev == 0 or leftovers < parse_limit:
216 218 stats.languages = json.dumps(__get_codes_stats(repo_name))
217 219
218 220 stats.repository = dbrepo
219 221 stats.stat_on_revision = last_cs.revision
220 222
221 223 try:
222 224 sa.add(stats)
223 225 sa.commit()
224 226 except:
225 227 log.error(traceback.format_exc())
226 228 sa.rollback()
227 229 return False
228 230 if len(repo.revisions) > 1:
229 231 run_task(get_commits_stats, repo_name, ts_min_y, ts_max_y)
230 232
231 233 return True
232 234
233 235 @task
234 236 def reset_user_password(user_email):
235 237 log = reset_user_password.get_logger()
236 238 from rhodecode.lib import auth
237 239 from rhodecode.model.db import User
238 240
239 241 try:
240 242 try:
241 243 sa = get_session()
242 244 user = sa.query(User).filter(User.email == user_email).scalar()
243 245 new_passwd = auth.PasswordGenerator().gen_password(8,
244 246 auth.PasswordGenerator.ALPHABETS_BIG_SMALL)
245 247 if user:
246 248 user.password = auth.get_crypt_password(new_passwd)
247 249 sa.add(user)
248 250 sa.commit()
249 251 log.info('change password for %s', user_email)
250 252 if new_passwd is None:
251 253 raise Exception('unable to generate new password')
252 254
253 255 except:
254 256 log.error(traceback.format_exc())
255 257 sa.rollback()
256 258
257 259 run_task(send_email, user_email,
258 260 "Your new rhodecode password",
259 261 'Your new rhodecode password:%s' % (new_passwd))
260 262 log.info('send new password mail to %s', user_email)
261 263
262 264
263 265 except:
264 266 log.error('Failed to update user password')
265 267 log.error(traceback.format_exc())
266 268 return True
267 269
268 270 @task
269 271 def send_email(recipients, subject, body):
270 272 log = send_email.get_logger()
271 273 email_config = dict(config.items('DEFAULT'))
272 274 mail_from = email_config.get('app_email_from')
273 275 user = email_config.get('smtp_username')
274 276 passwd = email_config.get('smtp_password')
275 277 mail_server = email_config.get('smtp_server')
276 278 mail_port = email_config.get('smtp_port')
277 279 tls = email_config.get('smtp_use_tls')
278 280 ssl = False
279 281
280 282 try:
281 283 m = SmtpMailer(mail_from, user, passwd, mail_server,
282 284 mail_port, ssl, tls)
283 285 m.send(recipients, subject, body)
284 286 except:
285 287 log.error('Mail sending failed')
286 288 log.error(traceback.format_exc())
287 289 return False
288 290 return True
289 291
290 292 @task
291 293 def create_repo_fork(form_data, cur_user):
292 294 from rhodecode.model.repo import RepoModel
293 295 from vcs import get_backend
294 296 log = create_repo_fork.get_logger()
295 297 repo_model = RepoModel(get_session())
296 298 repo_model.create(form_data, cur_user, just_db=True, fork=True)
297 299 repo_name = form_data['repo_name']
298 300 repos_path = get_hg_ui_settings()['paths_root_path']
299 301 repo_path = os.path.join(repos_path, repo_name)
300 302 repo_fork_path = os.path.join(repos_path, form_data['fork_name'])
301 303 alias = form_data['repo_type']
302 304
303 305 log.info('creating repo fork %s as %s', repo_name, repo_path)
304 306 backend = get_backend(alias)
305 307 backend(str(repo_fork_path), create=True, src_url=str(repo_path))
306 308
307 309 def __get_codes_stats(repo_name):
308 310 LANGUAGES_EXTENSIONS = ['action', 'adp', 'ashx', 'asmx',
309 311 'aspx', 'asx', 'axd', 'c', 'cfg', 'cfm', 'cpp', 'cs', 'diff', 'do', 'el',
310 312 'erl', 'h', 'java', 'js', 'jsp', 'jspx', 'lisp', 'lua', 'm', 'mako', 'ml',
311 313 'pas', 'patch', 'php', 'php3', 'php4', 'phtml', 'pm', 'py', 'rb', 'rst',
312 314 's', 'sh', 'tpl', 'txt', 'vim', 'wss', 'xhtml', 'xml', 'xsl', 'xslt', 'yaws']
313 315
314 316
315 317 repos_path = get_hg_ui_settings()['paths_root_path']
316 318 p = os.path.join(repos_path, repo_name)
317 319 repo = get_repo(p)
318 320 tip = repo.get_changeset()
319 321 code_stats = {}
320 322
321 323 def aggregate(cs):
322 324 for f in cs[2]:
323 325 k = f.mimetype
324 326 if f.extension in LANGUAGES_EXTENSIONS:
325 327 if code_stats.has_key(k):
326 328 code_stats[k] += 1
327 329 else:
328 330 code_stats[k] = 1
329 331
330 332 map(aggregate, tip.walk('/'))
331 333
332 334 return code_stats or {}
333 335
334 336
335 337
336 338
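
One note on the whoosh_index task in the hunk above: the daemon's new constructor requires an index_location, but the task currently passes an empty string, which the daemon rejects. A hedged sketch of how the location could instead be pulled from the already-loaded Pylons config is shown below; the 'index_dir' key and the fallback path are assumptions made for illustration, not settings RhodeCode is known to ship.

# Sketch under assumptions: read the whoosh index directory from [app:main],
# falling back to a placeholder path. 'index_dir' is a hypothetical key.
def resolve_index_location(config_items, fallback='/tmp/rhodecode_index'):
    cfg = dict(config_items)
    return cfg.get('index_dir') or fallback

# Possible use inside whoosh_index, replacing the empty-string placeholder:
#     index_location = resolve_index_location(config.items('app:main'))
#     WhooshIndexingDaemon(index_location=index_location,
#                          repo_location=repo_location).run(full_index=full_index)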
@@ -1,445 +1,445
1 1 """Helper functions
2 2
3 3 Consists of functions typically used within templates, but also
4 4 available to Controllers. This module is available to both as 'h'.
5 5 """
6 6 from pygments.formatters import HtmlFormatter
7 7 from pygments import highlight as code_highlight
8 8 from pylons import url, app_globals as g
9 9 from pylons.i18n.translation import _, ungettext
10 10 from vcs.utils.annotate import annotate_highlight
11 11 from webhelpers.html import literal, HTML, escape
12 12 from webhelpers.html.tools import *
13 13 from webhelpers.html.builder import make_tag
14 14 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
15 15 end_form, file, form, hidden, image, javascript_link, link_to, link_to_if, \
16 16 link_to_unless, ol, required_legend, select, stylesheet_link, submit, text, \
17 17 password, textarea, title, ul, xml_declaration, radio
18 18 from webhelpers.html.tools import auto_link, button_to, highlight, js_obfuscate, \
19 19 mail_to, strip_links, strip_tags, tag_re
20 20 from webhelpers.number import format_byte_size, format_bit_size
21 21 from webhelpers.pylonslib import Flash as _Flash
22 22 from webhelpers.pylonslib.secure_form import secure_form
23 23 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
24 24 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
25 25 replace_whitespace, urlify, truncate, wrap_paragraphs
26 26 from webhelpers.date import time_ago_in_words
27 27
28 28 #Custom helpers here :)
29 29 class _Link(object):
30 30 '''
31 31 Make a url based on label and url with the help of url_for
32 32 :param label: name of the link; if not defined, the url is used
33 33 :param url: the url for link
34 34 '''
35 35
36 36 def __call__(self, label='', *url_, **urlargs):
37 37 if not label:
38 38 label = url(*url_, **urlargs)
39 39 link_fn = link_to(label, url(*url_, **urlargs))
40 40 return link_fn
41 41
42 42 link = _Link()
43 43
44 44 class _GetError(object):
45 45
46 46 def __call__(self, field_name, form_errors):
47 47 tmpl = """<span class="error_msg">%s</span>"""
48 48 if form_errors and form_errors.has_key(field_name):
49 49 return literal(tmpl % form_errors.get(field_name))
50 50
51 51 get_error = _GetError()
52 52
53 53 def recursive_replace(str, replace=' '):
54 54 """
55 55 Recursively replace repeated occurrences of the given character with a single one
56 56 :param str: given string
57 57 :param replace: char to find and collapse when it appears multiple times
58 58
59 59 Examples::
60 60 >>> recursive_replace("Mighty---Mighty-Bo--sstones",'-')
61 61 'Mighty-Mighty-Bo-sstones'
62 62 """
63 63
64 64 if str.find(replace * 2) == -1:
65 65 return str
66 66 else:
67 67 str = str.replace(replace * 2, replace)
68 68 return recursive_replace(str, replace)
69 69
70 70 class _ToolTip(object):
71 71
72 72 def __call__(self, tooltip_title, trim_at=50):
73 73 """
74 74 Special function that wraps our text into nicely formatted, auto-wrapped
75 75 text
76 76 :param tooltip_title:
77 77 """
78 78
79 79 return wrap_paragraphs(escape(tooltip_title), trim_at)\
80 80 .replace('\n', '<br/>')
81 81
82 82 def activate(self):
83 83 """
84 84 Adds a tooltip mechanism to the given HTML. All tooltips must have
85 85 the class 'tooltip' and the attribute 'tooltip_title' set.
86 86 A tooltip is then generated from that attribute,
87 87 all with the YUI JS tooltip widget.
88 88 """
89 89
90 90 js = '''
91 91 YAHOO.util.Event.onDOMReady(function(){
92 92 function toolTipsId(){
93 93 var ids = [];
94 94 var tts = YAHOO.util.Dom.getElementsByClassName('tooltip');
95 95
96 96 for (var i = 0; i < tts.length; i++) {
97 97 //if the element does not have an id, autogenerate one for the tooltip
98 98
99 99 if (!tts[i].id){
100 100 tts[i].id='tt'+i*100;
101 101 }
102 102 ids.push(tts[i].id);
103 103 }
104 104 return ids
105 105 };
106 106 var myToolTips = new YAHOO.widget.Tooltip("tooltip", {
107 107 context: toolTipsId(),
108 108 monitorresize:false,
109 109 xyoffset :[0,0],
110 110 autodismissdelay:300000,
111 111 hidedelay:5,
112 112 showdelay:20,
113 113 });
114 114
115 115 //Mouse Over event disabled for new repositories since they don't
116 116 //have a last commit message
117 117 myToolTips.contextMouseOverEvent.subscribe(
118 118 function(type, args) {
119 119 var context = args[0];
120 120 var txt = context.getAttribute('tooltip_title');
121 121 if(txt){
122 122 return true;
123 123 }
124 124 else{
125 125 return false;
126 126 }
127 127 });
128 128
129 129
130 130 // Set the text for the tooltip just before we display it. Lazy method
131 131 myToolTips.contextTriggerEvent.subscribe(
132 132 function(type, args) {
133 133
134 134
135 135 var context = args[0];
136 136
137 137 var txt = context.getAttribute('tooltip_title');
138 138 this.cfg.setProperty("text", txt);
139 139
140 140
141 141 // positioning of tooltip
142 142 var tt_w = this.element.clientWidth;
143 143 var tt_h = this.element.clientHeight;
144 144
145 145 var context_w = context.offsetWidth;
146 146 var context_h = context.offsetHeight;
147 147
148 148 var pos_x = YAHOO.util.Dom.getX(context);
149 149 var pos_y = YAHOO.util.Dom.getY(context);
150 150
151 151 var display_strategy = 'top';
152 152 var xy_pos = [0,0];
153 153 switch (display_strategy){
154 154
155 155 case 'top':
156 156 var cur_x = (pos_x+context_w/2)-(tt_w/2);
157 157 var cur_y = pos_y-tt_h-4;
158 158 xy_pos = [cur_x,cur_y];
159 159 break;
160 160 case 'bottom':
161 161 var cur_x = (pos_x+context_w/2)-(tt_w/2);
162 162 var cur_y = pos_y+context_h+4;
163 163 xy_pos = [cur_x,cur_y];
164 164 break;
165 165 case 'left':
166 166 var cur_x = (pos_x-tt_w-4);
167 167 var cur_y = pos_y-((tt_h/2)-context_h/2);
168 168 xy_pos = [cur_x,cur_y];
169 169 break;
170 170 case 'right':
171 171 var cur_x = (pos_x+context_w+4);
172 172 var cur_y = pos_y-((tt_h/2)-context_h/2);
173 173 xy_pos = [cur_x,cur_y];
174 174 break;
175 175 default:
176 176 var cur_x = (pos_x+context_w/2)-(tt_w/2);
177 177 var cur_y = pos_y-tt_h-4;
178 178 xy_pos = [cur_x,cur_y];
179 179 break;
180 180
181 181 }
182 182
183 183 this.cfg.setProperty("xy",xy_pos);
184 184
185 185 });
186 186
187 187 //Mouse out
188 188 myToolTips.contextMouseOutEvent.subscribe(
189 189 function(type, args) {
190 190 var context = args[0];
191 191
192 192 });
193 193 });
194 194 '''
195 195 return literal(js)
196 196
197 197 tooltip = _ToolTip()
198 198
199 199 class _FilesBreadCrumbs(object):
200 200
201 201 def __call__(self, repo_name, rev, paths):
202 202 url_l = [link_to(repo_name, url('files_home',
203 203 repo_name=repo_name,
204 204 revision=rev, f_path=''))]
205 205 paths_l = paths.split('/')
206 206
207 207 for cnt, p in enumerate(paths_l, 1):
208 208 if p != '':
209 209 url_l.append(link_to(p, url('files_home',
210 210 repo_name=repo_name,
211 211 revision=rev,
212 212 f_path='/'.join(paths_l[:cnt]))))
213 213
214 214 return literal('/'.join(url_l))
215 215
216 216 files_breadcrumbs = _FilesBreadCrumbs()
217 217 class CodeHtmlFormatter(HtmlFormatter):
218 218
219 219 def wrap(self, source, outfile):
220 220 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
221 221
222 222 def _wrap_code(self, source):
223 223 for cnt, it in enumerate(source, 1):
224 224 i, t = it
225 225 t = '<div id="#S-%s">%s</div>' % (cnt, t)
226 226 yield i, t
227 227 def pygmentize(filenode, **kwargs):
228 228 """
229 229 pygmentize function using pygments
230 230 :param filenode:
231 231 """
232 232 return literal(code_highlight(filenode.content,
233 233 filenode.lexer, CodeHtmlFormatter(**kwargs)))
234 234
235 235 def pygmentize_annotation(filenode, **kwargs):
236 236 """
237 237 pygmentize function for annotation
238 238 :param filenode:
239 239 """
240 240
241 241 color_dict = {}
242 242 def gen_color():
243 243 """generator for getting 10k of evenly distibuted colors using hsv color
244 244 and golden ratio.
245 245 """
246 246 import colorsys
247 247 n = 10000
248 248 golden_ratio = 0.618033988749895
249 249 h = 0.22717784590367374
250 250 #generate 10k nice web friendly colors in the same order
251 251 for c in xrange(n):
252 252 h += golden_ratio
253 253 h %= 1
254 254 HSV_tuple = [h, 0.95, 0.95]
255 255 RGB_tuple = colorsys.hsv_to_rgb(*HSV_tuple)
256 256 yield map(lambda x:str(int(x * 256)), RGB_tuple)
257 257
258 258 cgenerator = gen_color()
259 259
260 260 def get_color_string(cs):
261 261 if color_dict.has_key(cs):
262 262 col = color_dict[cs]
263 263 else:
264 264 col = color_dict[cs] = cgenerator.next()
265 265 return "color: rgb(%s)! important;" % (', '.join(col))
266 266
267 267 def url_func(changeset):
268 268 tooltip_html = "<div style='font-size:0.8em'><b>Author:</b>" + \
269 269 " %s<br/><b>Date:</b> %s</b><br/><b>Message:</b> %s<br/></div>"
270 270
271 271 tooltip_html = tooltip_html % (changeset.author,
272 272 changeset.date,
273 273 tooltip(changeset.message))
274 lnk_format = 'r%-5s:%s' % (changeset.revision,
275 changeset.raw_id)
274 lnk_format = '%5s:%s' % ('r%s' % changeset.revision,
275 short_id(changeset.raw_id))
276 276 uri = link_to(
277 277 lnk_format,
278 278 url('changeset_home', repo_name=changeset.repository.name,
279 279 revision=changeset.raw_id),
280 280 style=get_color_string(changeset.raw_id),
281 281 class_='tooltip',
282 282 tooltip_title=tooltip_html
283 283 )
284 284
285 285 uri += '\n'
286 286 return uri
287 287 return literal(annotate_highlight(filenode, url_func, **kwargs))
288 288
289 289 def repo_name_slug(value):
290 290 """Return slug of name of repository
291 291 This function is called on each creation/modification
292 292 of repository to prevent bad names in repo
293 293 """
294 294 slug = remove_formatting(value)
295 295 slug = strip_tags(slug)
296 296
297 297 for c in """=[]\;'"<>,/~!@#$%^&*()+{}|: """:
298 298 slug = slug.replace(c, '-')
299 299 slug = recursive_replace(slug, '-')
300 300 slug = collapse(slug, '-')
301 301 return slug
302 302
303 303 def get_changeset_safe(repo, rev):
304 304 from vcs.backends.base import BaseRepository
305 305 from vcs.exceptions import RepositoryError
306 306 if not isinstance(repo, BaseRepository):
307 307 raise Exception('You must pass a Repository '
308 308 'object as first argument, got %s' % type(repo))
309 309
310 310 try:
311 311 cs = repo.get_changeset(rev)
312 312 except RepositoryError:
313 313 from rhodecode.lib.utils import EmptyChangeset
314 314 cs = EmptyChangeset()
315 315 return cs
316 316
317 317
318 318 flash = _Flash()
319 319
320 320
321 321 #==============================================================================
322 322 # MERCURIAL FILTERS available via h.
323 323 #==============================================================================
324 324 from mercurial import util
325 325 from mercurial.templatefilters import person as _person
326 326
327 327
328 328
329 329 def _age(curdate):
330 330 """turns a datetime into an age string."""
331 331
332 332 if not curdate:
333 333 return ''
334 334
335 335 from datetime import timedelta, datetime
336 336
337 337 agescales = [("year", 3600 * 24 * 365),
338 338 ("month", 3600 * 24 * 30),
339 339 ("day", 3600 * 24),
340 340 ("hour", 3600),
341 341 ("minute", 60),
342 342 ("second", 1), ]
343 343
344 344 age = datetime.now() - curdate
345 345 age_seconds = (age.days * agescales[2][1]) + age.seconds
346 346 pos = 1
347 347 for scale in agescales:
348 348 if scale[1] <= age_seconds:
349 349 if pos == 6:pos = 5
350 350 return time_ago_in_words(curdate, agescales[pos][0])
351 351 pos += 1
352 352
353 353 age = lambda x:_age(x)
354 354 capitalize = lambda x: x.capitalize()
355 355 email = util.email
356 356 email_or_none = lambda x: util.email(x) if util.email(x) != x else None
357 357 person = lambda x: _person(x)
358 358 short_id = lambda x: x[:12]
359 359
360 360
361 361 def action_parser(user_log):
362 362 """
363 363 This helper will map the specified string action into translated
364 364 fancy names with icons and links
365 365
366 366 @param action:
367 367 """
368 368 action = user_log.action
369 369 action_params = None
370 370 cs_links = ''
371 371
372 372 x = action.split(':')
373 373
374 374 if len(x) > 1:
375 375 action, action_params = x
376 376
377 377 if action == 'push':
378 378 revs_limit = 5
379 379 revs = action_params.split(',')
380 380 cs_links = " " + ', '.join ([link(rev,
381 381 url('changeset_home',
382 382 repo_name=user_log.repository.repo_name,
383 383 revision=rev)) for rev in revs[:revs_limit] ])
384 384 if len(revs) > revs_limit:
385 385 html_tmpl = '<span title="%s"> %s </span>'
386 386 cs_links += html_tmpl % (', '.join(r for r in revs[revs_limit:]),
387 387 _('and %s more revisions') % (len(revs) - revs_limit))
388 388
389 389 map = {'user_deleted_repo':_('User deleted repository'),
390 390 'user_created_repo':_('User created repository'),
391 391 'user_forked_repo':_('User forked repository'),
392 392 'user_updated_repo':_('User updated repository'),
393 393 'admin_deleted_repo':_('Admin deleted repository'),
394 394 'admin_created_repo':_('Admin created repository'),
395 395 'admin_forked_repo':_('Admin forked repository'),
396 396 'admin_updated_repo':_('Admin updated repository'),
397 397 'push':_('Pushed') + literal(cs_links),
398 398 'pull':_('Pulled'), }
399 399
400 400 print action, action_params
401 401 return map.get(action, action)
402 402
403 403
404 404 #==============================================================================
405 405 # PERMS
406 406 #==============================================================================
407 407 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
408 408 HasRepoPermissionAny, HasRepoPermissionAll
409 409
410 410 #==============================================================================
411 411 # GRAVATAR URL
412 412 #==============================================================================
413 413 import hashlib
414 414 import urllib
415 415 from pylons import request
416 416
417 417 def gravatar_url(email_address, size=30):
418 418 ssl_enabled = 'https' == request.environ.get('HTTP_X_URL_SCHEME')
419 419 default = 'identicon'
420 420 baseurl_nossl = "http://www.gravatar.com/avatar/"
421 421 baseurl_ssl = "https://secure.gravatar.com/avatar/"
422 422 baseurl = baseurl_ssl if ssl_enabled else baseurl_nossl
423 423
424 424
425 425 # construct the url
426 426 gravatar_url = baseurl + hashlib.md5(email_address.lower()).hexdigest() + "?"
427 427 gravatar_url += urllib.urlencode({'d':default, 's':str(size)})
428 428
429 429 return gravatar_url
430 430
431 431 def safe_unicode(str):
432 432 """safe unicode function. In case of UnicodeDecode error we try to return
433 433 unicode with errors replace, if this failes we return unicode with
434 434 string_escape decoding """
435 435
436 436 try:
437 437 u_str = unicode(str)
438 438 except UnicodeDecodeError:
439 439 try:
440 440 u_str = unicode(str, 'utf-8', 'replace')
441 441 except UnicodeDecodeError:
442 442 #in case we have a decode error, just represent it as a byte string
443 443 u_str = unicode(str(str).encode('string_escape'))
444 444
445 445 return u_str
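
The lnk_format change in pygmentize_annotation above is easiest to see with concrete values: the old pattern left-padded the revision and printed the full hash, while the new one right-aligns an 'r<rev>' prefix and abbreviates the hash via short_id. A quick, self-contained comparison follows; the hash value is made up for illustration.

# Illustration of the annotation-label change; values are hypothetical.
short_id = lambda x: x[:12]  # same definition as the helper in this module

revision = 662
raw_id = '373ee703aaaabbbbccccdddd'

old_label = 'r%-5s:%s' % (revision, raw_id)                   # 'r662  :373ee703aaaabbbbccccdddd'
new_label = '%5s:%s' % ('r%s' % revision, short_id(raw_id))   # ' r662:373ee703aaaa'
print(old_label)
print(new_label)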
@@ -1,221 +1,217
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3 # whoosh indexer daemon for rhodecode
4 4 # Copyright (C) 2009-2010 Marcin Kuzminski <marcin@python-works.com>
5 5 #
6 6 # This program is free software; you can redistribute it and/or
7 7 # modify it under the terms of the GNU General Public License
8 8 # as published by the Free Software Foundation; version 2
9 9 # of the License or (at your option) any later version of the license.
10 10 #
11 11 # This program is distributed in the hope that it will be useful,
12 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 14 # GNU General Public License for more details.
15 15 #
16 16 # You should have received a copy of the GNU General Public License
17 17 # along with this program; if not, write to the Free Software
18 18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
19 19 # MA 02110-1301, USA.
20 20 """
21 21 Created on Jan 26, 2010
22 22
23 23 @author: marcink
24 24 A daemon will read from the task table and run tasks
25 25 """
26 26 import sys
27 27 import os
28 28 from os.path import dirname as dn
29 29 from os.path import join as jn
30 30
31 31 #to get the rhodecode import
32 32 project_path = dn(dn(dn(dn(os.path.realpath(__file__)))))
33 33 sys.path.append(project_path)
34 34
35 35
36 36 from rhodecode.model.hg import HgModel
37 37 from rhodecode.lib.helpers import safe_unicode
38 38 from whoosh.index import create_in, open_dir
39 39 from shutil import rmtree
40 40 from rhodecode.lib.indexers import INDEX_EXTENSIONS, SCHEMA, IDX_NAME
41 41
42 42 from time import mktime
43 43 from vcs.exceptions import ChangesetError, RepositoryError
44 44
45 45 import logging
46 46
47 47 log = logging.getLogger('whooshIndexer')
48 48 # create logger
49 49 log.setLevel(logging.DEBUG)
50 50 log.propagate = False
51 51 # create console handler and set level to debug
52 52 ch = logging.StreamHandler()
53 53 ch.setLevel(logging.DEBUG)
54 54
55 55 # create formatter
56 56 formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
57 57
58 58 # add formatter to ch
59 59 ch.setFormatter(formatter)
60 60
61 61 # add ch to logger
62 62 log.addHandler(ch)
63 63
64 def get_repos_location():
65 return HgModel.get_repos_location()
66
67
68 64 class WhooshIndexingDaemon(object):
69 65 """
70 66 Daemon for atomic jobs
71 67 """
72 68
73 69 def __init__(self, indexname='HG_INDEX', index_location=None,
74 70 repo_location=None):
75 71 self.indexname = indexname
76 72
77 73 self.index_location = index_location
78 74 if not index_location:
79 75 raise Exception('You have to provide index location')
80 76
81 77 self.repo_location = repo_location
82 78 if not repo_location:
83 79 raise Exception('You have to provide repositories location')
84 80
85 81
86 82
87 83 self.repo_paths = HgModel.repo_scan('/', self.repo_location, None, True)
88 84 self.initial = False
89 85 if not os.path.isdir(self.index_location):
90 86 os.mkdir(self.index_location)
91 87 log.info('Cannot run incremental index since it does not'
92 88 ' yet exist; running full build')
93 89 self.initial = True
94 90
95 91 def get_paths(self, repo):
96 92 """
97 93 recursively walk the root dir and return a set of all paths in that dir,
98 94 based on the repository walk function
99 95 """
100 96 index_paths_ = set()
101 97 try:
102 98 for topnode, dirs, files in repo.walk('/', 'tip'):
103 99 for f in files:
104 100 index_paths_.add(jn(repo.path, f.path))
105 101 for dir in dirs:
106 102 for f in files:
107 103 index_paths_.add(jn(repo.path, f.path))
108 104
109 105 except RepositoryError:
110 106 pass
111 107 return index_paths_
112 108
113 109 def get_node(self, repo, path):
114 110 n_path = path[len(repo.path) + 1:]
115 111 node = repo.get_changeset().get_node(n_path)
116 112 return node
117 113
118 114 def get_node_mtime(self, node):
119 115 return mktime(node.last_changeset.date.timetuple())
120 116
121 117 def add_doc(self, writer, path, repo):
122 118 """Adding doc to writer"""
123 119 node = self.get_node(repo, path)
124 120
125 121 #we just index the content of chosen files
126 122 if node.extension in INDEX_EXTENSIONS:
127 123 log.debug(' >> %s [WITH CONTENT]' % path)
128 124 u_content = node.content
129 125 else:
130 126 log.debug(' >> %s' % path)
131 127 #just index the file name without its content
132 128 u_content = u''
133 129
134 130 writer.add_document(owner=unicode(repo.contact),
135 131 repository=safe_unicode(repo.name),
136 132 path=safe_unicode(path),
137 133 content=u_content,
138 134 modtime=self.get_node_mtime(node),
139 135 extension=node.extension)
140 136
141 137
142 138 def build_index(self):
143 139 if os.path.exists(self.index_location):
144 140 log.debug('removing previous index')
145 141 rmtree(self.index_location)
146 142
147 143 if not os.path.exists(self.index_location):
148 144 os.mkdir(self.index_location)
149 145
150 146 idx = create_in(self.index_location, SCHEMA, indexname=IDX_NAME)
151 147 writer = idx.writer()
152 148
153 149 for cnt, repo in enumerate(self.repo_paths.values()):
154 150 log.debug('building index @ %s' % repo.path)
155 151
156 152 for idx_path in self.get_paths(repo):
157 153 self.add_doc(writer, idx_path, repo)
158 154
159 155 log.debug('>> COMMITTING CHANGES <<')
160 156 writer.commit(merge=True)
161 157 log.debug('>>> FINISHED BUILDING INDEX <<<')
162 158
163 159
164 160 def update_index(self):
165 161 log.debug('STARTING INCREMENTAL INDEXING UPDATE')
166 162
167 163 idx = open_dir(self.index_location, indexname=self.indexname)
168 164 # The set of all paths in the index
169 165 indexed_paths = set()
170 166 # The set of all paths we need to re-index
171 167 to_index = set()
172 168
173 169 reader = idx.reader()
174 170 writer = idx.writer()
175 171
176 172 # Loop over the stored fields in the index
177 173 for fields in reader.all_stored_fields():
178 174 indexed_path = fields['path']
179 175 indexed_paths.add(indexed_path)
180 176
181 177 repo = self.repo_paths[fields['repository']]
182 178
183 179 try:
184 180 node = self.get_node(repo, indexed_path)
185 181 except ChangesetError:
186 182 # This file was deleted since it was indexed
187 183 log.debug('removing from index %s' % indexed_path)
188 184 writer.delete_by_term('path', indexed_path)
189 185
190 186 else:
191 187 # Check if this file was changed since it was indexed
192 188 indexed_time = fields['modtime']
193 189 mtime = self.get_node_mtime(node)
194 190 if mtime > indexed_time:
195 191 # The file has changed, delete it and add it to the list of
196 192 # files to reindex
197 193 log.debug('adding to reindex list %s' % indexed_path)
198 194 writer.delete_by_term('path', indexed_path)
199 195 to_index.add(indexed_path)
200 196
201 197 # Loop over the files in the filesystem
202 198 # Assume we have a function that gathers the filenames of the
203 199 # documents to be indexed
204 200 for repo in self.repo_paths.values():
205 201 for path in self.get_paths(repo):
206 202 if path in to_index or path not in indexed_paths:
207 203 # This is either a file that's changed, or a new file
208 204 # that wasn't indexed before. So index it!
209 205 self.add_doc(writer, path, repo)
210 206 log.debug('re-indexing %s' % path)
211 207
212 208 log.debug('>> COMMITTING CHANGES <<')
213 209 writer.commit(merge=True)
214 210 log.debug('>>> FINISHED REBUILDING INDEX <<<')
215 211
216 212 def run(self, full_index=False):
217 213 """Run daemon"""
218 214 if full_index or self.initial:
219 215 self.build_index()
220 216 else:
221 217 self.update_index()
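
Since this hunk drops the get_repos_location helper and makes index_location a required constructor argument, running the daemon standalone now means supplying both paths yourself. A hedged usage sketch follows, assuming the Pylons application environment is already loaded (the constructor's HgModel.repo_scan call depends on it) and using placeholder paths that are not RhodeCode defaults.

# Usage sketch only; the paths are placeholders, not RhodeCode defaults.
if __name__ == '__main__':
    index_location = '/var/lib/rhodecode/index'   # hypothetical index directory
    repo_location = '/srv/repos'                  # hypothetical repositories root

    daemon = WhooshIndexingDaemon(index_location=index_location,
                                  repo_location=repo_location)
    daemon.run(full_index=True)   # force a full rebuild; False runs an incremental update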