##// END OF EJS Templates
diffs: drop the noop as_raw method - just use the raw diff directly and with proper variable naming
Mads Kiilerich -
r6834:54199f3a default
parent child Browse files
Show More
@@ -1,461 +1,460 b''
1 1 # -*- coding: utf-8 -*-
2 2 # This program is free software: you can redistribute it and/or modify
3 3 # it under the terms of the GNU General Public License as published by
4 4 # the Free Software Foundation, either version 3 of the License, or
5 5 # (at your option) any later version.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 """
15 15 kallithea.controllers.changeset
16 16 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
17 17
18 18 changeset controller showing changes between revisions
19 19
20 20 This file was forked by the Kallithea project in July 2014.
21 21 Original author and date, and relevant copyright and licensing information is below:
22 22 :created_on: Apr 25, 2010
23 23 :author: marcink
24 24 :copyright: (c) 2013 RhodeCode GmbH, and others.
25 25 :license: GPLv3, see LICENSE.md for more details.
26 26 """
27 27
28 28 import logging
29 29 import traceback
30 30 from collections import defaultdict
31 31
32 32 from tg import tmpl_context as c, request, response
33 33 from tg.i18n import ugettext as _
34 34 from webob.exc import HTTPFound, HTTPForbidden, HTTPBadRequest, HTTPNotFound
35 35
36 36 from kallithea.lib.vcs.exceptions import RepositoryError, \
37 37 ChangesetDoesNotExistError, EmptyRepositoryError
38 38
39 39 import kallithea.lib.helpers as h
40 40 from kallithea.lib.auth import LoginRequired, HasRepoPermissionLevelDecorator, \
41 41 NotAnonymous
42 42 from kallithea.lib.base import BaseRepoController, render, jsonify
43 43 from kallithea.lib.utils import action_logger
44 44 from kallithea.lib.compat import OrderedDict
45 45 from kallithea.lib import diffs
46 46 from kallithea.model.db import ChangesetComment, ChangesetStatus
47 47 from kallithea.model.comment import ChangesetCommentsModel
48 48 from kallithea.model.changeset_status import ChangesetStatusModel
49 49 from kallithea.model.meta import Session
50 50 from kallithea.model.repo import RepoModel
51 51 from kallithea.lib.diffs import LimitedDiffContainer
52 52 from kallithea.lib.exceptions import StatusChangeOnClosedPullRequestError
53 53 from kallithea.lib.vcs.backends.base import EmptyChangeset
54 54 from kallithea.lib.utils2 import safe_unicode
55 55 from kallithea.lib.graphmod import graph_data
56 56
57 57 log = logging.getLogger(__name__)
58 58
59 59
60 60 def _update_with_GET(params, GET):
61 61 for k in ['diff1', 'diff2', 'diff']:
62 62 params[k] += GET.getall(k)
63 63
64 64
65 65 def anchor_url(revision, path, GET):
66 66 fid = h.FID(revision, path)
67 67 return h.url.current(anchor=fid, **dict(GET))
68 68
69 69
70 70 def get_ignore_ws(fid, GET):
71 71 ig_ws_global = GET.get('ignorews')
72 72 ig_ws = filter(lambda k: k.startswith('WS'), GET.getall(fid))
73 73 if ig_ws:
74 74 try:
75 75 return int(ig_ws[0].split(':')[-1])
76 76 except ValueError:
77 77 raise HTTPBadRequest()
78 78 return ig_ws_global
79 79
80 80
81 81 def _ignorews_url(GET, fileid=None):
82 82 fileid = str(fileid) if fileid else None
83 83 params = defaultdict(list)
84 84 _update_with_GET(params, GET)
85 85 lbl = _('Show whitespace')
86 86 ig_ws = get_ignore_ws(fileid, GET)
87 87 ln_ctx = get_line_ctx(fileid, GET)
88 88 # global option
89 89 if fileid is None:
90 90 if ig_ws is None:
91 91 params['ignorews'] += [1]
92 92 lbl = _('Ignore whitespace')
93 93 ctx_key = 'context'
94 94 ctx_val = ln_ctx
95 95 # per file options
96 96 else:
97 97 if ig_ws is None:
98 98 params[fileid] += ['WS:1']
99 99 lbl = _('Ignore whitespace')
100 100
101 101 ctx_key = fileid
102 102 ctx_val = 'C:%s' % ln_ctx
103 103 # if we have passed in ln_ctx pass it along to our params
104 104 if ln_ctx:
105 105 params[ctx_key] += [ctx_val]
106 106
107 107 params['anchor'] = fileid
108 108 icon = h.literal('<i class="icon-strike"></i>')
109 109 return h.link_to(icon, h.url.current(**params), title=lbl, **{'data-toggle': 'tooltip'})
110 110
111 111
112 112 def get_line_ctx(fid, GET):
113 113 ln_ctx_global = GET.get('context')
114 114 if fid:
115 115 ln_ctx = filter(lambda k: k.startswith('C'), GET.getall(fid))
116 116 else:
117 117 _ln_ctx = filter(lambda k: k.startswith('C'), GET)
118 118 ln_ctx = GET.get(_ln_ctx[0]) if _ln_ctx else ln_ctx_global
119 119 if ln_ctx:
120 120 ln_ctx = [ln_ctx]
121 121
122 122 if ln_ctx:
123 123 retval = ln_ctx[0].split(':')[-1]
124 124 else:
125 125 retval = ln_ctx_global
126 126
127 127 try:
128 128 return int(retval)
129 129 except Exception:
130 130 return 3
131 131
132 132
133 133 def _context_url(GET, fileid=None):
134 134 """
135 135 Generates url for context lines
136 136
137 137 :param fileid:
138 138 """
139 139
140 140 fileid = str(fileid) if fileid else None
141 141 ig_ws = get_ignore_ws(fileid, GET)
142 142 ln_ctx = (get_line_ctx(fileid, GET) or 3) * 2
143 143
144 144 params = defaultdict(list)
145 145 _update_with_GET(params, GET)
146 146
147 147 # global option
148 148 if fileid is None:
149 149 if ln_ctx > 0:
150 150 params['context'] += [ln_ctx]
151 151
152 152 if ig_ws:
153 153 ig_ws_key = 'ignorews'
154 154 ig_ws_val = 1
155 155
156 156 # per file option
157 157 else:
158 158 params[fileid] += ['C:%s' % ln_ctx]
159 159 ig_ws_key = fileid
160 160 ig_ws_val = 'WS:%s' % 1
161 161
162 162 if ig_ws:
163 163 params[ig_ws_key] += [ig_ws_val]
164 164
165 165 lbl = _('Increase diff context to %(num)s lines') % {'num': ln_ctx}
166 166
167 167 params['anchor'] = fileid
168 168 icon = h.literal('<i class="icon-sort"></i>')
169 169 return h.link_to(icon, h.url.current(**params), title=lbl, **{'data-toggle': 'tooltip'})
170 170
171 171
172 172 # Could perhaps be nice to have in the model but is too high level ...
173 173 def create_comment(text, status, f_path, line_no, revision=None, pull_request_id=None, closing_pr=None):
174 174 """Comment functionality shared between changesets and pullrequests"""
175 175 f_path = f_path or None
176 176 line_no = line_no or None
177 177
178 178 comment = ChangesetCommentsModel().create(
179 179 text=text,
180 180 repo=c.db_repo.repo_id,
181 181 author=request.authuser.user_id,
182 182 revision=revision,
183 183 pull_request=pull_request_id,
184 184 f_path=f_path,
185 185 line_no=line_no,
186 186 status_change=ChangesetStatus.get_status_lbl(status) if status else None,
187 187 closing_pr=closing_pr,
188 188 )
189 189
190 190 return comment
191 191
192 192
193 193 class ChangesetController(BaseRepoController):
194 194
195 195 def _before(self, *args, **kwargs):
196 196 super(ChangesetController, self)._before(*args, **kwargs)
197 197 c.affected_files_cut_off = 60
198 198
199 199 def __load_data(self):
200 200 repo_model = RepoModel()
201 201 c.users_array = repo_model.get_users_js()
202 202 c.user_groups_array = repo_model.get_user_groups_js()
203 203
204 204 def _index(self, revision, method):
205 205 c.pull_request = None
206 206 c.anchor_url = anchor_url
207 207 c.ignorews_url = _ignorews_url
208 208 c.context_url = _context_url
209 209 c.fulldiff = request.GET.get('fulldiff') # for reporting number of changed files
210 210 # get ranges of revisions if preset
211 211 rev_range = revision.split('...')[:2]
212 212 enable_comments = True
213 213 c.cs_repo = c.db_repo
214 214 try:
215 215 if len(rev_range) == 2:
216 216 enable_comments = False
217 217 rev_start = rev_range[0]
218 218 rev_end = rev_range[1]
219 219 rev_ranges = c.db_repo_scm_instance.get_changesets(start=rev_start,
220 220 end=rev_end)
221 221 else:
222 222 rev_ranges = [c.db_repo_scm_instance.get_changeset(revision)]
223 223
224 224 c.cs_ranges = list(rev_ranges)
225 225 if not c.cs_ranges:
226 226 raise RepositoryError('Changeset range returned empty result')
227 227
228 228 except (ChangesetDoesNotExistError, EmptyRepositoryError):
229 229 log.debug(traceback.format_exc())
230 230 msg = _('Such revision does not exist for this repository')
231 231 h.flash(msg, category='error')
232 232 raise HTTPNotFound()
233 233
234 234 c.changes = OrderedDict()
235 235
236 236 c.lines_added = 0 # count of lines added
237 237 c.lines_deleted = 0 # count of lines removes
238 238
239 239 c.changeset_statuses = ChangesetStatus.STATUSES
240 240 comments = dict()
241 241 c.statuses = []
242 242 c.inline_comments = []
243 243 c.inline_cnt = 0
244 244
245 245 # Iterate over ranges (default changeset view is always one changeset)
246 246 for changeset in c.cs_ranges:
247 247 if method == 'show':
248 248 c.statuses.extend([ChangesetStatusModel().get_status(
249 249 c.db_repo.repo_id, changeset.raw_id)])
250 250
251 251 # Changeset comments
252 252 comments.update((com.comment_id, com)
253 253 for com in ChangesetCommentsModel()
254 254 .get_comments(c.db_repo.repo_id,
255 255 revision=changeset.raw_id))
256 256
257 257 # Status change comments - mostly from pull requests
258 258 comments.update((st.comment_id, st.comment)
259 259 for st in ChangesetStatusModel()
260 260 .get_statuses(c.db_repo.repo_id,
261 261 changeset.raw_id, with_revisions=True)
262 262 if st.comment_id is not None)
263 263
264 264 inlines = ChangesetCommentsModel() \
265 265 .get_inline_comments(c.db_repo.repo_id,
266 266 revision=changeset.raw_id)
267 267 c.inline_comments.extend(inlines)
268 268
269 269 cs2 = changeset.raw_id
270 270 cs1 = changeset.parents[0].raw_id if changeset.parents else EmptyChangeset().raw_id
271 271 context_lcl = get_line_ctx('', request.GET)
272 272 ign_whitespace_lcl = get_ignore_ws('', request.GET)
273 273
274 _diff = c.db_repo_scm_instance.get_diff(cs1, cs2,
274 raw_diff = c.db_repo_scm_instance.get_diff(cs1, cs2,
275 275 ignore_whitespace=ign_whitespace_lcl, context=context_lcl)
276 276 diff_limit = None if c.fulldiff else self.cut_off_limit
277 diff_processor = diffs.DiffProcessor(_diff,
278 vcs=c.db_repo_scm_instance.alias,
279 diff_limit=diff_limit)
280 277 file_diff_data = []
281 278 if method == 'show':
279 diff_processor = diffs.DiffProcessor(raw_diff,
280 vcs=c.db_repo_scm_instance.alias,
281 diff_limit=diff_limit)
282 282 _parsed = diff_processor.prepare()
283 283 c.limited_diff = False
284 284 if isinstance(_parsed, LimitedDiffContainer):
285 285 c.limited_diff = True
286 286 for f in _parsed:
287 287 st = f['stats']
288 288 c.lines_added += st['added']
289 289 c.lines_deleted += st['deleted']
290 290 filename = f['filename']
291 291 fid = h.FID(changeset.raw_id, filename)
292 292 url_fid = h.FID('', filename)
293 293 diff = diff_processor.as_html(enable_comments=enable_comments,
294 294 parsed_lines=[f])
295 295 file_diff_data.append((fid, url_fid, f['operation'], f['old_filename'], filename, diff, st))
296 296 else:
297 297 # downloads/raw we only need RAW diff nothing else
298 diff = diff_processor.as_raw()
299 file_diff_data.append(('', None, None, None, diff, None))
298 file_diff_data.append(('', None, None, None, raw_diff, None))
300 299 c.changes[changeset.raw_id] = (cs1, cs2, file_diff_data)
301 300
302 301 # sort comments in creation order
303 302 c.comments = [com for com_id, com in sorted(comments.items())]
304 303
305 304 # count inline comments
306 305 for __, lines in c.inline_comments:
307 306 for comments in lines.values():
308 307 c.inline_cnt += len(comments)
309 308
310 309 if len(c.cs_ranges) == 1:
311 310 c.changeset = c.cs_ranges[0]
312 311 c.parent_tmpl = ''.join(['# Parent %s\n' % x.raw_id
313 312 for x in c.changeset.parents])
314 313 if method == 'download':
315 314 response.content_type = 'text/plain'
316 315 response.content_disposition = 'attachment; filename=%s.diff' \
317 316 % revision[:12]
318 return diff
317 return raw_diff
319 318 elif method == 'patch':
320 319 response.content_type = 'text/plain'
321 c.diff = safe_unicode(diff)
320 c.diff = safe_unicode(raw_diff)
322 321 return render('changeset/patch_changeset.html')
323 322 elif method == 'raw':
324 323 response.content_type = 'text/plain'
325 return diff
324 return raw_diff
326 325 elif method == 'show':
327 326 self.__load_data()
328 327 if len(c.cs_ranges) == 1:
329 328 return render('changeset/changeset.html')
330 329 else:
331 330 c.cs_ranges_org = None
332 331 c.cs_comments = {}
333 332 revs = [ctx.revision for ctx in reversed(c.cs_ranges)]
334 333 c.jsdata = graph_data(c.db_repo_scm_instance, revs)
335 334 return render('changeset/changeset_range.html')
336 335
337 336 @LoginRequired()
338 337 @HasRepoPermissionLevelDecorator('read')
339 338 def index(self, revision, method='show'):
340 339 return self._index(revision, method=method)
341 340
342 341 @LoginRequired()
343 342 @HasRepoPermissionLevelDecorator('read')
344 343 def changeset_raw(self, revision):
345 344 return self._index(revision, method='raw')
346 345
347 346 @LoginRequired()
348 347 @HasRepoPermissionLevelDecorator('read')
349 348 def changeset_patch(self, revision):
350 349 return self._index(revision, method='patch')
351 350
352 351 @LoginRequired()
353 352 @HasRepoPermissionLevelDecorator('read')
354 353 def changeset_download(self, revision):
355 354 return self._index(revision, method='download')
356 355
357 356 @LoginRequired()
358 357 @NotAnonymous()
359 358 @HasRepoPermissionLevelDecorator('read')
360 359 @jsonify
361 360 def comment(self, repo_name, revision):
362 361 assert request.environ.get('HTTP_X_PARTIAL_XHR')
363 362
364 363 status = request.POST.get('changeset_status')
365 364 text = request.POST.get('text', '').strip()
366 365
367 366 c.comment = create_comment(
368 367 text,
369 368 status,
370 369 revision=revision,
371 370 f_path=request.POST.get('f_path'),
372 371 line_no=request.POST.get('line'),
373 372 )
374 373
375 374 # get status if set !
376 375 if status:
377 376 # if latest status was from pull request and it's closed
378 377 # disallow changing status ! RLY?
379 378 try:
380 379 ChangesetStatusModel().set_status(
381 380 c.db_repo.repo_id,
382 381 status,
383 382 request.authuser.user_id,
384 383 c.comment,
385 384 revision=revision,
386 385 dont_allow_on_closed_pull_request=True,
387 386 )
388 387 except StatusChangeOnClosedPullRequestError:
389 388 log.debug('cannot change status on %s with closed pull request', revision)
390 389 raise HTTPBadRequest()
391 390
392 391 action_logger(request.authuser,
393 392 'user_commented_revision:%s' % revision,
394 393 c.db_repo, request.ip_addr)
395 394
396 395 Session().commit()
397 396
398 397 data = {
399 398 'target_id': h.safeid(h.safe_unicode(request.POST.get('f_path'))),
400 399 }
401 400 if c.comment is not None:
402 401 data.update(c.comment.get_dict())
403 402 data.update({'rendered_text':
404 403 render('changeset/changeset_comment_block.html')})
405 404
406 405 return data
407 406
408 407 @LoginRequired()
409 408 @NotAnonymous()
410 409 @HasRepoPermissionLevelDecorator('read')
411 410 @jsonify
412 411 def delete_comment(self, repo_name, comment_id):
413 412 co = ChangesetComment.get_or_404(comment_id)
414 413 if co.repo.repo_name != repo_name:
415 414 raise HTTPNotFound()
416 415 owner = co.author_id == request.authuser.user_id
417 416 repo_admin = h.HasRepoPermissionLevel('admin')(repo_name)
418 417 if h.HasPermissionAny('hg.admin')() or repo_admin or owner:
419 418 ChangesetCommentsModel().delete(comment=co)
420 419 Session().commit()
421 420 return True
422 421 else:
423 422 raise HTTPForbidden()
424 423
425 424 @LoginRequired()
426 425 @HasRepoPermissionLevelDecorator('read')
427 426 @jsonify
428 427 def changeset_info(self, repo_name, revision):
429 428 if request.is_xhr:
430 429 try:
431 430 return c.db_repo_scm_instance.get_changeset(revision)
432 431 except ChangesetDoesNotExistError as e:
433 432 return EmptyChangeset(message=str(e))
434 433 else:
435 434 raise HTTPBadRequest()
436 435
437 436 @LoginRequired()
438 437 @HasRepoPermissionLevelDecorator('read')
439 438 @jsonify
440 439 def changeset_children(self, repo_name, revision):
441 440 if request.is_xhr:
442 441 changeset = c.db_repo_scm_instance.get_changeset(revision)
443 442 result = {"results": []}
444 443 if changeset.children:
445 444 result = {"results": changeset.children}
446 445 return result
447 446 else:
448 447 raise HTTPBadRequest()
449 448
450 449 @LoginRequired()
451 450 @HasRepoPermissionLevelDecorator('read')
452 451 @jsonify
453 452 def changeset_parents(self, repo_name, revision):
454 453 if request.is_xhr:
455 454 changeset = c.db_repo_scm_instance.get_changeset(revision)
456 455 result = {"results": []}
457 456 if changeset.parents:
458 457 result = {"results": changeset.parents}
459 458 return result
460 459 else:
461 460 raise HTTPBadRequest()
@@ -1,171 +1,170 b''
1 1 # -*- coding: utf-8 -*-
2 2 # This program is free software: you can redistribute it and/or modify
3 3 # it under the terms of the GNU General Public License as published by
4 4 # the Free Software Foundation, either version 3 of the License, or
5 5 # (at your option) any later version.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 """
15 15 kallithea.controllers.feed
16 16 ~~~~~~~~~~~~~~~~~~~~~~~~~~
17 17
18 18 Feed controller for Kallithea
19 19
20 20 This file was forked by the Kallithea project in July 2014.
21 21 Original author and date, and relevant copyright and licensing information is below:
22 22 :created_on: Apr 23, 2010
23 23 :author: marcink
24 24 :copyright: (c) 2013 RhodeCode GmbH, and others.
25 25 :license: GPLv3, see LICENSE.md for more details.
26 26 """
27 27
28 28
29 29 import logging
30 30
31 31 from tg import response, tmpl_context as c
32 32 from tg.i18n import ugettext as _
33 33
34 34 from beaker.cache import cache_region, region_invalidate
35 35 from webhelpers.feedgenerator import Atom1Feed, Rss201rev2Feed
36 36
37 37 from kallithea import CONFIG
38 38 from kallithea.lib import helpers as h
39 39 from kallithea.lib.auth import LoginRequired, HasRepoPermissionLevelDecorator
40 40 from kallithea.lib.base import BaseRepoController
41 41 from kallithea.lib.diffs import DiffProcessor, LimitedDiffContainer
42 42 from kallithea.model.db import CacheInvalidation
43 43 from kallithea.lib.utils2 import safe_int, str2bool, safe_unicode
44 44
45 45 log = logging.getLogger(__name__)
46 46
47 47
48 48 language = 'en-us'
49 49 ttl = "5"
50 50
51 51
52 52 class FeedController(BaseRepoController):
53 53
54 54 @LoginRequired(api_access=True)
55 55 @HasRepoPermissionLevelDecorator('read')
56 56 def _before(self, *args, **kwargs):
57 57 super(FeedController, self)._before(*args, **kwargs)
58 58
59 59 def _get_title(self, cs):
60 60 return h.shorter(cs.message, 160)
61 61
62 def __changes(self, cs):
62 def __get_desc(self, cs):
63 desc_msg = [(_('%s committed on %s')
64 % (h.person(cs.author), h.fmt_date(cs.date))) + '<br/>']
65 # branches, tags, bookmarks
66 if cs.branch:
67 desc_msg.append('branch: %s<br/>' % cs.branch)
68 for book in cs.bookmarks:
69 desc_msg.append('bookmark: %s<br/>' % book)
70 for tag in cs.tags:
71 desc_msg.append('tag: %s<br/>' % tag)
72
63 73 changes = []
64 74 diff_limit = safe_int(CONFIG.get('rss_cut_off_limit', 32 * 1024))
65 diff_processor = DiffProcessor(cs.diff(),
75 raw_diff = cs.diff()
76 diff_processor = DiffProcessor(raw_diff,
66 77 diff_limit=diff_limit)
67 78 _parsed = diff_processor.prepare(inline_diff=False)
68 79 limited_diff = False
69 80 if isinstance(_parsed, LimitedDiffContainer):
70 81 limited_diff = True
71 82
72 83 for st in _parsed:
73 84 st.update({'added': st['stats']['added'],
74 85 'removed': st['stats']['deleted']})
75 86 changes.append('\n %(operation)s %(filename)s '
76 87 '(%(added)s lines added, %(removed)s lines removed)'
77 88 % st)
78 89 if limited_diff:
79 90 changes = changes + ['\n ' +
80 91 _('Changeset was too big and was cut off...')]
81 return diff_processor, changes
82 92
83 def __get_desc(self, cs):
84 desc_msg = [(_('%s committed on %s')
85 % (h.person(cs.author), h.fmt_date(cs.date))) + '<br/>']
86 # branches, tags, bookmarks
87 if cs.branch:
88 desc_msg.append('branch: %s<br/>' % cs.branch)
89 for book in cs.bookmarks:
90 desc_msg.append('bookmark: %s<br/>' % book)
91 for tag in cs.tags:
92 desc_msg.append('tag: %s<br/>' % tag)
93 diff_processor, changes = self.__changes(cs)
94 93 # rev link
95 94 _url = h.canonical_url('changeset_home', repo_name=c.db_repo.repo_name,
96 95 revision=cs.raw_id)
97 96 desc_msg.append('changeset: <a href="%s">%s</a>' % (_url, cs.raw_id[:8]))
98 97
99 98 desc_msg.append('<pre>')
100 99 desc_msg.append(h.urlify_text(cs.message))
101 100 desc_msg.append('\n')
102 101 desc_msg.extend(changes)
103 102 if str2bool(CONFIG.get('rss_include_diff', False)):
104 103 desc_msg.append('\n\n')
105 desc_msg.append(diff_processor.as_raw())
104 desc_msg.append(raw_diff)
106 105 desc_msg.append('</pre>')
107 106 return map(safe_unicode, desc_msg)
108 107
109 108 def atom(self, repo_name):
110 109 """Produce an atom-1.0 feed via feedgenerator module"""
111 110
112 111 @cache_region('long_term', '_get_feed_from_cache')
113 112 def _get_feed_from_cache(key, kind):
114 113 feed = Atom1Feed(
115 114 title=_('%s %s feed') % (c.site_name, repo_name),
116 115 link=h.canonical_url('summary_home', repo_name=repo_name),
117 116 description=_('Changes on %s repository') % repo_name,
118 117 language=language,
119 118 ttl=ttl
120 119 )
121 120
122 121 rss_items_per_page = safe_int(CONFIG.get('rss_items_per_page', 20))
123 122 for cs in reversed(list(c.db_repo_scm_instance[-rss_items_per_page:])):
124 123 feed.add_item(title=self._get_title(cs),
125 124 link=h.canonical_url('changeset_home', repo_name=repo_name,
126 125 revision=cs.raw_id),
127 126 author_name=cs.author,
128 127 description=''.join(self.__get_desc(cs)),
129 128 pubdate=cs.date,
130 129 )
131 130
132 131 response.content_type = feed.mime_type
133 132 return feed.writeString('utf-8')
134 133
135 134 kind = 'ATOM'
136 135 valid = CacheInvalidation.test_and_set_valid(repo_name, kind)
137 136 if not valid:
138 137 region_invalidate(_get_feed_from_cache, None, '_get_feed_from_cache', repo_name, kind)
139 138 return _get_feed_from_cache(repo_name, kind)
140 139
141 140 def rss(self, repo_name):
142 141 """Produce an rss2 feed via feedgenerator module"""
143 142
144 143 @cache_region('long_term', '_get_feed_from_cache')
145 144 def _get_feed_from_cache(key, kind):
146 145 feed = Rss201rev2Feed(
147 146 title=_('%s %s feed') % (c.site_name, repo_name),
148 147 link=h.canonical_url('summary_home', repo_name=repo_name),
149 148 description=_('Changes on %s repository') % repo_name,
150 149 language=language,
151 150 ttl=ttl
152 151 )
153 152
154 153 rss_items_per_page = safe_int(CONFIG.get('rss_items_per_page', 20))
155 154 for cs in reversed(list(c.db_repo_scm_instance[-rss_items_per_page:])):
156 155 feed.add_item(title=self._get_title(cs),
157 156 link=h.canonical_url('changeset_home', repo_name=repo_name,
158 157 revision=cs.raw_id),
159 158 author_name=cs.author,
160 159 description=''.join(self.__get_desc(cs)),
161 160 pubdate=cs.date,
162 161 )
163 162
164 163 response.content_type = feed.mime_type
165 164 return feed.writeString('utf-8')
166 165
167 166 kind = 'RSS'
168 167 valid = CacheInvalidation.test_and_set_valid(repo_name, kind)
169 168 if not valid:
170 169 region_invalidate(_get_feed_from_cache, None, '_get_feed_from_cache', repo_name, kind)
171 170 return _get_feed_from_cache(repo_name, kind)
@@ -1,785 +1,782 b''
1 1 # -*- coding: utf-8 -*-
2 2 # This program is free software: you can redistribute it and/or modify
3 3 # it under the terms of the GNU General Public License as published by
4 4 # the Free Software Foundation, either version 3 of the License, or
5 5 # (at your option) any later version.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 """
15 15 kallithea.controllers.files
16 16 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
17 17
18 18 Files controller for Kallithea
19 19
20 20 This file was forked by the Kallithea project in July 2014.
21 21 Original author and date, and relevant copyright and licensing information is below:
22 22 :created_on: Apr 21, 2010
23 23 :author: marcink
24 24 :copyright: (c) 2013 RhodeCode GmbH, and others.
25 25 :license: GPLv3, see LICENSE.md for more details.
26 26 """
27 27
28 28 import os
29 29 import posixpath
30 30 import logging
31 31 import traceback
32 32 import tempfile
33 33 import shutil
34 34
35 35 from tg import request, response, tmpl_context as c
36 36 from tg.i18n import ugettext as _
37 37 from webob.exc import HTTPFound
38 38
39 39 from kallithea.config.routing import url
40 40 from kallithea.lib.utils import action_logger
41 41 from kallithea.lib import diffs
42 42 from kallithea.lib import helpers as h
43 43
44 44 from kallithea.lib.compat import OrderedDict
45 45 from kallithea.lib.utils2 import convert_line_endings, detect_mode, safe_str, \
46 46 str2bool, safe_int
47 47 from kallithea.lib.auth import LoginRequired, HasRepoPermissionLevelDecorator
48 48 from kallithea.lib.base import BaseRepoController, render, jsonify
49 49 from kallithea.lib.vcs.backends.base import EmptyChangeset
50 50 from kallithea.lib.vcs.conf import settings
51 51 from kallithea.lib.vcs.exceptions import RepositoryError, \
52 52 ChangesetDoesNotExistError, EmptyRepositoryError, \
53 53 ImproperArchiveTypeError, VCSError, NodeAlreadyExistsError, \
54 54 NodeDoesNotExistError, ChangesetError, NodeError
55 55 from kallithea.lib.vcs.nodes import FileNode
56 56
57 57 from kallithea.model.repo import RepoModel
58 58 from kallithea.model.scm import ScmModel
59 59 from kallithea.model.db import Repository
60 60
61 61 from kallithea.controllers.changeset import anchor_url, _ignorews_url, \
62 62 _context_url, get_line_ctx, get_ignore_ws
63 63 from webob.exc import HTTPNotFound
64 64 from kallithea.lib.exceptions import NonRelativePathError
65 65
66 66
67 67 log = logging.getLogger(__name__)
68 68
69 69
70 70 class FilesController(BaseRepoController):
71 71
72 72 def _before(self, *args, **kwargs):
73 73 super(FilesController, self)._before(*args, **kwargs)
74 74
75 75 def __get_cs(self, rev, silent_empty=False):
76 76 """
77 77 Safe way to get changeset if error occur it redirects to tip with
78 78 proper message
79 79
80 80 :param rev: revision to fetch
81 81 :silent_empty: return None if repository is empty
82 82 """
83 83
84 84 try:
85 85 return c.db_repo_scm_instance.get_changeset(rev)
86 86 except EmptyRepositoryError as e:
87 87 if silent_empty:
88 88 return None
89 89 url_ = url('files_add_home',
90 90 repo_name=c.repo_name,
91 91 revision=0, f_path='', anchor='edit')
92 92 add_new = h.link_to(_('Click here to add new file'), url_, class_="alert-link")
93 93 h.flash(h.literal(_('There are no files yet. %s') % add_new),
94 94 category='warning')
95 95 raise HTTPNotFound()
96 96 except (ChangesetDoesNotExistError, LookupError):
97 97 msg = _('Such revision does not exist for this repository')
98 98 h.flash(msg, category='error')
99 99 raise HTTPNotFound()
100 100 except RepositoryError as e:
101 101 h.flash(safe_str(e), category='error')
102 102 raise HTTPNotFound()
103 103
104 104 def __get_filenode(self, cs, path):
105 105 """
106 106 Returns file_node or raise HTTP error.
107 107
108 108 :param cs: given changeset
109 109 :param path: path to lookup
110 110 """
111 111
112 112 try:
113 113 file_node = cs.get_node(path)
114 114 if file_node.is_dir():
115 115 raise RepositoryError('given path is a directory')
116 116 except ChangesetDoesNotExistError:
117 117 msg = _('Such revision does not exist for this repository')
118 118 h.flash(msg, category='error')
119 119 raise HTTPNotFound()
120 120 except RepositoryError as e:
121 121 h.flash(safe_str(e), category='error')
122 122 raise HTTPNotFound()
123 123
124 124 return file_node
125 125
126 126 @LoginRequired()
127 127 @HasRepoPermissionLevelDecorator('read')
128 128 def index(self, repo_name, revision, f_path, annotate=False):
129 129 # redirect to given revision from form if given
130 130 post_revision = request.POST.get('at_rev', None)
131 131 if post_revision:
132 132 cs = self.__get_cs(post_revision) # FIXME - unused!
133 133
134 134 c.revision = revision
135 135 c.changeset = self.__get_cs(revision)
136 136 c.branch = request.GET.get('branch', None)
137 137 c.f_path = f_path
138 138 c.annotate = annotate
139 139 cur_rev = c.changeset.revision
140 140 # used in files_source.html:
141 141 c.cut_off_limit = self.cut_off_limit
142 142 c.fulldiff = request.GET.get('fulldiff')
143 143
144 144 # prev link
145 145 try:
146 146 prev_rev = c.db_repo_scm_instance.get_changeset(cur_rev).prev(c.branch)
147 147 c.url_prev = url('files_home', repo_name=c.repo_name,
148 148 revision=prev_rev.raw_id, f_path=f_path)
149 149 if c.branch:
150 150 c.url_prev += '?branch=%s' % c.branch
151 151 except (ChangesetDoesNotExistError, VCSError):
152 152 c.url_prev = '#'
153 153
154 154 # next link
155 155 try:
156 156 next_rev = c.db_repo_scm_instance.get_changeset(cur_rev).next(c.branch)
157 157 c.url_next = url('files_home', repo_name=c.repo_name,
158 158 revision=next_rev.raw_id, f_path=f_path)
159 159 if c.branch:
160 160 c.url_next += '?branch=%s' % c.branch
161 161 except (ChangesetDoesNotExistError, VCSError):
162 162 c.url_next = '#'
163 163
164 164 # files or dirs
165 165 try:
166 166 c.file = c.changeset.get_node(f_path)
167 167
168 168 if c.file.is_file():
169 169 c.load_full_history = False
170 170 # determine if we're on branch head
171 171 _branches = c.db_repo_scm_instance.branches
172 172 c.on_branch_head = revision in _branches.keys() + _branches.values()
173 173 _hist = []
174 174 c.file_history = []
175 175 if c.load_full_history:
176 176 c.file_history, _hist = self._get_node_history(c.changeset, f_path)
177 177
178 178 c.authors = []
179 179 for a in set([x.author for x in _hist]):
180 180 c.authors.append((h.email(a), h.person(a)))
181 181 else:
182 182 c.authors = c.file_history = []
183 183 except RepositoryError as e:
184 184 h.flash(safe_str(e), category='error')
185 185 raise HTTPNotFound()
186 186
187 187 if request.environ.get('HTTP_X_PARTIAL_XHR'):
188 188 return render('files/files_ypjax.html')
189 189
190 190 # TODO: tags and bookmarks?
191 191 c.revision_options = [(c.changeset.raw_id,
192 192 _('%s at %s') % (c.changeset.branch, h.short_id(c.changeset.raw_id)))] + \
193 193 [(n, b) for b, n in c.db_repo_scm_instance.branches.items()]
194 194 if c.db_repo_scm_instance.closed_branches:
195 195 prefix = _('(closed)') + ' '
196 196 c.revision_options += [('-', '-')] + \
197 197 [(n, prefix + b) for b, n in c.db_repo_scm_instance.closed_branches.items()]
198 198
199 199 return render('files/files.html')
200 200
201 201 @LoginRequired()
202 202 @HasRepoPermissionLevelDecorator('read')
203 203 @jsonify
204 204 def history(self, repo_name, revision, f_path):
205 205 changeset = self.__get_cs(revision)
206 206 _file = changeset.get_node(f_path)
207 207 if _file.is_file():
208 208 file_history, _hist = self._get_node_history(changeset, f_path)
209 209
210 210 res = []
211 211 for obj in file_history:
212 212 res.append({
213 213 'text': obj[1],
214 214 'children': [{'id': o[0], 'text': o[1]} for o in obj[0]]
215 215 })
216 216
217 217 data = {
218 218 'more': False,
219 219 'results': res
220 220 }
221 221 return data
222 222
223 223 @LoginRequired()
224 224 @HasRepoPermissionLevelDecorator('read')
225 225 def authors(self, repo_name, revision, f_path):
226 226 changeset = self.__get_cs(revision)
227 227 _file = changeset.get_node(f_path)
228 228 if _file.is_file():
229 229 file_history, _hist = self._get_node_history(changeset, f_path)
230 230 c.authors = []
231 231 for a in set([x.author for x in _hist]):
232 232 c.authors.append((h.email(a), h.person(a)))
233 233 return render('files/files_history_box.html')
234 234
235 235 @LoginRequired()
236 236 @HasRepoPermissionLevelDecorator('read')
237 237 def rawfile(self, repo_name, revision, f_path):
238 238 cs = self.__get_cs(revision)
239 239 file_node = self.__get_filenode(cs, f_path)
240 240
241 241 response.content_disposition = 'attachment; filename=%s' % \
242 242 safe_str(f_path.split(Repository.url_sep())[-1])
243 243
244 244 response.content_type = file_node.mimetype
245 245 return file_node.content
246 246
247 247 @LoginRequired()
248 248 @HasRepoPermissionLevelDecorator('read')
249 249 def raw(self, repo_name, revision, f_path):
250 250 cs = self.__get_cs(revision)
251 251 file_node = self.__get_filenode(cs, f_path)
252 252
253 253 raw_mimetype_mapping = {
254 254 # map original mimetype to a mimetype used for "show as raw"
255 255 # you can also provide a content-disposition to override the
256 256 # default "attachment" disposition.
257 257 # orig_type: (new_type, new_dispo)
258 258
259 259 # show images inline:
260 260 'image/x-icon': ('image/x-icon', 'inline'),
261 261 'image/png': ('image/png', 'inline'),
262 262 'image/gif': ('image/gif', 'inline'),
263 263 'image/jpeg': ('image/jpeg', 'inline'),
264 264 'image/svg+xml': ('image/svg+xml', 'inline'),
265 265 }
266 266
267 267 mimetype = file_node.mimetype
268 268 try:
269 269 mimetype, dispo = raw_mimetype_mapping[mimetype]
270 270 except KeyError:
271 271 # we don't know anything special about this, handle it safely
272 272 if file_node.is_binary:
273 273 # do same as download raw for binary files
274 274 mimetype, dispo = 'application/octet-stream', 'attachment'
275 275 else:
276 276 # do not just use the original mimetype, but force text/plain,
277 277 # otherwise it would serve text/html and that might be unsafe.
278 278 # Note: underlying vcs library fakes text/plain mimetype if the
279 279 # mimetype can not be determined and it thinks it is not
280 280 # binary.This might lead to erroneous text display in some
281 281 # cases, but helps in other cases, like with text files
282 282 # without extension.
283 283 mimetype, dispo = 'text/plain', 'inline'
284 284
285 285 if dispo == 'attachment':
286 286 dispo = 'attachment; filename=%s' % \
287 287 safe_str(f_path.split(os.sep)[-1])
288 288
289 289 response.content_disposition = dispo
290 290 response.content_type = mimetype
291 291 return file_node.content
292 292
    @LoginRequired()
    @HasRepoPermissionLevelDecorator('write')
    def delete(self, repo_name, revision, f_path):
        """Show and process the "delete file" form.

        GET renders the confirmation page; POST commits a deletion of
        f_path on top of the given branch head.  Redirects away when the
        repository is locked, or when revision is not a branch identifier
        (committing on a non-head would create a new head).
        """
        repo = c.db_repo
        # a locked repository must not be modified through the web UI
        if repo.enable_locking and repo.locked[0]:
            h.flash(_('This repository has been locked by %s on %s')
                % (h.person_by_id(repo.locked[0]),
                   h.fmt_date(h.time_to_datetime(repo.locked[1]))),
                'warning')
            raise HTTPFound(location=h.url('files_home',
                                  repo_name=repo_name, revision='tip'))

        # check if revision is a branch identifier- basically we cannot
        # create multiple heads via file editing
        _branches = repo.scm_instance.branches
        # check if revision is a branch name or branch hash
        if revision not in _branches.keys() + _branches.values():
            h.flash(_('You can only delete files with revision '
                      'being a valid branch'), category='warning')
            raise HTTPFound(location=h.url('files_home',
                                  repo_name=repo_name, revision='tip',
                                  f_path=f_path))

        r_post = request.POST

        c.cs = self.__get_cs(revision)
        c.file = self.__get_filenode(c.cs, f_path)

        c.default_message = _('Deleted file %s via Kallithea') % (f_path)
        c.f_path = f_path
        node_path = f_path
        author = request.authuser.full_contact

        if r_post:
            message = r_post.get('message') or c.default_message

            try:
                # empty content: delete_nodes identifies nodes by path
                nodes = {
                    node_path: {
                        'content': ''
                    }
                }
                self.scm_model.delete_nodes(
                    user=request.authuser.user_id, repo=c.db_repo,
                    message=message,
                    nodes=nodes,
                    parent_cs=c.cs,
                    author=author,
                )

                h.flash(_('Successfully deleted file %s') % f_path,
                        category='success')
            except Exception:
                # best effort: report the failure but still redirect
                log.error(traceback.format_exc())
                h.flash(_('Error occurred during commit'), category='error')
            raise HTTPFound(location=url('changeset_home',
                                 repo_name=c.repo_name, revision='tip'))

        return render('files/files_delete.html')
352 352
    @LoginRequired()
    @HasRepoPermissionLevelDecorator('write')
    def edit(self, repo_name, revision, f_path):
        """Show and process the "edit file" form.

        GET renders the editor; POST commits the new content on top of
        the given branch head.  Redirects away when the repository is
        locked, when revision is not a branch identifier (committing on
        a non-head would create a new head), or when the file is binary.
        """
        repo = c.db_repo
        # a locked repository must not be modified through the web UI
        if repo.enable_locking and repo.locked[0]:
            h.flash(_('This repository has been locked by %s on %s')
                % (h.person_by_id(repo.locked[0]),
                   h.fmt_date(h.time_to_datetime(repo.locked[1]))),
                'warning')
            raise HTTPFound(location=h.url('files_home',
                                  repo_name=repo_name, revision='tip'))

        # check if revision is a branch identifier- basically we cannot
        # create multiple heads via file editing
        _branches = repo.scm_instance.branches
        # check if revision is a branch name or branch hash
        if revision not in _branches.keys() + _branches.values():
            h.flash(_('You can only edit files with revision '
                      'being a valid branch'), category='warning')
            raise HTTPFound(location=h.url('files_home',
                                  repo_name=repo_name, revision='tip',
                                  f_path=f_path))

        r_post = request.POST

        c.cs = self.__get_cs(revision)
        c.file = self.__get_filenode(c.cs, f_path)

        # binary files cannot be edited in a textarea - show them instead
        if c.file.is_binary:
            raise HTTPFound(location=url('files_home', repo_name=c.repo_name,
                                  revision=c.cs.raw_id, f_path=f_path))
        c.default_message = _('Edited file %s via Kallithea') % (f_path)
        c.f_path = f_path

        if r_post:

            old_content = c.file.content
            sl = old_content.splitlines(1)
            first_line = sl[0] if sl else ''
            # modes: 0 - Unix, 1 - Mac, 2 - DOS
            # detect from the first existing line so the posted content
            # keeps the file's current line-ending convention
            mode = detect_mode(first_line, 0)
            content = convert_line_endings(r_post.get('content', ''), mode)

            message = r_post.get('message') or c.default_message
            author = request.authuser.full_contact

            if content == old_content:
                h.flash(_('No changes'), category='warning')
                raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name,
                                      revision='tip'))
            try:
                self.scm_model.commit_change(repo=c.db_repo_scm_instance,
                                             repo_name=repo_name, cs=c.cs,
                                             user=request.authuser.user_id,
                                             author=author, message=message,
                                             content=content, f_path=f_path)
                h.flash(_('Successfully committed to %s') % f_path,
                        category='success')
            except Exception:
                # best effort: report the failure but still redirect
                log.error(traceback.format_exc())
                h.flash(_('Error occurred during commit'), category='error')
            raise HTTPFound(location=url('changeset_home',
                                 repo_name=c.repo_name, revision='tip'))

        return render('files/files_edit.html')
418 418
    @LoginRequired()
    @HasRepoPermissionLevelDecorator('write')
    def add(self, repo_name, revision, f_path):
        """Show and process the "add file" form.

        GET renders the form; POST commits new content - either pasted
        in the textarea or uploaded as a file (the upload wins) - at
        location/filename.  Also works on an empty repository by
        committing on top of a synthetic EmptyChangeset.
        """

        repo = c.db_repo
        # a locked repository must not be modified through the web UI
        if repo.enable_locking and repo.locked[0]:
            h.flash(_('This repository has been locked by %s on %s')
                % (h.person_by_id(repo.locked[0]),
                   h.fmt_date(h.time_to_datetime(repo.locked[1]))),
                'warning')
            raise HTTPFound(location=h.url('files_home',
                                  repo_name=repo_name, revision='tip'))

        r_post = request.POST
        c.cs = self.__get_cs(revision, silent_empty=True)
        if c.cs is None:
            # empty repository - use a synthetic empty parent changeset
            c.cs = EmptyChangeset(alias=c.db_repo_scm_instance.alias)
        c.default_message = (_('Added file via Kallithea'))
        c.f_path = f_path

        if r_post:
            unix_mode = 0
            content = convert_line_endings(r_post.get('content', ''), unix_mode)

            message = r_post.get('message') or c.default_message
            filename = r_post.get('filename')
            location = r_post.get('location', '')
            file_obj = r_post.get('upload_file', None)

            # an uploaded file takes precedence over the textarea content
            if file_obj is not None and hasattr(file_obj, 'filename'):
                filename = file_obj.filename
                content = file_obj.file

                if hasattr(content, 'file'):
                    # non posix systems store real file under file attr
                    content = content.file

            if not content:
                h.flash(_('No content'), category='warning')
                raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name,
                                      revision='tip'))
            if not filename:
                h.flash(_('No filename'), category='warning')
                raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name,
                                      revision='tip'))
            # strip all crap out of file, just leave the basename
            filename = os.path.basename(filename)
            node_path = posixpath.join(location, filename)
            author = request.authuser.full_contact

            try:
                nodes = {
                    node_path: {
                        'content': content
                    }
                }
                self.scm_model.create_nodes(
                    user=request.authuser.user_id, repo=c.db_repo,
                    message=message,
                    nodes=nodes,
                    parent_cs=c.cs,
                    author=author,
                )

                h.flash(_('Successfully committed to %s') % node_path,
                        category='success')
            except NonRelativePathError as e:
                h.flash(_('Location must be relative path and must not '
                          'contain .. in path'), category='warning')
                raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name,
                                      revision='tip'))
            except (NodeError, NodeAlreadyExistsError) as e:
                h.flash(_(e), category='error')
            except Exception:
                # best effort: report the failure but still redirect
                log.error(traceback.format_exc())
                h.flash(_('Error occurred during commit'), category='error')
            raise HTTPFound(location=url('changeset_home',
                                 repo_name=c.repo_name, revision='tip'))

        return render('files/files_add.html')
499 499
    @LoginRequired()
    @HasRepoPermissionLevelDecorator('read')
    def archivefile(self, repo_name, fname):
        """Stream an archive of a revision of the repository.

        ``fname`` is ``<revision><ext>`` where the extension selects the
        archive type from ARCHIVE_SPECS.  Generated archives are cached
        on disk when ``archive_cache_dir`` is configured (subrepo
        archives are never cached).
        """
        fileformat = None
        revision = None
        ext = None
        subrepos = request.GET.get('subrepos') == 'true'

        # split fname into revision and archive type via known extensions
        for a_type, ext_data in settings.ARCHIVE_SPECS.items():
            archive_spec = fname.split(ext_data[1])
            if len(archive_spec) == 2 and archive_spec[1] == '':
                fileformat = a_type or ext_data[1]
                revision = archive_spec[0]
                ext = ext_data[1]

        try:
            dbrepo = RepoModel().get_by_repo_name(repo_name)
            if not dbrepo.enable_downloads:
                return _('Downloads disabled') # TODO: do something else?

            if c.db_repo_scm_instance.alias == 'hg':
                # patch and reset hooks section of UI config to not run any
                # hooks on fetching archives with subrepos
                for k, v in c.db_repo_scm_instance._repo.ui.configitems('hooks'):
                    c.db_repo_scm_instance._repo.ui.setconfig('hooks', k, None)

            cs = c.db_repo_scm_instance.get_changeset(revision)
            # KeyError here (fileformat None/unknown) means bad extension
            content_type = settings.ARCHIVE_SPECS[fileformat][0]
        except ChangesetDoesNotExistError:
            return _('Unknown revision %s') % revision
        except EmptyRepositoryError:
            return _('Empty repository')
        except (ImproperArchiveTypeError, KeyError):
            return _('Unknown archive type')

        from kallithea import CONFIG
        rev_name = cs.raw_id[:12]
        archive_name = '%s-%s%s' % (safe_str(repo_name.replace('/', '_')),
                                    safe_str(rev_name), ext)

        archive_path = None
        cached_archive_path = None
        archive_cache_dir = CONFIG.get('archive_cache_dir')
        if archive_cache_dir and not subrepos: # TODO: subrepo caching?
            if not os.path.isdir(archive_cache_dir):
                os.makedirs(archive_cache_dir)
            cached_archive_path = os.path.join(archive_cache_dir, archive_name)
            if os.path.isfile(cached_archive_path):
                log.debug('Found cached archive in %s', cached_archive_path)
                archive_path = cached_archive_path
            else:
                log.debug('Archive %s is not yet cached', archive_name)

        if archive_path is None:
            # generate new archive
            fd, archive_path = tempfile.mkstemp()
            log.debug('Creating new temp archive in %s', archive_path)
            with os.fdopen(fd, 'wb') as stream:
                cs.fill_archive(stream=stream, kind=fileformat, subrepos=subrepos)
                # stream (and thus fd) has been closed by cs.fill_archive
            if cached_archive_path is not None:
                # we generated the archive - move it to cache
                log.debug('Storing new archive in %s', cached_archive_path)
                shutil.move(archive_path, cached_archive_path)
                archive_path = cached_archive_path

        def get_chunked_archive(archive_path):
            # generator streaming the archive in 16k chunks; removes the
            # file afterwards unless it lives in the cache
            stream = open(archive_path, 'rb')
            while True:
                data = stream.read(16 * 1024)
                if not data:
                    break
                yield data
            stream.close()
            if archive_path != cached_archive_path:
                log.debug('Destroying temp archive %s', archive_path)
                os.remove(archive_path)

        action_logger(user=request.authuser,
                      action='user_downloaded_archive:%s' % (archive_name),
                      repo=repo_name, ipaddr=request.ip_addr, commit=True)

        response.content_disposition = str('attachment; filename=%s' % (archive_name))
        response.content_type = str(content_type)
        return get_chunked_archive(archive_path)
585 585
    @LoginRequired()
    @HasRepoPermissionLevelDecorator('read')
    def diff(self, repo_name, f_path):
        """Show or download the diff of f_path between two revisions.

        GET parameters: ``diff1``/``diff2`` select the revisions,
        ``diff`` selects the action ('download', 'raw', or default
        inline HTML rendering), ``ignorews``/``context``/``fulldiff``
        tune the diff, and ``show_rev`` redirects to the plain file (or
        annotation) view instead.
        """
        ignore_whitespace = request.GET.get('ignorews') == '1'
        line_context = safe_int(request.GET.get('context'), 3)
        diff2 = request.GET.get('diff2', '')
        diff1 = request.GET.get('diff1', '') or diff2
        c.action = request.GET.get('diff')
        c.no_changes = diff1 == diff2
        c.f_path = f_path
        c.big_diff = False
        fulldiff = request.GET.get('fulldiff')
        c.anchor_url = anchor_url
        c.ignorews_url = _ignorews_url
        c.context_url = _context_url
        c.changes = OrderedDict()
        c.changes[diff2] = []

        # special case if we want a show rev only, it's impl here
        # to reduce JS and callbacks

        if request.GET.get('show_rev'):
            if str2bool(request.GET.get('annotate', 'False')):
                _url = url('files_annotate_home', repo_name=c.repo_name,
                           revision=diff1, f_path=c.f_path)
            else:
                _url = url('files_home', repo_name=c.repo_name,
                           revision=diff1, f_path=c.f_path)

            raise HTTPFound(location=_url)
        try:
            # resolve each side to a (changeset, file node) pair, falling
            # back to an EmptyChangeset / empty FileNode when the
            # revision is unset or the node does not exist there
            if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]:
                c.changeset_1 = c.db_repo_scm_instance.get_changeset(diff1)
                try:
                    node1 = c.changeset_1.get_node(f_path)
                    if node1.is_dir():
                        raise NodeError('%s path is a %s not a file'
                                        % (node1, type(node1)))
                except NodeDoesNotExistError:
                    c.changeset_1 = EmptyChangeset(cs=diff1,
                                                   revision=c.changeset_1.revision,
                                                   repo=c.db_repo_scm_instance)
                    node1 = FileNode(f_path, '', changeset=c.changeset_1)
            else:
                c.changeset_1 = EmptyChangeset(repo=c.db_repo_scm_instance)
                node1 = FileNode(f_path, '', changeset=c.changeset_1)

            if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]:
                c.changeset_2 = c.db_repo_scm_instance.get_changeset(diff2)
                try:
                    node2 = c.changeset_2.get_node(f_path)
                    if node2.is_dir():
                        raise NodeError('%s path is a %s not a file'
                                        % (node2, type(node2)))
                except NodeDoesNotExistError:
                    c.changeset_2 = EmptyChangeset(cs=diff2,
                                                   revision=c.changeset_2.revision,
                                                   repo=c.db_repo_scm_instance)
                    node2 = FileNode(f_path, '', changeset=c.changeset_2)
            else:
                c.changeset_2 = EmptyChangeset(repo=c.db_repo_scm_instance)
                node2 = FileNode(f_path, '', changeset=c.changeset_2)
        except (RepositoryError, NodeError):
            log.error(traceback.format_exc())
            raise HTTPFound(location=url('files_home', repo_name=c.repo_name,
                                  f_path=f_path))

        if c.action == 'download':
            # serve the plain git-style diff as an attachment
            raw_diff = diffs.get_gitdiff(node1, node2,
                                         ignore_whitespace=ignore_whitespace,
                                         context=line_context)
            diff_name = '%s_vs_%s.diff' % (diff1, diff2)
            response.content_type = 'text/plain'
            response.content_disposition = (
                'attachment; filename=%s' % diff_name
            )
            return raw_diff

        elif c.action == 'raw':
            # serve the plain git-style diff inline
            raw_diff = diffs.get_gitdiff(node1, node2,
                                         ignore_whitespace=ignore_whitespace,
                                         context=line_context)
            response.content_type = 'text/plain'
            return raw_diff

        else:
            # inline HTML rendering with per-file context/whitespace opts
            fid = h.FID(diff2, node2.path)
            line_context_lcl = get_line_ctx(fid, request.GET)
            ign_whitespace_lcl = get_ignore_ws(fid, request.GET)

            diff_limit = None if fulldiff else self.cut_off_limit
            c.a_rev, c.cs_rev, a_path, diff, st, op = diffs.wrapped_diff(filenode_old=node1,
                                                                         filenode_new=node2,
                                                                         diff_limit=diff_limit,
                                                                         ignore_whitespace=ign_whitespace_lcl,
                                                                         line_context=line_context_lcl,
                                                                         enable_comments=False)
            c.file_diff_data = [(fid, fid, op, a_path, node2.path, diff, st)]

            return render('files/file_diff.html')
689 686
    @LoginRequired()
    @HasRepoPermissionLevelDecorator('read')
    def diff_2way(self, repo_name, f_path):
        """Render the side-by-side (2-way) diff view of f_path between
        the ``diff1`` and ``diff2`` GET revisions."""
        diff1 = request.GET.get('diff1', '')
        diff2 = request.GET.get('diff2', '')
        try:
            # resolve each side to a (changeset, file node) pair, falling
            # back to an EmptyChangeset / empty FileNode when the
            # revision is unset or the node does not exist there
            if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]:
                c.changeset_1 = c.db_repo_scm_instance.get_changeset(diff1)
                try:
                    node1 = c.changeset_1.get_node(f_path)
                    if node1.is_dir():
                        raise NodeError('%s path is a %s not a file'
                                        % (node1, type(node1)))
                except NodeDoesNotExistError:
                    c.changeset_1 = EmptyChangeset(cs=diff1,
                                                   revision=c.changeset_1.revision,
                                                   repo=c.db_repo_scm_instance)
                    node1 = FileNode(f_path, '', changeset=c.changeset_1)
            else:
                c.changeset_1 = EmptyChangeset(repo=c.db_repo_scm_instance)
                node1 = FileNode(f_path, '', changeset=c.changeset_1)

            if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]:
                c.changeset_2 = c.db_repo_scm_instance.get_changeset(diff2)
                try:
                    node2 = c.changeset_2.get_node(f_path)
                    if node2.is_dir():
                        raise NodeError('%s path is a %s not a file'
                                        % (node2, type(node2)))
                except NodeDoesNotExistError:
                    c.changeset_2 = EmptyChangeset(cs=diff2,
                                                   revision=c.changeset_2.revision,
                                                   repo=c.db_repo_scm_instance)
                    node2 = FileNode(f_path, '', changeset=c.changeset_2)
            else:
                c.changeset_2 = EmptyChangeset(repo=c.db_repo_scm_instance)
                node2 = FileNode(f_path, '', changeset=c.changeset_2)
        except ChangesetDoesNotExistError as e:
            msg = _('Such revision does not exist for this repository')
            h.flash(msg, category='error')
            raise HTTPNotFound()
        c.node1 = node1
        c.node2 = node2
        c.cs1 = c.changeset_1
        c.cs2 = c.changeset_2

        return render('files/diff_2way.html')
737 734
738 735 def _get_node_history(self, cs, f_path, changesets=None):
739 736 """
740 737 get changesets history for given node
741 738
742 739 :param cs: changeset to calculate history
743 740 :param f_path: path for node to calculate history for
744 741 :param changesets: if passed don't calculate history and take
745 742 changesets defined in this list
746 743 """
747 744 # calculate history based on tip
748 745 tip_cs = c.db_repo_scm_instance.get_changeset()
749 746 if changesets is None:
750 747 try:
751 748 changesets = tip_cs.get_file_history(f_path)
752 749 except (NodeDoesNotExistError, ChangesetError):
753 750 # this node is not present at tip !
754 751 changesets = cs.get_file_history(f_path)
755 752 hist_l = []
756 753
757 754 changesets_group = ([], _("Changesets"))
758 755 branches_group = ([], _("Branches"))
759 756 tags_group = ([], _("Tags"))
760 757 for chs in changesets:
761 758 #_branch = '(%s)' % chs.branch if (cs.repository.alias == 'hg') else ''
762 759 _branch = chs.branch
763 760 n_desc = '%s (%s)' % (h.show_id(chs), _branch)
764 761 changesets_group[0].append((chs.raw_id, n_desc,))
765 762 hist_l.append(changesets_group)
766 763
767 764 for name, chs in c.db_repo_scm_instance.branches.items():
768 765 branches_group[0].append((chs, name),)
769 766 hist_l.append(branches_group)
770 767
771 768 for name, chs in c.db_repo_scm_instance.tags.items():
772 769 tags_group[0].append((chs, name),)
773 770 hist_l.append(tags_group)
774 771
775 772 return hist_l, changesets
776 773
777 774 @LoginRequired()
778 775 @HasRepoPermissionLevelDecorator('read')
779 776 @jsonify
780 777 def nodelist(self, repo_name, revision, f_path):
781 778 if request.environ.get('HTTP_X_PARTIAL_XHR'):
782 779 cs = self.__get_cs(revision)
783 780 _d, _f = ScmModel().get_nodes(repo_name, cs.raw_id, f_path,
784 781 flat=False)
785 782 return {'nodes': _d + _f}
@@ -1,713 +1,707 b''
1 1 # -*- coding: utf-8 -*-
2 2 # This program is free software: you can redistribute it and/or modify
3 3 # it under the terms of the GNU General Public License as published by
4 4 # the Free Software Foundation, either version 3 of the License, or
5 5 # (at your option) any later version.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 """
15 15 kallithea.lib.diffs
16 16 ~~~~~~~~~~~~~~~~~~~
17 17
18 18 Set of diffing helpers, previously part of vcs
19 19
20 20
21 21 This file was forked by the Kallithea project in July 2014.
22 22 Original author and date, and relevant copyright and licensing information is below:
23 23 :created_on: Dec 4, 2011
24 24 :author: marcink
25 25 :copyright: (c) 2013 RhodeCode GmbH, and others.
26 26 :license: GPLv3, see LICENSE.md for more details.
27 27 """
28 28 import re
29 29 import difflib
30 30 import logging
31 31
32 32 from itertools import imap
33 33
34 34 from tg.i18n import ugettext as _
35 35
36 36 from kallithea.lib.vcs.exceptions import VCSError
37 37 from kallithea.lib.vcs.nodes import FileNode, SubModuleNode
38 38 from kallithea.lib.vcs.backends.base import EmptyChangeset
39 39 from kallithea.lib.helpers import escape
40 40 from kallithea.lib.utils2 import safe_unicode
41 41
42 42 log = logging.getLogger(__name__)
43 43
44 44
def wrap_to_table(html):
    """Given a string with html, return it wrapped in a table, similar to what
    DiffProcessor returns."""
    table_template = '''\
<table class="code-difftable">
<tr class="line no-comment">
  <td class="lineno new"></td>
  <td class="code no-comment"><pre>%s</pre></td>
</tr>
</table>'''
    return table_template % html
55 55
56 56
def wrapped_diff(filenode_old, filenode_new, diff_limit=None,
                 ignore_whitespace=True, line_context=3,
                 enable_comments=False):
    """
    Returns a file diff wrapped into a table.
    Checks for diff_limit and presents a message if the diff is too big.

    Returns a tuple ``(cs1, cs2, a_path, diff, stats, op)``: the two
    changeset ids, the (possibly renamed-from) old path, the HTML diff
    table, the stat tuple from DiffProcessor, and the file operation
    (None when no single-file entry was parsed).
    """
    if filenode_old is None:
        # diff against nothing - i.e. a newly added file
        filenode_old = FileNode(filenode_new.path, '', EmptyChangeset())

    op = None
    a_path = filenode_old.path # default, might be overriden by actual rename in diff
    if filenode_old.is_binary or filenode_new.is_binary:
        diff = wrap_to_table(_('Binary file'))
        stats = (0, 0)

    elif diff_limit != -1 and (
            diff_limit is None or
            (filenode_old.size < diff_limit and filenode_new.size < diff_limit)):

        f_gitdiff = get_gitdiff(filenode_old, filenode_new,
                                ignore_whitespace=ignore_whitespace,
                                context=line_context)
        diff_processor = DiffProcessor(f_gitdiff)
        _parsed = diff_processor.prepare()
        if _parsed: # there should be exactly one element, for the specified file
            f = _parsed[0]
            op = f['operation']
            a_path = f['old_filename']

        diff = diff_processor.as_html(enable_comments=enable_comments)
        stats = diff_processor.stat()

    else:
        # either side exceeds diff_limit (or limit is -1): don't render
        diff = wrap_to_table(_('Changeset was too big and was cut off, use '
                               'diff menu to display this diff'))
        stats = (0, 0)

    if not diff:
        # an empty diff on a submodule just names the submodule
        submodules = filter(lambda o: isinstance(o, SubModuleNode),
                            [filenode_new, filenode_old])
        if submodules:
            diff = wrap_to_table(escape('Submodule %r' % submodules[0]))
        else:
            diff = wrap_to_table(_('No changes detected'))

    cs1 = filenode_old.changeset.raw_id
    cs2 = filenode_new.changeset.raw_id

    return cs1, cs2, a_path, diff, stats, op
107 107
108 108
def get_gitdiff(filenode_old, filenode_new, ignore_whitespace=True, context=3):
    """
    Returns git style diff between given ``filenode_old`` and ``filenode_new``.
    """
    # make sure we pass in default context
    context = context or 3
    # submodules have no meaningful file diff
    if any(isinstance(o, SubModuleNode)
           for o in (filenode_new, filenode_old)):
        return ''

    for filenode in (filenode_old, filenode_new):
        if not isinstance(filenode, FileNode):
            raise VCSError("Given object should be FileNode object, not %s"
                % filenode.__class__)

    repo = filenode_new.changeset.repository
    # a node without an attached changeset diffs against the empty changeset
    old_raw_id = getattr(filenode_old.changeset, 'raw_id', repo.EMPTY_CHANGESET)
    new_raw_id = getattr(filenode_new.changeset, 'raw_id', repo.EMPTY_CHANGESET)

    return repo.get_diff(old_raw_id, new_raw_id, filenode_new.path,
                         ignore_whitespace, context)
132 132
133 133
# file-operation codes used by DiffProcessor to classify each file entry
NEW_FILENODE = 1
DEL_FILENODE = 2
MOD_FILENODE = 3
RENAMED_FILENODE = 4
COPIED_FILENODE = 5
CHMOD_FILENODE = 6
BIN_FILENODE = 7


class DiffLimitExceeded(Exception):
    """Raised during parsing when the accumulated diff size exceeds the
    configured diff_limit."""
    pass
145 145
146 146
class LimitedDiffContainer(object):
    """Iterable wrapper around a (possibly truncated) parsed diff,
    carrying the limit and the diff size seen when the cut-off hit."""

    def __init__(self, diff_limit, cur_diff_size, diff):
        self.diff = diff
        self.diff_limit = diff_limit
        self.cur_diff_size = cur_diff_size

    def __iter__(self):
        return iter(self.diff)
157 157
158 158
159 159 class DiffProcessor(object):
160 160 """
161 161 Give it a unified or git diff and it returns a list of the files that were
162 162 mentioned in the diff together with a dict of meta information that
163 163 can be used to render it in a HTML template.
164 164 """
165 165 _chunk_re = re.compile(r'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)')
166 166 _newline_marker = re.compile(r'^\\ No newline at end of file')
167 167 _git_header_re = re.compile(r"""
168 168 # has already been split on this:
169 169 # ^diff[ ]--git
170 170 [ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
171 171 (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
172 172 ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
173 173 (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n
174 174 ^rename[ ]from[ ](?P<rename_from>.+)\n
175 175 ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))?
176 176 (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
177 177 (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
178 178 (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
179 179 \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
180 180 (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))?
181 181 (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))?
182 182 (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))?
183 183 """, re.VERBOSE | re.MULTILINE)
184 184 _hg_header_re = re.compile(r"""
185 185 # has already been split on this:
186 186 # ^diff[ ]--git
187 187 [ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
188 188 (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
189 189 ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
190 190 (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%(?:\n|$))?
191 191 (?:^rename[ ]from[ ](?P<rename_from>.+)\n
192 192 ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))?
193 193 (?:^copy[ ]from[ ](?P<copy_from>.+)\n
194 194 ^copy[ ]to[ ](?P<copy_to>.+)(?:\n|$))?
195 195 (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
196 196 (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
197 197 (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
198 198 \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
199 199 (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))?
200 200 (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))?
201 201 (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))?
202 202 """, re.VERBOSE | re.MULTILINE)
203 203
204 204 # Used for inline highlighter word split, must match the substitutions in _escaper
205 205 _token_re = re.compile(r'()(&amp;|&lt;|&gt;|<u>\t</u>|<u class="cr"></u>| <i></i>|\W+?)')
206 206
207 207 _escape_re = re.compile(r'(&)|(<)|(>)|(\t)|(\r)|(?<=.)( \n| $)')
208 208
209 209 def __init__(self, diff, vcs='hg', diff_limit=None):
210 210 """
211 211 :param diff: a text in diff format
212 212 :param vcs: type of version control hg or git
213 213 :param diff_limit: define the size of diff that is considered "big"
214 214 based on that parameter cut off will be triggered, set to None
215 215 to show full diff
216 216 """
217 217 if not isinstance(diff, basestring):
218 218 raise Exception('Diff must be a basestring got %s instead' % type(diff))
219 219
220 220 self._diff = diff
221 221 self.adds = 0
222 222 self.removes = 0
223 223 # calculate diff size
224 224 self.diff_size = len(diff)
225 225 self.diff_limit = diff_limit
226 226 self.cur_diff_size = 0
227 227 self.parsed = False
228 228 self.parsed_diff = []
229 229 self.vcs = vcs
230 230
231 231 def _escaper(self, string):
232 232 """
233 233 Do HTML escaping/markup and check the diff limit
234 234 """
235 235 self.cur_diff_size += len(string)
236 236
237 237 # escaper gets iterated on each .next() call and it checks if each
238 238 # parsed line doesn't exceed the diff limit
239 239 if self.diff_limit is not None and self.cur_diff_size > self.diff_limit:
240 240 raise DiffLimitExceeded('Diff Limit Exceeded')
241 241
242 242 def substitute(m):
243 243 groups = m.groups()
244 244 if groups[0]:
245 245 return '&amp;'
246 246 if groups[1]:
247 247 return '&lt;'
248 248 if groups[2]:
249 249 return '&gt;'
250 250 if groups[3]:
251 251 return '<u>\t</u>'
252 252 if groups[4]:
253 253 return '<u class="cr"></u>'
254 254 if groups[5]:
255 255 return ' <i></i>'
256 256 assert False
257 257
258 258 return self._escape_re.sub(substitute, safe_unicode(string))
259 259
260 260 def _highlight_inline_diff(self, old, new):
261 261 """
262 262 Highlight simple add/remove in two lines given as info dicts. They are
263 263 modified in place and given markup with <del>/<ins>.
264 264 """
265 265 assert old['action'] == 'del'
266 266 assert new['action'] == 'add'
267 267
268 268 oldwords = self._token_re.split(old['line'])
269 269 newwords = self._token_re.split(new['line'])
270 270 sequence = difflib.SequenceMatcher(None, oldwords, newwords)
271 271
272 272 oldfragments, newfragments = [], []
273 273 for tag, i1, i2, j1, j2 in sequence.get_opcodes():
274 274 oldfrag = ''.join(oldwords[i1:i2])
275 275 newfrag = ''.join(newwords[j1:j2])
276 276 if tag != 'equal':
277 277 if oldfrag:
278 278 oldfrag = '<del>%s</del>' % oldfrag
279 279 if newfrag:
280 280 newfrag = '<ins>%s</ins>' % newfrag
281 281 oldfragments.append(oldfrag)
282 282 newfragments.append(newfrag)
283 283
284 284 old['line'] = "".join(oldfragments)
285 285 new['line'] = "".join(newfragments)
286 286
287 287 def _get_header(self, diff_chunk):
288 288 """
289 289 Parses a Git diff for a single file (header and chunks) and returns a tuple with:
290 290
291 291 1. A dict with meta info:
292 292
293 293 a_path, b_path, similarity_index, rename_from, rename_to,
294 294 old_mode, new_mode, new_file_mode, deleted_file_mode,
295 295 a_blob_id, b_blob_id, b_mode, a_file, b_file
296 296
297 297 2. An iterator yielding lines with simple HTML markup.
298 298 """
299 299 match = None
300 300 if self.vcs == 'git':
301 301 match = self._git_header_re.match(diff_chunk)
302 302 elif self.vcs == 'hg':
303 303 match = self._hg_header_re.match(diff_chunk)
304 304 if match is None:
305 305 raise Exception('diff not recognized as valid %s diff' % self.vcs)
306 306 meta_info = match.groupdict()
307 307 rest = diff_chunk[match.end():]
308 308 if rest and not rest.startswith('@') and not rest.startswith('literal ') and not rest.startswith('delta '):
309 309 raise Exception('cannot parse %s diff header: %r followed by %r' % (self.vcs, diff_chunk[:match.end()], rest[:1000]))
310 310 difflines = imap(self._escaper, re.findall(r'.*\n|.+$', rest)) # don't split on \r as str.splitlines do
311 311 return meta_info, difflines
312 312
313 313 def _clean_line(self, line, command):
314 314 """Given a diff line, strip the leading character if it is a plus/minus/context line."""
315 315 if command in ['+', '-', ' ']:
316 316 line = line[1:]
317 317 return line
318 318
    def _parse_gitdiff(self, inline_diff=True):
        """Parse self._diff and return a list of dicts with meta info and chunks for each file.
        If diff is truncated, wrap it in LimitedDiffContainer.
        Optionally, do an extra pass adding extra markup to one-liner changes.
        """
        _files = [] # list of dicts with meta info and chunks
        # identity wrapper by default - replaced with a LimitedDiffContainer
        # factory if the diff limit is exceeded while parsing
        diff_container = lambda arg: arg

        # split the diff in chunks of separate --git a/file b/file chunks
        for raw_diff in ('\n' + self._diff).split('\ndiff --git')[1:]:
            head, diff = self._get_header(raw_diff)

            # op is one of 'A' (add), 'D' (delete), 'M' (modify), 'R' (rename)
            op = None
            stats = {
                'added': 0,
                'deleted': 0,
                'binary': False,
                'ops': {},
            }

            if head['deleted_file_mode']:
                op = 'D'
                stats['binary'] = True
                stats['ops'][DEL_FILENODE] = 'deleted file'

            elif head['new_file_mode']:
                op = 'A'
                stats['binary'] = True
                stats['ops'][NEW_FILENODE] = 'new file %s' % head['new_file_mode']
            else: # modify operation, can be cp, rename, chmod
                # CHMOD
                if head['new_mode'] and head['old_mode']:
                    op = 'M'
                    stats['binary'] = True
                    stats['ops'][CHMOD_FILENODE] = ('modified file chmod %s => %s'
                                                    % (head['old_mode'], head['new_mode']))
                # RENAME
                if (head['rename_from'] and head['rename_to']
                        and head['rename_from'] != head['rename_to']):
                    op = 'R'
                    stats['binary'] = True
                    stats['ops'][RENAMED_FILENODE] = ('file renamed from %s to %s'
                                                      % (head['rename_from'], head['rename_to']))
                # COPY
                if head.get('copy_from') and head.get('copy_to'):
                    op = 'M'
                    stats['binary'] = True
                    stats['ops'][COPIED_FILENODE] = ('file copied from %s to %s'
                                                     % (head['copy_from'], head['copy_to']))
            # FALL BACK: detect missed old style add or remove
            if op is None:
                if not head['a_file'] and head['b_file']:
                    op = 'A'
                    stats['binary'] = True
                    stats['ops'][NEW_FILENODE] = 'new file'

                elif head['a_file'] and not head['b_file']:
                    op = 'D'
                    stats['binary'] = True
                    stats['ops'][DEL_FILENODE] = 'deleted file'

            # it's not ADD not DELETE
            if op is None:
                op = 'M'
                stats['binary'] = True
                stats['ops'][MOD_FILENODE] = 'modified file'

            # a real non-binary diff
            if head['a_file'] or head['b_file']:
                try:
                    chunks, added, deleted = self._parse_lines(diff)
                    # real content found, so override the binary guesses above
                    stats['binary'] = False
                    stats['added'] = added
                    stats['deleted'] = deleted
                    # explicit mark that it's a modified file
                    if op == 'M':
                        stats['ops'][MOD_FILENODE] = 'modified file'

                except DiffLimitExceeded:
                    # stop parsing entirely and flag the result as truncated
                    diff_container = lambda _diff: \
                        LimitedDiffContainer(self.diff_limit,
                                             self.cur_diff_size, _diff)
                    break
            else: # Git binary patch (or empty diff)
                # Git binary patch
                if head['bin_patch']:
                    stats['ops'][BIN_FILENODE] = 'binary diff not shown'
                chunks = []

            if op == 'D' and chunks:
                # a way of seeing deleted content could perhaps be nice - but
                # not with the current UI
                chunks = []

            # prepend a pseudo chunk announcing the non-trivial operations
            # (new/deleted/renamed/copied/chmod) as context lines; plain
            # modification is not announced
            chunks.insert(0, [{
                'old_lineno': '',
                'new_lineno': '',
                'action': 'context',
                'line': msg,
            } for _op, msg in stats['ops'].iteritems()
                if _op not in [MOD_FILENODE]])

            _files.append({
                'old_filename': head['a_path'],
                'filename': head['b_path'],
                'old_revision': head['a_blob_id'],
                'new_revision': head['b_blob_id'],
                'chunks': chunks,
                'operation': op,
                'stats': stats,
            })

        if not inline_diff:
            return diff_container(_files)

        # highlight inline changes when one del is followed by one add
        for diff_data in _files:
            for chunk in diff_data['chunks']:
                lineiter = iter(chunk)
                try:
                    peekline = lineiter.next()
                    while True:
                        # find a first del line
                        while peekline['action'] != 'del':
                            peekline = lineiter.next()
                        delline = peekline
                        peekline = lineiter.next()
                        # if not followed by add, eat all following del lines
                        if peekline['action'] != 'add':
                            while peekline['action'] == 'del':
                                peekline = lineiter.next()
                            continue
                        # found an add - make sure it is the only one
                        addline = peekline
                        try:
                            peekline = lineiter.next()
                        except StopIteration:
                            # add was last line - ok
                            self._highlight_inline_diff(delline, addline)
                            raise
                        if peekline['action'] != 'add':
                            # there was only one add line - ok
                            self._highlight_inline_diff(delline, addline)
                except StopIteration:
                    pass

        return diff_container(_files)
466 466
    def _parse_lines(self, diff):
        """
        Given an iterator of diff body lines, parse them and return a dict per
        line and added/removed totals.

        Raises DiffLimitExceeded (via the _escaper wrapped around the input
        iterator) or a plain Exception on malformed input.
        """
        added = deleted = 0
        old_line = old_end = new_line = new_end = None

        try:
            chunks = []  # list of hunks, each a list of per-line dicts
            line = diff.next()

            while True:
                lines = []
                chunks.append(lines)

                # every hunk starts with a '@@ -a,b +c,d @@' header
                match = self._chunk_re.match(line)

                if not match:
                    raise Exception('error parsing diff @@ line %r' % line)

                gr = match.groups()
                # omitted counts default to 1, as in '@@ -1 +1 @@'
                (old_line, old_end,
                 new_line, new_end) = [int(x or 1) for x in gr[:-1]]
                # make the counters 0-based so they can be pre-incremented
                # before each emitted line below
                old_line -= 1
                new_line -= 1

                # NOTE(review): len(gr) is fixed by _chunk_re (defined outside
                # this view), so this presumably reflects whether the regex
                # captures a trailing context group - confirm against _chunk_re
                context = len(gr) == 5
                # turn lengths into exclusive end line numbers
                old_end += old_line
                new_end += new_line

                if context:
                    # skip context only if it's first line
                    if int(gr[0]) > 1:
                        lines.append({
                            'old_lineno': '...',
                            'new_lineno': '...',
                            'action': 'context',
                            'line': line,
                        })

                line = diff.next()

                while old_line < old_end or new_line < new_end:
                    if not line:
                        raise Exception('error parsing diff - empty line at -%s+%s' % (old_line, new_line))

                    affects_old = affects_new = False

                    # first character says which side(s) the line belongs to
                    command = line[0]
                    if command == '+':
                        affects_new = True
                        action = 'add'
                        added += 1
                    elif command == '-':
                        affects_old = True
                        action = 'del'
                        deleted += 1
                    elif command == ' ':
                        affects_old = affects_new = True
                        action = 'unmod'
                    else:
                        raise Exception('error parsing diff - unknown command in line %r at -%s+%s' % (line, old_line, new_line))

                    # '\ No newline at end of file' markers don't consume a
                    # line number on either side
                    if not self._newline_marker.match(line):
                        old_line += affects_old
                        new_line += affects_new
                        lines.append({
                            'old_lineno': affects_old and old_line or '',
                            'new_lineno': affects_new and new_line or '',
                            'action': action,
                            'line': self._clean_line(line, command)
                        })

                    line = diff.next()

                    if self._newline_marker.match(line):
                        # we need to append to lines, since this is not
                        # counted in the line specs of diff
                        lines.append({
                            'old_lineno': '...',
                            'new_lineno': '...',
                            'action': 'context',
                            'line': self._clean_line(line, command)
                        })
                        line = diff.next()
                if old_line > old_end:
                    raise Exception('error parsing diff - more than %s "-" lines at -%s+%s' % (old_end, old_line, new_line))
                if new_line > new_end:
                    raise Exception('error parsing diff - more than %s "+" lines at -%s+%s' % (new_end, old_line, new_line))
        except StopIteration:
            pass
        # the last hunk must end exactly at the advertised boundaries
        if old_line != old_end or new_line != new_end:
            raise Exception('diff processing broken when old %s<>%s or new %s<>%s line %r' % (old_line, old_end, new_line, new_end, line))

        return chunks, added, deleted
563 563
564 564 def _safe_id(self, idstring):
565 565 """Make a string safe for including in an id attribute.
566 566
567 567 The HTML spec says that id attributes 'must begin with
568 568 a letter ([A-Za-z]) and may be followed by any number
569 569 of letters, digits ([0-9]), hyphens ("-"), underscores
570 570 ("_"), colons (":"), and periods (".")'. These regexps
571 571 are slightly over-zealous, in that they remove colons
572 572 and periods unnecessarily.
573 573
574 574 Whitespace is transformed into underscores, and then
575 575 anything which is not a hyphen or a character that
576 576 matches \w (alphanumerics and underscore) is removed.
577 577
578 578 """
579 579 # Transform all whitespace to underscore
580 580 idstring = re.sub(r'\s', "_", idstring)
581 581 # Remove everything that is not a hyphen or a member of \w
582 582 idstring = re.sub(r'(?!-)\W', "", idstring).lower()
583 583 return idstring
584 584
585 585 def prepare(self, inline_diff=True):
586 586 """
587 587 Prepare the passed udiff for HTML rendering. It'll return a list
588 588 of dicts with diff information
589 589 """
590 590 parsed = self._parse_gitdiff(inline_diff=inline_diff)
591 591 self.parsed = True
592 592 self.parsed_diff = parsed
593 593 return parsed
594 594
    def as_raw(self):
        """
        Returns raw string diff, exactly as it was passed in the first place.

        NOTE: trivial accessor - callers that already hold the diff string
        they constructed this instance with can use it directly.
        """
        return self._diff
600
    def as_html(self, table_class='code-difftable', line_class='line',
                old_lineno_class='lineno old', new_lineno_class='lineno new',
                no_lineno_class='lineno',
                code_class='code', enable_comments=False, parsed_lines=None):
        """
        Return given diff as html table with customized css classes

        :param table_class: CSS class for the outer <table>
        :param line_class: CSS class for each row, combined with the action
        :param old_lineno_class: CSS class for the old line number cell
        :param new_lineno_class: CSS class for the new line number cell
        :param no_lineno_class: CSS class used when a row has no line numbers
        :param code_class: CSS class for the code cell
        :param enable_comments: when False, code cells get a 'no-comment' class
        :param parsed_lines: optional pre-parsed data to render instead of
            self.parsed_diff
        :return: HTML string, or None if there are no chunks at all
        """
        def _link_to_if(condition, label, url):
            """
            Generates a link if condition is meet or just the label if not.
            """

            if condition:
                return '''<a href="%(url)s">%(label)s</a>''' % {
                    'url': url,
                    'label': label
                }
            else:
                return label
        # lazily parse on first use
        if not self.parsed:
            self.prepare()

        diff_lines = self.parsed_diff
        if parsed_lines:
            diff_lines = parsed_lines

        _html_empty = True
        _html = []
        _html.append('''<table class="%(table_class)s">\n''' % {
            'table_class': table_class
        })

        for diff in diff_lines:
            for line in diff['chunks']:
                _html_empty = False
                for change in line:
                    _html.append('''<tr class="%(lc)s %(action)s">\n''' % {
                        'lc': line_class,
                        'action': change['action']
                    })
                    anchor_old_id = ''
                    anchor_new_id = ''
                    # anchors like 'filename_o42' / 'filename_n42' allow
                    # linking to individual old/new lines
                    anchor_old = "%(filename)s_o%(oldline_no)s" % {
                        'filename': self._safe_id(diff['filename']),
                        'oldline_no': change['old_lineno']
                    }
                    anchor_new = "%(filename)s_n%(oldline_no)s" % {
                        'filename': self._safe_id(diff['filename']),
                        'oldline_no': change['new_lineno']
                    }
                    # '...' marks context separator rows without line numbers
                    cond_old = (change['old_lineno'] != '...' and
                                change['old_lineno'])
                    cond_new = (change['new_lineno'] != '...' and
                                change['new_lineno'])
                    no_lineno = (change['old_lineno'] == '...' and
                                 change['new_lineno'] == '...')
                    if cond_old:
                        anchor_old_id = 'id="%s"' % anchor_old
                    if cond_new:
                        anchor_new_id = 'id="%s"' % anchor_new
                    ###########################################################
                    # OLD LINE NUMBER
                    ###########################################################
                    _html.append('''\t<td %(a_id)s class="%(olc)s" %(colspan)s>''' % {
                        'a_id': anchor_old_id,
                        'olc': no_lineno_class if no_lineno else old_lineno_class,
                        'colspan': 'colspan="2"' if no_lineno else ''
                    })

                    _html.append('''%(link)s''' % {
                        'link': _link_to_if(not no_lineno, change['old_lineno'],
                                            '#%s' % anchor_old)
                    })
                    _html.append('''</td>\n''')
                    ###########################################################
                    # NEW LINE NUMBER
                    ###########################################################

                    # the old-lineno cell spans both columns when no_lineno
                    if not no_lineno:
                        _html.append('''\t<td %(a_id)s class="%(nlc)s">''' % {
                            'a_id': anchor_new_id,
                            'nlc': new_lineno_class
                        })

                        _html.append('''%(link)s''' % {
                            'link': _link_to_if(True, change['new_lineno'],
                                                '#%s' % anchor_new)
                        })
                        _html.append('''</td>\n''')
                    ###########################################################
                    # CODE
                    ###########################################################
                    comments = '' if enable_comments else 'no-comment'
                    _html.append('''\t<td class="%(cc)s %(inc)s">''' % {
                        'cc': code_class,
                        'inc': comments
                    })
                    # change['line'] is already HTML-escaped by _escaper
                    _html.append('''\n\t\t<div class="add-bubble"><div>&nbsp;</div></div><pre>%(code)s</pre>\n''' % {
                        'code': change['line']
                    })

                    _html.append('''\t</td>''')
                    _html.append('''\n</tr>\n''')
        _html.append('''</table>''')
        if _html_empty:
            return None
        return ''.join(_html)
708 702
709 703 def stat(self):
710 704 """
711 705 Returns tuple of added, and removed lines for this instance
712 706 """
713 707 return self.adds, self.removes
General Comments 0
You need to be logged in to leave comments. Login now