@@ -1,469 +1,470 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2010-2016 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

"""
commit controller for RhodeCode showing changes between commits
"""

import logging

from collections import defaultdict
from webob.exc import HTTPForbidden, HTTPBadRequest, HTTPNotFound

from pylons import tmpl_context as c, request, response
from pylons.i18n.translation import _
from pylons.controllers.util import redirect

from rhodecode.lib import auth
from rhodecode.lib import diffs, codeblocks
from rhodecode.lib.auth import (
    LoginRequired, HasRepoPermissionAnyDecorator, NotAnonymous)
from rhodecode.lib.base import BaseRepoController, render
from rhodecode.lib.compat import OrderedDict
from rhodecode.lib.exceptions import StatusChangeOnClosedPullRequestError
import rhodecode.lib.helpers as h
from rhodecode.lib.utils import action_logger, jsonify
from rhodecode.lib.utils2 import safe_unicode
from rhodecode.lib.vcs.backends.base import EmptyCommit
from rhodecode.lib.vcs.exceptions import (
    RepositoryError, CommitDoesNotExistError, NodeDoesNotExistError)
from rhodecode.model.db import ChangesetComment, ChangesetStatus
from rhodecode.model.changeset_status import ChangesetStatusModel
from rhodecode.model.comment import ChangesetCommentsModel
from rhodecode.model.meta import Session
from rhodecode.model.repo import RepoModel


log = logging.getLogger(__name__)


def _update_with_GET(params, GET):
    for k in ['diff1', 'diff2', 'diff']:
        params[k] += GET.getall(k)


def get_ignore_ws(fid, GET):
    ig_ws_global = GET.get('ignorews')
    ig_ws = filter(lambda k: k.startswith('WS'), GET.getall(fid))
    if ig_ws:
        try:
            return int(ig_ws[0].split(':')[-1])
        except Exception:
            pass
    return ig_ws_global


def _ignorews_url(GET, fileid=None):
    fileid = str(fileid) if fileid else None
    params = defaultdict(list)
    _update_with_GET(params, GET)
    label = _('Show whitespace')
    tooltiplbl = _('Show whitespace for all diffs')
    ig_ws = get_ignore_ws(fileid, GET)
    ln_ctx = get_line_ctx(fileid, GET)

    if ig_ws is None:
        params['ignorews'] += [1]
        label = _('Ignore whitespace')
        tooltiplbl = _('Ignore whitespace for all diffs')
    ctx_key = 'context'
    ctx_val = ln_ctx

    # if we have passed in ln_ctx pass it along to our params
    if ln_ctx:
        params[ctx_key] += [ctx_val]

    if fileid:
        params['anchor'] = 'a_' + fileid
    return h.link_to(label, h.url.current(**params), title=tooltiplbl, class_='tooltip')


def get_line_ctx(fid, GET):
    ln_ctx_global = GET.get('context')
    if fid:
        ln_ctx = filter(lambda k: k.startswith('C'), GET.getall(fid))
    else:
        _ln_ctx = filter(lambda k: k.startswith('C'), GET)
        ln_ctx = GET.get(_ln_ctx[0]) if _ln_ctx else ln_ctx_global
        if ln_ctx:
            ln_ctx = [ln_ctx]

    if ln_ctx:
        retval = ln_ctx[0].split(':')[-1]
    else:
        retval = ln_ctx_global

    try:
        return int(retval)
    except Exception:
        return 3


def _context_url(GET, fileid=None):
    """
    Generates a url for context lines.

    :param fileid:
    """

    fileid = str(fileid) if fileid else None
    ig_ws = get_ignore_ws(fileid, GET)
    ln_ctx = (get_line_ctx(fileid, GET) or 3) * 2

    params = defaultdict(list)
    _update_with_GET(params, GET)

    if ln_ctx > 0:
        params['context'] += [ln_ctx]

    if ig_ws:
        ig_ws_key = 'ignorews'
        ig_ws_val = 1
        params[ig_ws_key] += [ig_ws_val]

    lbl = _('Increase context')
    tooltiplbl = _('Increase context for all diffs')

    if fileid:
        params['anchor'] = 'a_' + fileid
    return h.link_to(lbl, h.url.current(**params), title=tooltiplbl, class_='tooltip')


class ChangesetController(BaseRepoController):

    def __before__(self):
        super(ChangesetController, self).__before__()
        c.affected_files_cut_off = 60

    def _index(self, commit_id_range, method):
        c.ignorews_url = _ignorews_url
        c.context_url = _context_url
        c.fulldiff = fulldiff = request.GET.get('fulldiff')
        # get ranges of commit ids if present
        commit_range = commit_id_range.split('...')[:2]
        enable_comments = True
        try:
            pre_load = ['affected_files', 'author', 'branch', 'date',
                        'message', 'parents']

            if len(commit_range) == 2:
                enable_comments = False
                commits = c.rhodecode_repo.get_commits(
                    start_id=commit_range[0], end_id=commit_range[1],
                    pre_load=pre_load)
                commits = list(commits)
            else:
                commits = [c.rhodecode_repo.get_commit(
                    commit_id=commit_id_range, pre_load=pre_load)]

            c.commit_ranges = commits
            if not c.commit_ranges:
                raise RepositoryError(
                    'The commit range returned an empty result')
        except CommitDoesNotExistError:
            msg = _('No such commit exists for this repository')
            h.flash(msg, category='error')
            raise HTTPNotFound()
        except Exception:
            log.exception("General failure")
            raise HTTPNotFound()

        c.changes = OrderedDict()
        c.lines_added = 0
        c.lines_deleted = 0

        c.commit_statuses = ChangesetStatus.STATUSES
        c.comments = []
        c.statuses = []
        c.inline_comments = []
        c.inline_cnt = 0
        c.files = []

        # Iterate over ranges (default commit view is always one commit)
        for commit in c.commit_ranges:
            if method == 'show':
                c.statuses.extend([ChangesetStatusModel().get_status(
                    c.rhodecode_db_repo.repo_id, commit.raw_id)])

                c.comments.extend(ChangesetCommentsModel().get_comments(
                    c.rhodecode_db_repo.repo_id,
                    revision=commit.raw_id))

                # comments from PR
                st = ChangesetStatusModel().get_statuses(
                    c.rhodecode_db_repo.repo_id, commit.raw_id,
                    with_revisions=True)

                # from associated statuses, check the pull requests, and
                # show comments from them

                prs = set(x.pull_request for x in
                          filter(lambda x: x.pull_request is not None, st))
                for pr in prs:
                    c.comments.extend(pr.comments)

                inlines = ChangesetCommentsModel().get_inline_comments(
                    c.rhodecode_db_repo.repo_id, revision=commit.raw_id)
                c.inline_comments.extend(inlines.iteritems())

            c.changes[commit.raw_id] = []

            commit2 = commit
            commit1 = commit.parents[0] if commit.parents else EmptyCommit()

            # fetch global flags of ignore ws or context lines
            context_lcl = get_line_ctx('', request.GET)
            ign_whitespace_lcl = get_ignore_ws('', request.GET)

            _diff = c.rhodecode_repo.get_diff(
                commit1, commit2,
                ignore_whitespace=ign_whitespace_lcl, context=context_lcl)

            # diff_limit will cut off the whole diff if the limit is applied
            # otherwise it will just hide the big files from the front-end
            diff_limit = self.cut_off_limit_diff
            file_limit = self.cut_off_limit_file

            diff_processor = diffs.DiffProcessor(
                _diff, format='newdiff', diff_limit=diff_limit,
                file_limit=file_limit, show_full_diff=fulldiff)
            commit_changes = OrderedDict()
            if method == 'show':
                _parsed = diff_processor.prepare()
                c.limited_diff = isinstance(_parsed, diffs.LimitedDiffContainer)

                _parsed = diff_processor.prepare()

                def _node_getter(commit):
                    def get_node(fname):
                        try:
                            return commit.get_node(fname)
                        except NodeDoesNotExistError:
                            return None
                    return get_node

                diffset = codeblocks.DiffSet(
+                   repo_name=c.repo_name,
                    source_node_getter=_node_getter(commit1),
                    target_node_getter=_node_getter(commit2),
                ).render_patchset(_parsed, commit1.raw_id, commit2.raw_id)
                c.changes[commit.raw_id] = diffset
            else:
                # downloads/raw we only need RAW diff nothing else
                diff = diff_processor.as_raw()
                c.changes[commit.raw_id] = [None, None, None, None, diff, None, None]

        # sort comments by how they were generated
        c.comments = sorted(c.comments, key=lambda x: x.comment_id)

        # count inline comments
        for __, lines in c.inline_comments:
            for comments in lines.values():
                c.inline_cnt += len(comments)

        if len(c.commit_ranges) == 1:
            c.commit = c.commit_ranges[0]
            c.parent_tmpl = ''.join(
                '# Parent %s\n' % x.raw_id for x in c.commit.parents)
        if method == 'download':
            response.content_type = 'text/plain'
            response.content_disposition = (
                'attachment; filename=%s.diff' % commit_id_range[:12])
            return diff
        elif method == 'patch':
            response.content_type = 'text/plain'
            c.diff = safe_unicode(diff)
            return render('changeset/patch_changeset.html')
        elif method == 'raw':
            response.content_type = 'text/plain'
            return diff
        elif method == 'show':
            if len(c.commit_ranges) == 1:
                return render('changeset/changeset.html')
            else:
                c.ancestor = None
                c.target_repo = c.rhodecode_db_repo
                return render('changeset/changeset_range.html')

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    def index(self, revision, method='show'):
        return self._index(revision, method=method)

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    def changeset_raw(self, revision):
        return self._index(revision, method='raw')

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    def changeset_patch(self, revision):
        return self._index(revision, method='patch')

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    def changeset_download(self, revision):
        return self._index(revision, method='download')

    @LoginRequired()
    @NotAnonymous()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    @auth.CSRFRequired()
    @jsonify
    def comment(self, repo_name, revision):
        commit_id = revision
        status = request.POST.get('changeset_status', None)
        text = request.POST.get('text')
        if status:
            text = text or (_('Status change %(transition_icon)s %(status)s')
                            % {'transition_icon': '>',
                               'status': ChangesetStatus.get_status_lbl(status)})

        multi_commit_ids = filter(
            lambda s: s not in ['', None],
            request.POST.get('commit_ids', '').split(','),)

        commit_ids = multi_commit_ids or [commit_id]
        comment = None
        for current_id in filter(None, commit_ids):
            c.co = comment = ChangesetCommentsModel().create(
                text=text,
                repo=c.rhodecode_db_repo.repo_id,
                user=c.rhodecode_user.user_id,
                revision=current_id,
                f_path=request.POST.get('f_path'),
                line_no=request.POST.get('line'),
                status_change=(ChangesetStatus.get_status_lbl(status)
                               if status else None),
                status_change_type=status
            )
            # get status if set !
            if status:
                # if latest status was from pull request and it's closed
                # disallow changing status !
                # dont_allow_on_closed_pull_request = True !

                try:
                    ChangesetStatusModel().set_status(
                        c.rhodecode_db_repo.repo_id,
                        status,
                        c.rhodecode_user.user_id,
                        comment,
                        revision=current_id,
                        dont_allow_on_closed_pull_request=True
                    )
                except StatusChangeOnClosedPullRequestError:
                    msg = _('Changing the status of a commit associated with '
                            'a closed pull request is not allowed')
                    log.exception(msg)
                    h.flash(msg, category='warning')
                    return redirect(h.url(
                        'changeset_home', repo_name=repo_name,
                        revision=current_id))

        # finalize, commit and redirect
        Session().commit()

        data = {
            'target_id': h.safeid(h.safe_unicode(request.POST.get('f_path'))),
        }
        if comment:
            data.update(comment.get_dict())
            data.update({'rendered_text':
                         render('changeset/changeset_comment_block.html')})

        return data

    @LoginRequired()
    @NotAnonymous()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    @auth.CSRFRequired()
    def preview_comment(self):
        # Technically a CSRF token is not needed as no state changes with this
        # call. However, as this is a POST, it is better to have it so that
        # automated tools don't flag it as potential CSRF.
        # POST is required because the payload could be bigger than the
        # maximum allowed by GET.
        if not request.environ.get('HTTP_X_PARTIAL_XHR'):
            raise HTTPBadRequest()
        text = request.POST.get('text')
        renderer = request.POST.get('renderer') or 'rst'
        if text:
            return h.render(text, renderer=renderer, mentions=True)
        return ''

    @LoginRequired()
    @NotAnonymous()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    @auth.CSRFRequired()
    @jsonify
    def delete_comment(self, repo_name, comment_id):
        comment = ChangesetComment.get(comment_id)
        owner = (comment.author.user_id == c.rhodecode_user.user_id)
        is_repo_admin = h.HasRepoPermissionAny('repository.admin')(c.repo_name)
        if h.HasPermissionAny('hg.admin')() or is_repo_admin or owner:
            ChangesetCommentsModel().delete(comment=comment)
            Session().commit()
            return True
        else:
            raise HTTPForbidden()

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    @jsonify
    def changeset_info(self, repo_name, revision):
        if request.is_xhr:
            try:
                return c.rhodecode_repo.get_commit(commit_id=revision)
            except CommitDoesNotExistError as e:
                return EmptyCommit(message=str(e))
        else:
            raise HTTPBadRequest()

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    @jsonify
    def changeset_children(self, repo_name, revision):
        if request.is_xhr:
            commit = c.rhodecode_repo.get_commit(commit_id=revision)
            result = {"results": commit.children}
            return result
        else:
            raise HTTPBadRequest()

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    @jsonify
    def changeset_parents(self, repo_name, revision):
        if request.is_xhr:
            commit = c.rhodecode_repo.get_commit(commit_id=revision)
            result = {"results": commit.parents}
            return result
        else:
            raise HTTPBadRequest()
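
The only change in this controller is the repo_name=c.repo_name keyword passed
to codeblocks.DiffSet: the diffset rendered for the commit view now carries the
repository name, so templates can build repo-scoped file links. A minimal
sketch of that call chain, assuming a RhodeCode environment; render_commit_diff
and its arguments are illustrative names, not part of this codebase:

    from rhodecode.lib import diffs, codeblocks
    from rhodecode.lib.vcs.exceptions import NodeDoesNotExistError

    def render_commit_diff(repo_name, vcs_repo, commit1, commit2):
        # text diff between parent and commit, as in ChangesetController._index
        raw_diff = vcs_repo.get_diff(
            commit1, commit2, ignore_whitespace=False, context=3)
        parsed = diffs.DiffProcessor(
            raw_diff, format='newdiff', diff_limit=None,
            file_limit=None, show_full_diff=True).prepare()

        def node_getter(commit):
            # DiffSet pulls file nodes lazily for whole-file highlighting
            def get_node(fname):
                try:
                    return commit.get_node(fname)
                except NodeDoesNotExistError:
                    return None
            return get_node

        # repo_name is the keyword added by this change; render_patchset
        # copies it onto the resulting diffset (see the DiffSet diff below)
        return codeblocks.DiffSet(
            repo_name=repo_name,
            source_node_getter=node_getter(commit1),
            target_node_getter=node_getter(commit2),
        ).render_patchset(parsed, commit1.raw_id, commit2.raw_id)
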
@@ -1,263 +1,264 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2012-2016 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

"""
Compare controller for showing differences between two commits/refs/tags etc.
"""

import logging

from webob.exc import HTTPBadRequest
from pylons import request, tmpl_context as c, url
from pylons.controllers.util import redirect
from pylons.i18n.translation import _

from rhodecode.controllers.utils import parse_path_ref, get_commit_from_ref_name
from rhodecode.lib import helpers as h
from rhodecode.lib import diffs, codeblocks
from rhodecode.lib.auth import LoginRequired, HasRepoPermissionAnyDecorator
from rhodecode.lib.base import BaseRepoController, render
from rhodecode.lib.utils import safe_str
from rhodecode.lib.utils2 import safe_unicode, str2bool
from rhodecode.lib.vcs.exceptions import (
    EmptyRepositoryError, RepositoryError, RepositoryRequirementError,
    NodeDoesNotExistError)
from rhodecode.model.db import Repository, ChangesetStatus

log = logging.getLogger(__name__)


class CompareController(BaseRepoController):

    def __before__(self):
        super(CompareController, self).__before__()

    def _get_commit_or_redirect(
            self, ref, ref_type, repo, redirect_after=True, partial=False):
|
55 | 55 | This is a safe way to get a commit. If an error occurs it |
|
56 | 56 | redirects to a commit with a proper message. If partial is set |
|
57 | 57 | then it does not do redirect raise and throws an exception instead. |
|
58 | 58 | """ |
|
59 | 59 | try: |
|
60 | 60 | return get_commit_from_ref_name(repo, safe_str(ref), ref_type) |
|
61 | 61 | except EmptyRepositoryError: |
|
62 | 62 | if not redirect_after: |
|
63 | 63 | return repo.scm_instance().EMPTY_COMMIT |
|
64 | 64 | h.flash(h.literal(_('There are no commits yet')), |
|
65 | 65 | category='warning') |
|
66 | 66 | redirect(url('summary_home', repo_name=repo.repo_name)) |
|
67 | 67 | |
|
68 | 68 | except RepositoryError as e: |
|
69 | 69 | msg = safe_str(e) |
|
70 | 70 | log.exception(msg) |
|
71 | 71 | h.flash(msg, category='warning') |
|
72 | 72 | if not partial: |
|
73 | 73 | redirect(h.url('summary_home', repo_name=repo.repo_name)) |
|
74 | 74 | raise HTTPBadRequest() |
|
75 | 75 | |
|
76 | 76 | @LoginRequired() |
|
77 | 77 | @HasRepoPermissionAnyDecorator('repository.read', 'repository.write', |
|
78 | 78 | 'repository.admin') |
|
79 | 79 | def index(self, repo_name): |
|
80 | 80 | c.compare_home = True |
|
81 | 81 | c.commit_ranges = [] |
|
82 | 82 | c.diffset = None |
|
83 | 83 | c.limited_diff = False |
|
84 | 84 | source_repo = c.rhodecode_db_repo.repo_name |
|
85 | 85 | target_repo = request.GET.get('target_repo', source_repo) |
|
86 | 86 | c.source_repo = Repository.get_by_repo_name(source_repo) |
|
87 | 87 | c.target_repo = Repository.get_by_repo_name(target_repo) |
|
88 | 88 | c.source_ref = c.target_ref = _('Select commit') |
|
89 | 89 | c.source_ref_type = "" |
|
90 | 90 | c.target_ref_type = "" |
|
91 | 91 | c.commit_statuses = ChangesetStatus.STATUSES |
|
92 | 92 | c.preview_mode = False |
|
93 | 93 | return render('compare/compare_diff.html') |
|
94 | 94 | |
|
95 | 95 | @LoginRequired() |
|
96 | 96 | @HasRepoPermissionAnyDecorator('repository.read', 'repository.write', |
|
97 | 97 | 'repository.admin') |
|
98 | 98 | def compare(self, repo_name, source_ref_type, source_ref, |
|
99 | 99 | target_ref_type, target_ref): |
|
100 | 100 | # source_ref will be evaluated in source_repo |
|
101 | 101 | source_repo_name = c.rhodecode_db_repo.repo_name |
|
102 | 102 | source_path, source_id = parse_path_ref(source_ref) |
|
103 | 103 | |
|
104 | 104 | # target_ref will be evaluated in target_repo |
|
105 | 105 | target_repo_name = request.GET.get('target_repo', source_repo_name) |
|
106 | 106 | target_path, target_id = parse_path_ref(target_ref) |
|
107 | 107 | |
|
108 | 108 | c.commit_statuses = ChangesetStatus.STATUSES |
|
109 | 109 | |
|
110 | 110 | # if merge is True |
|
111 | 111 | # Show what changes since the shared ancestor commit of target/source |
|
112 | 112 | # the source would get if it was merged with target. Only commits |
|
113 | 113 | # which are in target but not in source will be shown. |
|
114 | 114 | merge = str2bool(request.GET.get('merge')) |
|
115 | 115 | # if merge is False |
|
116 | 116 | # Show a raw diff of source/target refs even if no ancestor exists |
|
117 | 117 | |
|
118 | 118 | |
|
119 | 119 | # c.fulldiff disables cut_off_limit |
|
120 | 120 | c.fulldiff = str2bool(request.GET.get('fulldiff')) |
|
121 | 121 | |
|
122 | 122 | # if partial, returns just compare_commits.html (commits log) |
|
123 | 123 | partial = request.is_xhr |
|
124 | 124 | |
|
125 | 125 | # swap url for compare_diff page |
|
126 | 126 | c.swap_url = h.url( |
|
127 | 127 | 'compare_url', |
|
128 | 128 | repo_name=target_repo_name, |
|
129 | 129 | source_ref_type=target_ref_type, |
|
130 | 130 | source_ref=target_ref, |
|
131 | 131 | target_repo=source_repo_name, |
|
132 | 132 | target_ref_type=source_ref_type, |
|
133 | 133 | target_ref=source_ref, |
|
134 | 134 | merge=merge and '1' or '') |
|
135 | 135 | |
|
136 | 136 | source_repo = Repository.get_by_repo_name(source_repo_name) |
|
137 | 137 | target_repo = Repository.get_by_repo_name(target_repo_name) |
|
138 | 138 | |
|
139 | 139 | if source_repo is None: |
|
140 | 140 | msg = _('Could not find the original repo: %(repo)s') % { |
|
141 | 141 | 'repo': source_repo} |
|
142 | 142 | |
|
143 | 143 | log.error(msg) |
|
144 | 144 | h.flash(msg, category='error') |
|
145 | 145 | return redirect(url('compare_home', repo_name=c.repo_name)) |
|
146 | 146 | |
|
147 | 147 | if target_repo is None: |
|
148 | 148 | msg = _('Could not find the other repo: %(repo)s') % { |
|
149 | 149 | 'repo': target_repo_name} |
|
150 | 150 | log.error(msg) |
|
151 | 151 | h.flash(msg, category='error') |
|
152 | 152 | return redirect(url('compare_home', repo_name=c.repo_name)) |
|
153 | 153 | |
|
154 | 154 | source_alias = source_repo.scm_instance().alias |
|
155 | 155 | target_alias = target_repo.scm_instance().alias |
|
156 | 156 | if source_alias != target_alias: |
|
157 | 157 | msg = _('The comparison of two different kinds of remote repos ' |
|
158 | 158 | 'is not available') |
|
159 | 159 | log.error(msg) |
|
160 | 160 | h.flash(msg, category='error') |
|
161 | 161 | return redirect(url('compare_home', repo_name=c.repo_name)) |
|
162 | 162 | |
|
163 | 163 | source_commit = self._get_commit_or_redirect( |
|
164 | 164 | ref=source_id, ref_type=source_ref_type, repo=source_repo, |
|
165 | 165 | partial=partial) |
|
166 | 166 | target_commit = self._get_commit_or_redirect( |
|
167 | 167 | ref=target_id, ref_type=target_ref_type, repo=target_repo, |
|
168 | 168 | partial=partial) |
|
169 | 169 | |
|
170 | 170 | c.compare_home = False |
|
171 | 171 | c.source_repo = source_repo |
|
172 | 172 | c.target_repo = target_repo |
|
173 | 173 | c.source_ref = source_ref |
|
174 | 174 | c.target_ref = target_ref |
|
175 | 175 | c.source_ref_type = source_ref_type |
|
176 | 176 | c.target_ref_type = target_ref_type |
|
177 | 177 | |
|
178 | 178 | source_scm = source_repo.scm_instance() |
|
179 | 179 | target_scm = target_repo.scm_instance() |
|
180 | 180 | |
|
181 | 181 | pre_load = ["author", "branch", "date", "message"] |
|
182 | 182 | c.ancestor = None |
|
183 | 183 | try: |
|
184 | 184 | c.commit_ranges = source_scm.compare( |
|
185 | 185 | source_commit.raw_id, target_commit.raw_id, |
|
186 | 186 | target_scm, merge, pre_load=pre_load) |
|
187 | 187 | if merge: |
|
188 | 188 | c.ancestor = source_scm.get_common_ancestor( |
|
189 | 189 | source_commit.raw_id, target_commit.raw_id, target_scm) |
|
190 | 190 | except RepositoryRequirementError: |
|
191 | 191 | msg = _('Could not compare repos with different ' |
|
192 | 192 | 'large file settings') |
|
193 | 193 | log.error(msg) |
|
194 | 194 | if partial: |
|
195 | 195 | return msg |
|
196 | 196 | h.flash(msg, category='error') |
|
197 | 197 | return redirect(url('compare_home', repo_name=c.repo_name)) |
|
198 | 198 | |
|
199 | 199 | c.statuses = c.rhodecode_db_repo.statuses( |
|
200 | 200 | [x.raw_id for x in c.commit_ranges]) |
|
201 | 201 | |
|
202 | 202 | if partial: # for PR ajax commits loader |
|
203 | 203 | if not c.ancestor: |
|
204 | 204 | return '' # cannot merge if there is no ancestor |
|
205 | 205 | return render('compare/compare_commits.html') |
|
206 | 206 | |
|
207 | 207 | if c.ancestor: |
|
208 | 208 | # case we want a simple diff without incoming commits, |
|
209 | 209 | # previewing what will be merged. |
|
210 | 210 | # Make the diff on target repo (which is known to have target_ref) |
|
211 | 211 | log.debug('Using ancestor %s as source_ref instead of %s' |
|
212 | 212 | % (c.ancestor, source_ref)) |
|
213 | 213 | source_repo = target_repo |
|
214 | 214 | source_commit = target_repo.get_commit(commit_id=c.ancestor) |
|
215 | 215 | |
|
216 | 216 | # diff_limit will cut off the whole diff if the limit is applied |
|
217 | 217 | # otherwise it will just hide the big files from the front-end |
|
218 | 218 | diff_limit = self.cut_off_limit_diff |
|
219 | 219 | file_limit = self.cut_off_limit_file |
|
220 | 220 | |
|
221 | 221 | log.debug('calculating diff between ' |
|
222 | 222 | 'source_ref:%s and target_ref:%s for repo `%s`', |
|
223 | 223 | source_commit, target_commit, |
|
224 | 224 | safe_unicode(source_repo.scm_instance().path)) |
|
225 | 225 | |
|
226 | 226 | if source_commit.repository != target_commit.repository: |
|
227 | 227 | msg = _( |
|
228 | 228 | "Repositories unrelated. " |
|
229 | 229 | "Cannot compare commit %(commit1)s from repository %(repo1)s " |
|
230 | 230 | "with commit %(commit2)s from repository %(repo2)s.") % { |
|
231 | 231 | 'commit1': h.show_id(source_commit), |
|
232 | 232 | 'repo1': source_repo.repo_name, |
|
233 | 233 | 'commit2': h.show_id(target_commit), |
|
234 | 234 | 'repo2': target_repo.repo_name, |
|
235 | 235 | } |
|
236 | 236 | h.flash(msg, category='error') |
|
237 | 237 | raise HTTPBadRequest() |
|
238 | 238 | |
|
239 | 239 | txtdiff = source_repo.scm_instance().get_diff( |
|
240 | 240 | commit1=source_commit, commit2=target_commit, |
|
241 | 241 | path1=source_path, path=target_path) |
|
242 | 242 | diff_processor = diffs.DiffProcessor( |
|
243 | 243 | txtdiff, format='newdiff', diff_limit=diff_limit, |
|
244 | 244 | file_limit=file_limit, show_full_diff=c.fulldiff) |
|
245 | 245 | _parsed = diff_processor.prepare() |
|
246 | 246 | |
|
247 | 247 | def _node_getter(commit): |
|
248 | 248 | """ Returns a function that returns a node for a commit or None """ |
|
249 | 249 | def get_node(fname): |
|
250 | 250 | try: |
|
251 | 251 | return commit.get_node(fname) |
|
252 | 252 | except NodeDoesNotExistError: |
|
253 | 253 | return None |
|
254 | 254 | return get_node |
|
255 | 255 | |
|
256 | 256 | c.diffset = codeblocks.DiffSet( |
|
257 | repo_name=source_repo.repo_name, | |
|
257 | 258 | source_node_getter=_node_getter(source_commit), |
|
258 | 259 | target_node_getter=_node_getter(target_commit), |
|
259 | 260 | ).render_patchset(_parsed, source_ref, target_ref) |
|
260 | 261 | |
|
261 | 262 | c.preview_mode = merge |
|
262 | 263 | |
|
263 | 264 | return render('compare/compare_diff.html') |
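
The compare controller gains the same repo_name keyword, here taken from the
repository the diff is actually computed in. Beyond that, the interesting
logic above is the choice of diff base: when merge is enabled and a common
ancestor exists, the source side is swapped for the ancestor commit resolved
in the target repo, so the view previews a merge rather than a symmetric
comparison. A condensed, hedged sketch of that decision; choose_diff_base is
an illustrative name and the scm methods are assumed to behave as used above:

    def choose_diff_base(source_scm, target_scm, source_commit, target_commit,
                         merge=False):
        """Pick the (scm, commit) pair to diff against target_commit."""
        ancestor = None
        if merge:
            # only computed for merge previews, as in CompareController.compare
            ancestor = source_scm.get_common_ancestor(
                source_commit.raw_id, target_commit.raw_id, target_scm)
        if ancestor:
            # merge preview: diff from the shared ancestor, resolved in the
            # target repo, which is known to contain the target ref
            return target_scm, target_scm.get_commit(commit_id=ancestor)
        # plain comparison: raw diff between the two refs exactly as given
        return source_scm, source_commit
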
@@ -1,641 +1,642 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2011-2016 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

import logging
import difflib
from itertools import groupby

from pygments import lex
from pygments.formatters.html import _get_ttype_class as pygment_token_class
from rhodecode.lib.helpers import (
    get_lexer_for_filenode, get_lexer_safe, html_escape)
from rhodecode.lib.utils2 import AttributeDict
from rhodecode.lib.vcs.nodes import FileNode
from rhodecode.lib.diff_match_patch import diff_match_patch
from rhodecode.lib.diffs import LimitedDiffContainer
from pygments.lexers import get_lexer_by_name

plain_text_lexer = get_lexer_by_name(
    'text', stripall=False, stripnl=False, ensurenl=False)


log = logging.getLogger()


def filenode_as_lines_tokens(filenode, lexer=None):
    lexer = lexer or get_lexer_for_filenode(filenode)
    log.debug('Generating file node pygment tokens for %s, %s', lexer, filenode)
    tokens = tokenize_string(filenode.content, lexer)
    lines = split_token_stream(tokens, split_string='\n')
    rv = list(lines)
    return rv


def tokenize_string(content, lexer):
    """
    Use pygments to tokenize some content based on a lexer
    ensuring all original new lines and whitespace is preserved
    """

    lexer.stripall = False
    lexer.stripnl = False
    lexer.ensurenl = False
    for token_type, token_text in lex(content, lexer):
        yield pygment_token_class(token_type), token_text


def split_token_stream(tokens, split_string=u'\n'):
65 | 65 | """ |
|
66 | 66 | Take a list of (TokenType, text) tuples and split them by a string |
|
67 | 67 | |
|
68 | 68 | >>> split_token_stream([(TEXT, 'some\ntext'), (TEXT, 'more\n')]) |
|
69 | 69 | [(TEXT, 'some'), (TEXT, 'text'), |
|
70 | 70 | (TEXT, 'more'), (TEXT, 'text')] |
|
71 | 71 | """ |

    buffer = []
    for token_class, token_text in tokens:
        parts = token_text.split(split_string)
        for part in parts[:-1]:
            buffer.append((token_class, part))
            yield buffer
            buffer = []

        buffer.append((token_class, parts[-1]))

    if buffer:
        yield buffer


def filenode_as_annotated_lines_tokens(filenode):
    """
    Take a file node and return a list of annotations => lines, if no annotation
    is found, it will be None.

    eg:

    [
        (annotation1, [
            (1, line1_tokens_list),
            (2, line2_tokens_list),
        ]),
        (annotation2, [
            (3, line1_tokens_list),
        ]),
        (None, [
            (4, line1_tokens_list),
        ]),
        (annotation1, [
            (5, line1_tokens_list),
            (6, line2_tokens_list),
        ])
    ]
    """

    commit_cache = {}  # cache commit_getter lookups

    def _get_annotation(commit_id, commit_getter):
        if commit_id not in commit_cache:
            commit_cache[commit_id] = commit_getter()
        return commit_cache[commit_id]

    annotation_lookup = {
        line_no: _get_annotation(commit_id, commit_getter)
        for line_no, commit_id, commit_getter, line_content
        in filenode.annotate
    }

    annotations_lines = ((annotation_lookup.get(line_no), line_no, tokens)
                         for line_no, tokens
                         in enumerate(filenode_as_lines_tokens(filenode), 1))

    grouped_annotations_lines = groupby(annotations_lines, lambda x: x[0])

    for annotation, group in grouped_annotations_lines:
        yield (
            annotation, [(line_no, tokens)
                         for (_, line_no, tokens) in group]
        )


def render_tokenstream(tokenstream):
    result = []
    for token_class, token_ops_texts in rollup_tokenstream(tokenstream):

        if token_class:
            result.append(u'<span class="%s">' % token_class)
        else:
            result.append(u'<span>')

        for op_tag, token_text in token_ops_texts:

            if op_tag:
                result.append(u'<%s>' % op_tag)

            escaped_text = html_escape(token_text)

            # TODO: dan: investigate showing hidden characters like space/nl/tab
            # escaped_text = escaped_text.replace(' ', '<sp> </sp>')
            # escaped_text = escaped_text.replace('\n', '<nl>\n</nl>')
            # escaped_text = escaped_text.replace('\t', '<tab>\t</tab>')

            result.append(escaped_text)

            if op_tag:
                result.append(u'</%s>' % op_tag)

        result.append(u'</span>')

    html = ''.join(result)
    return html


def rollup_tokenstream(tokenstream):
    """
    Group a token stream of the format:

        ('class', 'op', 'text')
    or
        ('class', 'text')

    into

        [('class1',
            [('op1', 'text'),
             ('op2', 'text')]),
         ('class2',
            [('op3', 'text')])]

    This is used to get the minimal tags necessary when
    rendering to html, e.g. for a token stream like:

        <span class="A"><ins>he</ins>llo</span>
    vs
        <span class="A"><ins>he</ins></span><span class="A">llo</span>

    If a 2 tuple is passed in, the output op will be an empty string.

    eg:

    >>> rollup_tokenstream([('classA', '', 'h'),
    ...                     ('classA', 'del', 'ell'),
    ...                     ('classA', '', 'o'),
    ...                     ('classB', '', ' '),
    ...                     ('classA', '', 'the'),
    ...                     ('classA', '', 're'),
    ...                     ])
    [('classA', [('', 'h'), ('del', 'ell'), ('', 'o')]),
     ('classB', [('', ' ')]),
     ('classA', [('', 'there')])]

    """
    if tokenstream and len(tokenstream[0]) == 2:
        tokenstream = ((t[0], '', t[1]) for t in tokenstream)

    result = []
    for token_class, op_list in groupby(tokenstream, lambda t: t[0]):
        ops = []
        for token_op, token_text_list in groupby(op_list, lambda o: o[1]):
            text_buffer = []
            for t_class, t_op, t_text in token_text_list:
                text_buffer.append(t_text)
            ops.append((token_op, ''.join(text_buffer)))
        result.append((token_class, ops))
    return result


def tokens_diff(old_tokens, new_tokens, use_diff_match_patch=True):
    """
    Converts a list of (token_class, token_text) tuples to a list of
    (token_class, token_op, token_text) tuples where token_op is one of
    ('ins', 'del', '')

    :param old_tokens: list of (token_class, token_text) tuples of old line
    :param new_tokens: list of (token_class, token_text) tuples of new line
    :param use_diff_match_patch: boolean, will use google's diff match patch
        library which has options to 'smooth' out the character by character
        differences making nicer ins/del blocks
    """

    old_tokens_result = []
    new_tokens_result = []

    similarity = difflib.SequenceMatcher(
        None,
        ''.join(token_text for token_class, token_text in old_tokens),
        ''.join(token_text for token_class, token_text in new_tokens)
    ).ratio()

    if similarity < 0.6:  # return, the blocks are too different
        for token_class, token_text in old_tokens:
            old_tokens_result.append((token_class, '', token_text))
        for token_class, token_text in new_tokens:
            new_tokens_result.append((token_class, '', token_text))
        return old_tokens_result, new_tokens_result, similarity

    token_sequence_matcher = difflib.SequenceMatcher(
        None,
        [x[1] for x in old_tokens],
        [x[1] for x in new_tokens])

    for tag, o1, o2, n1, n2 in token_sequence_matcher.get_opcodes():
        # check the differences by token block types first to give a more
        # nicer "block" level replacement vs character diffs

        if tag == 'equal':
            for token_class, token_text in old_tokens[o1:o2]:
                old_tokens_result.append((token_class, '', token_text))
            for token_class, token_text in new_tokens[n1:n2]:
                new_tokens_result.append((token_class, '', token_text))
        elif tag == 'delete':
            for token_class, token_text in old_tokens[o1:o2]:
                old_tokens_result.append((token_class, 'del', token_text))
        elif tag == 'insert':
            for token_class, token_text in new_tokens[n1:n2]:
                new_tokens_result.append((token_class, 'ins', token_text))
        elif tag == 'replace':
            # if same type token blocks must be replaced, do a diff on the
            # characters in the token blocks to show individual changes

            old_char_tokens = []
            new_char_tokens = []
            for token_class, token_text in old_tokens[o1:o2]:
                for char in token_text:
                    old_char_tokens.append((token_class, char))

            for token_class, token_text in new_tokens[n1:n2]:
                for char in token_text:
                    new_char_tokens.append((token_class, char))

            old_string = ''.join([token_text for
                                  token_class, token_text in old_char_tokens])
            new_string = ''.join([token_text for
                                  token_class, token_text in new_char_tokens])

            char_sequence = difflib.SequenceMatcher(
                None, old_string, new_string)
            copcodes = char_sequence.get_opcodes()
            obuffer, nbuffer = [], []

            if use_diff_match_patch:
                dmp = diff_match_patch()
                dmp.Diff_EditCost = 11  # TODO: dan: extract this to a setting
                reps = dmp.diff_main(old_string, new_string)
                dmp.diff_cleanupEfficiency(reps)

                a, b = 0, 0
                for op, rep in reps:
                    l = len(rep)
                    if op == 0:
                        for i, c in enumerate(rep):
                            obuffer.append((old_char_tokens[a+i][0], '', c))
                            nbuffer.append((new_char_tokens[b+i][0], '', c))
                        a += l
                        b += l
                    elif op == -1:
                        for i, c in enumerate(rep):
                            obuffer.append((old_char_tokens[a+i][0], 'del', c))
                        a += l
                    elif op == 1:
                        for i, c in enumerate(rep):
                            nbuffer.append((new_char_tokens[b+i][0], 'ins', c))
                        b += l
            else:
                for ctag, co1, co2, cn1, cn2 in copcodes:
                    if ctag == 'equal':
                        for token_class, token_text in old_char_tokens[co1:co2]:
                            obuffer.append((token_class, '', token_text))
                        for token_class, token_text in new_char_tokens[cn1:cn2]:
                            nbuffer.append((token_class, '', token_text))
                    elif ctag == 'delete':
                        for token_class, token_text in old_char_tokens[co1:co2]:
                            obuffer.append((token_class, 'del', token_text))
                    elif ctag == 'insert':
                        for token_class, token_text in new_char_tokens[cn1:cn2]:
                            nbuffer.append((token_class, 'ins', token_text))
                    elif ctag == 'replace':
                        for token_class, token_text in old_char_tokens[co1:co2]:
                            obuffer.append((token_class, 'del', token_text))
                        for token_class, token_text in new_char_tokens[cn1:cn2]:
                            nbuffer.append((token_class, 'ins', token_text))

            old_tokens_result.extend(obuffer)
            new_tokens_result.extend(nbuffer)

    return old_tokens_result, new_tokens_result, similarity


class DiffSet(object):
    """
    An object for parsing the diff result from diffs.DiffProcessor and
    adding highlighting, side by side/unified renderings and line diffs
    """

    HL_REAL = 'REAL'  # highlights using original file, slow
    HL_FAST = 'FAST'  # highlights using just the line, fast but not correct
                      # in the case of multiline code
    HL_NONE = 'NONE'  # no highlighting, fastest

-    def __init__(self, highlight_mode=HL_REAL,
+    def __init__(self, highlight_mode=HL_REAL, repo_name=None,
                 source_node_getter=lambda filename: None,
                 target_node_getter=lambda filename: None,
                 source_nodes=None, target_nodes=None,
                 max_file_size_limit=150 * 1024,  # files over this size will
                                                  # use fast highlighting
                 ):

        self.highlight_mode = highlight_mode
        self.highlighted_filenodes = {}
        self.source_node_getter = source_node_getter
        self.target_node_getter = target_node_getter
        self.source_nodes = source_nodes or {}
        self.target_nodes = target_nodes or {}
-
+        self.repo_name = repo_name

        self.max_file_size_limit = max_file_size_limit

    def render_patchset(self, patchset, source_ref=None, target_ref=None):
        diffset = AttributeDict(dict(
            lines_added=0,
            lines_deleted=0,
            changed_files=0,
            files=[],
            limited_diff=isinstance(patchset, LimitedDiffContainer),
+            repo_name=self.repo_name,
            source_ref=source_ref,
            target_ref=target_ref,
        ))
        for patch in patchset:
            filediff = self.render_patch(patch)
            filediff.diffset = diffset
            diffset.files.append(filediff)
            diffset.changed_files += 1
            if not patch['stats']['binary']:
                diffset.lines_added += patch['stats']['added']
                diffset.lines_deleted += patch['stats']['deleted']

        return diffset

    _lexer_cache = {}

    def _get_lexer_for_filename(self, filename):
        # cached because we might need to call it twice for source/target
        if filename not in self._lexer_cache:
            self._lexer_cache[filename] = get_lexer_safe(filepath=filename)
        return self._lexer_cache[filename]

    def render_patch(self, patch):
        log.debug('rendering diff for %r' % patch['filename'])

        source_filename = patch['original_filename']
        target_filename = patch['filename']

        source_lexer = plain_text_lexer
        target_lexer = plain_text_lexer

        if not patch['stats']['binary']:
            if self.highlight_mode == self.HL_REAL:
                if (source_filename and patch['operation'] in ('D', 'M')
                        and source_filename not in self.source_nodes):
                    self.source_nodes[source_filename] = (
                        self.source_node_getter(source_filename))

                if (target_filename and patch['operation'] in ('A', 'M')
                        and target_filename not in self.target_nodes):
                    self.target_nodes[target_filename] = (
                        self.target_node_getter(target_filename))

            elif self.highlight_mode == self.HL_FAST:
                source_lexer = self._get_lexer_for_filename(source_filename)
                target_lexer = self._get_lexer_for_filename(target_filename)

        source_file = self.source_nodes.get(source_filename, source_filename)
        target_file = self.target_nodes.get(target_filename, target_filename)

        source_filenode, target_filenode = None, None

        # TODO: dan: FileNode.lexer works on the content of the file - which
        # can be slow - issue #4289 explains a lexer clean up - which once
        # done can allow caching a lexer for a filenode to avoid the file lookup
        if isinstance(source_file, FileNode):
            source_filenode = source_file
            source_lexer = source_file.lexer
        if isinstance(target_file, FileNode):
            target_filenode = target_file
            target_lexer = target_file.lexer

        source_file_path, target_file_path = None, None

        if source_filename != '/dev/null':
            source_file_path = source_filename
        if target_filename != '/dev/null':
            target_file_path = target_filename

        source_file_type = source_lexer.name
        target_file_type = target_lexer.name

        op_hunks = patch['chunks'][0]
        hunks = patch['chunks'][1:]

        filediff = AttributeDict({
            'source_file_path': source_file_path,
            'target_file_path': target_file_path,
            'source_filenode': source_filenode,
            'target_filenode': target_filenode,
            'hunks': [],
            'source_file_type': source_file_type,
            'target_file_type': target_file_type,
|
462 | 463 | 'patch': patch, |
|
463 | 464 | 'source_mode': patch['stats']['old_mode'], |
|
464 | 465 | 'target_mode': patch['stats']['new_mode'], |
|
465 | 466 | 'limited_diff': isinstance(patch, LimitedDiffContainer), |
|
466 | 467 | 'diffset': self, |
|
467 | 468 | }) |
|
468 | 469 | |
|
469 | 470 | for hunk in hunks: |
|
470 | 471 | hunkbit = self.parse_hunk(hunk, source_file, target_file) |
|
471 | 472 | hunkbit.filediff = filediff |
|
472 | 473 | filediff.hunks.append(hunkbit) |
|
473 | 474 | return filediff |
|
474 | 475 | |
|
475 | 476 | def parse_hunk(self, hunk, source_file, target_file): |
|
476 | 477 | result = AttributeDict(dict( |
|
477 | 478 | source_start=hunk['source_start'], |
|
478 | 479 | source_length=hunk['source_length'], |
|
479 | 480 | target_start=hunk['target_start'], |
|
480 | 481 | target_length=hunk['target_length'], |
|
481 | 482 | section_header=hunk['section_header'], |
|
482 | 483 | lines=[], |
|
483 | 484 | )) |
|
484 | 485 | before, after = [], [] |
|
485 | 486 | |
|
486 | 487 | for line in hunk['lines']: |
|
487 | 488 | if line['action'] == 'unmod': |
|
488 | 489 | result.lines.extend( |
|
489 | 490 | self.parse_lines(before, after, source_file, target_file)) |
|
490 | 491 | after.append(line) |
|
491 | 492 | before.append(line) |
|
492 | 493 | elif line['action'] == 'add': |
|
493 | 494 | after.append(line) |
|
494 | 495 | elif line['action'] == 'del': |
|
495 | 496 | before.append(line) |
|
496 | 497 | elif line['action'] == 'old-no-nl': |
|
497 | 498 | before.append(line) |
|
498 | 499 | elif line['action'] == 'new-no-nl': |
|
499 | 500 | after.append(line) |
|
500 | 501 | |
|
501 | 502 | result.lines.extend( |
|
502 | 503 | self.parse_lines(before, after, source_file, target_file)) |
|
503 | 504 | result.unified = self.as_unified(result.lines) |
|
504 | 505 | result.sideside = result.lines |
|
505 | 506 | return result |
|
506 | 507 | |
|
507 | 508 | def parse_lines(self, before_lines, after_lines, source_file, target_file): |
|
508 | 509 | # TODO: dan: investigate doing the diff comparison and fast highlighting |
|
509 | 510 | # on the entire before and after buffered block lines rather than by |
|
510 | 511 | # line, this means we can get better 'fast' highlighting if the context |
|
511 | 512 | # allows it - eg. |
|
512 | 513 | # line 4: """ |
|
513 | 514 | # line 5: this gets highlighted as a string |
|
514 | 515 | # line 6: """ |
|
515 | 516 | |
|
516 | 517 | lines = [] |
|
517 | 518 | while before_lines or after_lines: |
|
518 | 519 | before, after = None, None |
|
519 | 520 | before_tokens, after_tokens = None, None |
|
520 | 521 | |
|
521 | 522 | if before_lines: |
|
522 | 523 | before = before_lines.pop(0) |
|
523 | 524 | if after_lines: |
|
524 | 525 | after = after_lines.pop(0) |
|
525 | 526 | |
|
526 | 527 | original = AttributeDict() |
|
527 | 528 | modified = AttributeDict() |
|
528 | 529 | |
|
529 | 530 | if before: |
|
530 | 531 | if before['action'] == 'old-no-nl': |
|
531 | 532 | before_tokens = [('nonl', before['line'])] |
|
532 | 533 | else: |
|
533 | 534 | before_tokens = self.get_line_tokens( |
|
534 | 535 | line_text=before['line'], line_number=before['old_lineno'], |
|
535 | 536 | file=source_file) |
|
536 | 537 | original.lineno = before['old_lineno'] |
|
537 | 538 | original.content = before['line'] |
|
538 | 539 | original.action = self.action_to_op(before['action']) |
|
539 | 540 | |
|
540 | 541 | if after: |
|
541 | 542 | if after['action'] == 'new-no-nl': |
|
542 | 543 | after_tokens = [('nonl', after['line'])] |
|
543 | 544 | else: |
|
544 | 545 | after_tokens = self.get_line_tokens( |
|
545 | 546 | line_text=after['line'], line_number=after['new_lineno'], |
|
546 | 547 | file=target_file) |
|
547 | 548 | modified.lineno = after['new_lineno'] |
|
548 | 549 | modified.content = after['line'] |
|
549 | 550 | modified.action = self.action_to_op(after['action']) |
|
550 | 551 | |
|
551 | 552 | # diff the lines |
|
552 | 553 | if before_tokens and after_tokens: |
|
553 | 554 | o_tokens, m_tokens, similarity = tokens_diff( |
|
554 | 555 | before_tokens, after_tokens) |
|
555 | 556 | original.content = render_tokenstream(o_tokens) |
|
556 | 557 | modified.content = render_tokenstream(m_tokens) |
|
557 | 558 | elif before_tokens: |
|
558 | 559 | original.content = render_tokenstream( |
|
559 | 560 | [(x[0], '', x[1]) for x in before_tokens]) |
|
560 | 561 | elif after_tokens: |
|
561 | 562 | modified.content = render_tokenstream( |
|
562 | 563 | [(x[0], '', x[1]) for x in after_tokens]) |
|
563 | 564 | |
|
564 | 565 | lines.append(AttributeDict({ |
|
565 | 566 | 'original': original, |
|
566 | 567 | 'modified': modified, |
|
567 | 568 | })) |
|
568 | 569 | |
|
569 | 570 | return lines |
|
570 | 571 | |
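
The tokens_diff and render_tokenstream calls above are RhodeCode helpers that compute and render intraline (word-level) highlights between a removed and an added line. The underlying idea, sketched with difflib rather than the project's token-based implementation:

import difflib

def intraline_ops(before, after):
    # classify character runs as equal/replace/delete/insert between two lines
    sm = difflib.SequenceMatcher(None, before, after)
    return [(op, before[i1:i2], after[j1:j2])
            for op, i1, i2, j1, j2 in sm.get_opcodes()]

print(intraline_ops('colour', 'color'))
# [('equal', 'colo', 'colo'), ('delete', 'u', ''), ('equal', 'r', 'r')]
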
|
571 | 572 | def get_line_tokens(self, line_text, line_number, file=None): |
|
572 | 573 | filenode = None |
|
573 | 574 | filename = None |
|
574 | 575 | |
|
575 | 576 | if isinstance(file, basestring): |
|
576 | 577 | filename = file |
|
577 | 578 | elif isinstance(file, FileNode): |
|
578 | 579 | filenode = file |
|
579 | 580 | filename = file.unicode_path |
|
580 | 581 | |
|
581 | 582 | if self.highlight_mode == self.HL_REAL and filenode: |
|
582 | 583 | if line_number and filenode.size < self.max_file_size_limit:

583 | 584 | return self.get_tokenized_filenode_line(filenode, line_number)
|
584 | 585 | |
|
585 | 586 | if self.highlight_mode in (self.HL_REAL, self.HL_FAST) and filename: |
|
586 | 587 | lexer = self._get_lexer_for_filename(filename) |
|
587 | 588 | return list(tokenize_string(line_text, lexer)) |
|
588 | 589 | |
|
589 | 590 | return list(tokenize_string(line_text, plain_text_lexer)) |
|
590 | 591 | |
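
tokenize_string and plain_text_lexer are RhodeCode wrappers around Pygments. A rough equivalent of the HL_FAST fallback path above, calling Pygments directly (a sketch under that assumption, not the project's helper):

from pygments import lex
from pygments.lexers import TextLexer, get_lexer_for_filename

def fast_line_tokens(line_text, filename=None):
    # pick a lexer from the filename; fall back to plain text on any failure
    try:
        lexer = get_lexer_for_filename(filename or '')
    except Exception:
        lexer = TextLexer()
    return list(lex(line_text, lexer))

print(fast_line_tokens("def foo(): pass", "example.py"))
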
|
591 | 592 | def get_tokenized_filenode_line(self, filenode, line_number): |
|
592 | 593 | |
|
593 | 594 | if filenode not in self.highlighted_filenodes: |
|
594 | 595 | tokenized_lines = filenode_as_lines_tokens(filenode, filenode.lexer) |
|
595 | 596 | self.highlighted_filenodes[filenode] = tokenized_lines |
|
596 | 597 | return self.highlighted_filenodes[filenode][line_number - 1] |
|
597 | 598 | |
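
get_tokenized_filenode_line pays the cost of highlighting a whole file once, then serves every later per-line request from the cache. The same pattern in isolation, with a hypothetical tokenize_file callable and the 1-based line numbers used above:

def make_line_getter(tokenize_file):
    cache = {}
    def get_line(key, line_number):
        if key not in cache:
            cache[key] = tokenize_file(key)  # expensive, runs once per file
        return cache[key][line_number - 1]   # line numbers are 1-based
    return get_line

# usage: tokenize_file here just splits text into lines for demonstration
get_line = make_line_getter(lambda text: text.splitlines())
assert get_line("a\nb\nc", 2) == "b"
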
|
598 | 599 | def action_to_op(self, action): |
|
599 | 600 | return { |
|
600 | 601 | 'add': '+', |
|
601 | 602 | 'del': '-', |
|
602 | 603 | 'unmod': ' ', |
|
603 | 604 | 'old-no-nl': ' ', |
|
604 | 605 | 'new-no-nl': ' ', |
|
605 | 606 | }.get(action, action) |
|
606 | 607 | |
|
607 | 608 | def as_unified(self, lines): |
|
608 | 609 | """ Return a generator that yields the lines of a diff in unified order """ |
|
609 | 610 | def generator(): |
|
610 | 611 | buf = [] |
|
611 | 612 | for line in lines: |
|
612 | 613 | |
|
613 | 614 | if buf and (not line.original or line.original.action == ' '):
|
614 | 615 | for b in buf: |
|
615 | 616 | yield b |
|
616 | 617 | buf = [] |
|
617 | 618 | |
|
618 | 619 | if line.original: |
|
619 | 620 | if line.original.action == ' ': |
|
620 | 621 | yield (line.original.lineno, line.modified.lineno, |
|
621 | 622 | line.original.action, line.original.content) |
|
622 | 623 | continue |
|
623 | 624 | |
|
624 | 625 | if line.original.action == '-': |
|
625 | 626 | yield (line.original.lineno, None, |
|
626 | 627 | line.original.action, line.original.content) |
|
627 | 628 | |
|
628 | 629 | if line.modified.action == '+': |
|
629 | 630 | buf.append(( |
|
630 | 631 | None, line.modified.lineno, |
|
631 | 632 | line.modified.action, line.modified.content)) |
|
632 | 633 | continue |
|
633 | 634 | |
|
634 | 635 | if line.modified: |
|
635 | 636 | yield (None, line.modified.lineno, |
|
636 | 637 | line.modified.action, line.modified.content) |
|
637 | 638 | |
|
638 | 639 | for b in buf: |
|
639 | 640 | yield b |
|
640 | 641 | |
|
641 | 642 | return generator() |
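
To make the ordering rule in as_unified concrete: deletions are yielded immediately, additions are buffered, and a context line (or the end of the hunk) flushes the buffer, which produces the familiar unified-diff grouping of '-' runs followed by '+' runs. A standalone sketch over simplified (lineno, action, content) triples:

def as_unified(rows):
    buf = []
    for orig, mod in rows:
        if orig and orig[1] == ' ':
            for b in buf:  # context line reached: flush queued additions
                yield b
            buf = []
            yield (orig[0], mod[0], ' ', orig[2])
            continue
        if orig and orig[1] == '-':
            yield (orig[0], None, '-', orig[2])
        if mod and mod[1] == '+':
            buf.append((None, mod[0], '+', mod[2]))
    for b in buf:  # trailing additions with no context line after them
        yield b

rows = [
    ((1, '-', 'old'), (1, '+', 'new')),
    ((2, ' ', 'same'), (2, ' ', 'same')),
]
print(list(as_unified(rows)))
# [(1, None, '-', 'old'), (None, 1, '+', 'new'), (2, 2, ' ', 'same')]
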
@@ -1,420 +1,420
|
1 | 1 | <%def name="diff_line_anchor(filename, line, type)"><% |
|
2 | 2 | return '%s_%s_%i' % (h.safeid(filename), type, line) |
|
3 | 3 | %></%def> |
|
4 | 4 | |
|
5 | 5 | <%def name="action_class(action)"><% |
|
6 | 6 | return { |
|
7 | 7 | '-': 'cb-deletion', |
|
8 | 8 | '+': 'cb-addition', |
|
9 | 9 | ' ': 'cb-context', |
|
10 | 10 | }.get(action, 'cb-empty') |
|
11 | 11 | %></%def> |
|
12 | 12 | |
|
13 | 13 | <%def name="op_class(op_id)"><% |
|
14 | 14 | return { |
|
15 | 15 | DEL_FILENODE: 'deletion', # file deleted |
|
16 | 16 | BIN_FILENODE: 'warning' # binary diff hidden |
|
17 | 17 | }.get(op_id, 'addition') |
|
18 | 18 | %></%def> |
|
19 | 19 | |
|
20 | 20 | <%def name="link_for(**kw)"><% |
|
21 | 21 | new_args = request.GET.mixed() |
|
22 | 22 | new_args.update(kw) |
|
23 | 23 | return h.url('', **new_args) |
|
24 | 24 | %></%def> |
|
25 | 25 | |
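
link_for rebuilds the current URL with selected query arguments overridden; request.GET.mixed() is WebOb's multidict flattened to a plain mapping. The same merge sketched without the Pylons request machinery:

try:
    from urllib import urlencode        # Python 2, as used by this codebase
except ImportError:
    from urllib.parse import urlencode  # Python 3

def link_for(current_args, **overrides):
    new_args = dict(current_args)
    new_args.update(overrides)  # overrides win over the existing query string
    return '?' + urlencode(new_args)

print(link_for({'diffmode': 'unified'}, fulldiff=1))
# ?diffmode=unified&fulldiff=1 (key order may vary on older Pythons)
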
|
26 | 26 | <%def name="render_diffset(diffset, commit=None, |
|
27 | 27 | |
|
28 | 28 | # collapse all file diff entries when there are more than this amount of files in the diff |
|
29 | 29 | collapse_when_files_over=20, |
|
30 | 30 | |
|
31 | 31 | # collapse lines in the diff when more than this amount of lines changed in the file diff |
|
32 | 32 | lines_changed_limit=500, |
|
33 | 33 | |
|
34 | 34 | # add a ruler to the output at the given character column
|
35 | 35 | ruler_at_chars=0, |
|
36 | 36 | |
|
37 | 37 | )"> |
|
38 | 38 | <% |
|
39 | 39 | collapse_all = len(diffset.files) > collapse_when_files_over |
|
40 | 40 | %> |
|
41 | 41 | |
|
42 | 42 | %if c.diffmode == 'sideside': |
|
43 | 43 | <style> |
|
44 | 44 | .wrapper { |
|
45 | 45 | max-width: 1600px !important; |
|
46 | 46 | } |
|
47 | 47 | </style> |
|
48 | 48 | %endif |
|
49 | 49 | %if ruler_at_chars: |
|
50 | 50 | <style> |
|
51 | 51 | .diff table.cb .cb-content:after { |
|
52 | 52 | content: ""; |
|
53 | 53 | border-left: 1px solid blue; |
|
54 | 54 | position: absolute; |
|
55 | 55 | top: 0; |
|
56 | 56 | height: 18px; |
|
57 | 57 | opacity: .2; |
|
58 | 58 | z-index: 10; |
|
59 | 59 | ## +5 to account for diff action (+/-) |
|
60 | 60 | left: ${ruler_at_chars + 5}ch; |
|
61 | 61 | } </style>
|
62 | 62 | %endif |
|
63 | 63 | |
|
64 | 64 | <div class="diffset"> |
|
65 | 65 | <div class="diffset-heading ${diffset.limited_diff and 'diffset-heading-warning' or ''}"> |
|
66 | 66 | %if commit: |
|
67 | 67 | <div class="pull-right"> |
|
68 |    | <a class="btn tooltip" title="${_('Browse Files at revision {}').format(commit.raw_id)}" href="${h.url('files_home',repo_name=…

   | 68 | <a class="btn tooltip" title="${_('Browse Files at revision {}').format(commit.raw_id)}" href="${h.url('files_home',repo_name=diffset.repo_name, revision=commit.raw_id, f_path='')}">
|
69 | 69 | ${_('Browse Files')} |
|
70 | 70 | </a> |
|
71 | 71 | </div> |
|
72 | 72 | %endif |
|
73 | 73 | <h2 class="clearinner"> |
|
74 | 74 | %if commit: |
|
75 | 75 | <a class="tooltip revision" title="${h.tooltip(commit.message)}" href="${h.url('changeset_home',repo_name=c.repo_name,revision=commit.raw_id)}">${'r%s:%s' % (commit.revision,h.short_id(commit.raw_id))}</a> - |
|
76 | 76 | ${h.age_component(commit.date)} - |
|
77 | 77 | %endif |
|
78 | 78 | %if diffset.limited_diff: |
|
79 | 79 | ${_('The requested commit is too big and content was truncated.')} |
|
80 | 80 | |
|
81 | 81 | ${ungettext('%(num)s file changed.', '%(num)s files changed.', diffset.changed_files) % {'num': diffset.changed_files}} |
|
82 | 82 | <a href="${link_for(fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a> |
|
83 | 83 | %else: |
|
84 | 84 | ${ungettext('%(num)s file changed: %(linesadd)s inserted, ''%(linesdel)s deleted', |
|
85 | 85 | '%(num)s files changed: %(linesadd)s inserted, %(linesdel)s deleted', diffset.changed_files) % {'num': diffset.changed_files, 'linesadd': diffset.lines_added, 'linesdel': diffset.lines_deleted}} |
|
86 | 86 | %endif |
|
87 | 87 | </h2> |
|
88 | 88 | </div> |
|
89 | 89 | |
|
90 | 90 | %if not diffset.files: |
|
91 | 91 | <p class="empty_data">${_('No files')}</p> |
|
92 | 92 | %endif |
|
93 | 93 | |
|
94 | 94 | <div class="filediffs"> |
|
95 | 95 | %for i, filediff in enumerate(diffset.files): |
|
96 | 96 | <% |
|
97 | 97 | lines_changed = filediff['patch']['stats']['added'] + filediff['patch']['stats']['deleted'] |
|
98 | 98 | over_lines_changed_limit = lines_changed > lines_changed_limit |
|
99 | 99 | %> |
|
100 | 100 | <input ${collapse_all and 'checked' or ''} class="filediff-collapse-state" id="filediff-collapse-${id(filediff)}" type="checkbox"> |
|
101 | 101 | <div |
|
102 | 102 | class="filediff" |
|
103 | 103 | data-f-path="${filediff['patch']['filename']}" |
|
104 | 104 | id="a_${h.FID(commit and commit.raw_id or '', filediff['patch']['filename'])}"> |
|
105 | 105 | <label for="filediff-collapse-${id(filediff)}" class="filediff-heading"> |
|
106 | 106 | <div class="filediff-collapse-indicator"></div> |
|
107 | 107 | ${diff_ops(filediff)} |
|
108 | 108 | </label> |
|
109 | 109 | ${diff_menu(filediff)} |
|
110 | 110 | <table class="cb cb-diff-${c.diffmode} code-highlight ${over_lines_changed_limit and 'cb-collapsed' or ''}"> |
|
111 | 111 | %if not filediff.hunks: |
|
112 | 112 | %for op_id, op_text in filediff['patch']['stats']['ops'].items(): |
|
113 | 113 | <tr> |
|
114 | 114 | <td class="cb-text cb-${op_class(op_id)}" ${c.diffmode == 'unified' and 'colspan=3' or 'colspan=4'}> |
|
115 | 115 | %if op_id == DEL_FILENODE: |
|
116 | 116 | ${_('File was deleted')} |
|
117 | 117 | %elif op_id == BIN_FILENODE: |
|
118 | 118 | ${_('Binary file hidden')} |
|
119 | 119 | %else: |
|
120 | 120 | ${op_text} |
|
121 | 121 | %endif |
|
122 | 122 | </td> |
|
123 | 123 | </tr> |
|
124 | 124 | %endfor |
|
125 | 125 | %endif |
|
126 | 126 | %if over_lines_changed_limit: |
|
127 | 127 | <tr class="cb-warning cb-collapser"> |
|
128 | 128 | <td class="cb-text" ${c.diffmode == 'unified' and 'colspan=3' or 'colspan=4'}> |
|
129 | 129 | ${_('This diff has been collapsed as it changes many lines (%i lines changed)' % lines_changed)}
|
130 | 130 | <a href="#" class="cb-expand" |
|
131 | 131 | onclick="$(this).closest('table').removeClass('cb-collapsed'); return false;">${_('Show them')} |
|
132 | 132 | </a> |
|
133 | 133 | <a href="#" class="cb-collapse" |
|
134 | 134 | onclick="$(this).closest('table').addClass('cb-collapsed'); return false;">${_('Hide them')} |
|
135 | 135 | </a> |
|
136 | 136 | </td> |
|
137 | 137 | </tr> |
|
138 | 138 | %endif |
|
139 | 139 | %if filediff.patch['is_limited_diff']: |
|
140 | 140 | <tr class="cb-warning cb-collapser"> |
|
141 | 141 | <td class="cb-text" ${c.diffmode == 'unified' and 'colspan=3' or 'colspan=4'}> |
|
142 | 142 | ${_('The requested commit is too big and content was truncated.')} <a href="${link_for(fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a> |
|
143 | 143 | </td> |
|
144 | 144 | </tr> |
|
145 | 145 | %endif |
|
146 | 146 | %for hunk in filediff.hunks: |
|
147 | 147 | <tr class="cb-hunk"> |
|
148 | 148 | <td ${c.diffmode == 'unified' and 'colspan=2' or ''}> |
|
149 | 149 | ## TODO: dan: add ajax loading of more context here |
|
150 | 150 | ## <a href="#"> |
|
151 | 151 | <i class="icon-more"></i> |
|
152 | 152 | ## </a> |
|
153 | 153 | </td> |
|
154 | 154 | <td ${c.diffmode == 'sideside' and 'colspan=3' or ''}> |
|
155 | 155 | @@ |
|
156 | 156 | -${hunk.source_start},${hunk.source_length} |
|
157 | 157 | +${hunk.target_start},${hunk.target_length} |
|
158 | 158 | ${hunk.section_header} |
|
159 | 159 | </td> |
|
160 | 160 | </tr> |
|
161 | 161 | %if c.diffmode == 'unified': |
|
162 | 162 | ${render_hunk_lines_unified(hunk)} |
|
163 | 163 | %elif c.diffmode == 'sideside': |
|
164 | 164 | ${render_hunk_lines_sideside(hunk)} |
|
165 | 165 | %else: |
|
166 | 166 | <tr class="cb-line"> |
|
167 | 167 | <td>unknown diff mode</td> |
|
168 | 168 | </tr> |
|
169 | 169 | %endif |
|
170 | 170 | %endfor |
|
171 | 171 | </table> |
|
172 | 172 | </div> |
|
173 | 173 | %endfor |
|
174 | 174 | </div> |
|
175 | 175 | </div> |
|
176 | 176 | </%def> |
|
177 | 177 | |
|
178 | 178 | <%def name="diff_ops(filediff)"> |
|
179 | 179 | <% |
|
180 | 180 | stats = filediff['patch']['stats'] |
|
181 | 181 | from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \ |
|
182 | 182 | MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE |
|
183 | 183 | %> |
|
184 | 184 | <span class="pill"> |
|
185 | 185 | %if filediff.source_file_path and filediff.target_file_path: |
|
186 | 186 | %if filediff.source_file_path != filediff.target_file_path: # file was renamed |
|
187 | 187 | <strong>${filediff.target_file_path}</strong> ⬅ <del>${filediff.source_file_path}</del>
|
188 | 188 | %else: |
|
189 | 189 | ## file was modified |
|
190 | 190 | <strong>${filediff.source_file_path}</strong> |
|
191 | 191 | %endif |
|
192 | 192 | %else: |
|
193 | 193 | %if filediff.source_file_path: |
|
194 | 194 | ## file was deleted |
|
195 | 195 | <strong>${filediff.source_file_path}</strong> |
|
196 | 196 | %else: |
|
197 | 197 | ## file was added |
|
198 | 198 | <strong>${filediff.target_file_path}</strong> |
|
199 | 199 | %endif |
|
200 | 200 | %endif |
|
201 | 201 | </span> |
|
202 | 202 | <span class="pill-group" style="float: left"> |
|
203 | 203 | %if filediff.patch['is_limited_diff']: |
|
204 | 204 | <span class="pill tooltip" op="limited" title="The stats for this diff are not complete">limited diff</span> |
|
205 | 205 | %endif |
|
206 | 206 | %if RENAMED_FILENODE in stats['ops']: |
|
207 | 207 | <span class="pill" op="renamed">renamed</span> |
|
208 | 208 | %endif |
|
209 | 209 | |
|
210 | 210 | %if NEW_FILENODE in stats['ops']: |
|
211 | 211 | <span class="pill" op="created">created</span> |
|
212 | 212 | %if filediff['target_mode'].startswith('120'): |
|
213 | 213 | <span class="pill" op="symlink">symlink</span> |
|
214 | 214 | %else: |
|
215 | 215 | <span class="pill" op="mode">${nice_mode(filediff['target_mode'])}</span> |
|
216 | 216 | %endif |
|
217 | 217 | %endif |
|
218 | 218 | |
|
219 | 219 | %if DEL_FILENODE in stats['ops']: |
|
220 | 220 | <span class="pill" op="removed">removed</span> |
|
221 | 221 | %endif |
|
222 | 222 | |
|
223 | 223 | %if CHMOD_FILENODE in stats['ops']: |
|
224 | 224 | <span class="pill" op="mode"> |
|
225 | 225 | ${nice_mode(filediff['source_mode'])} ➡ ${nice_mode(filediff['target_mode'])}
|
226 | 226 | </span> |
|
227 | 227 | %endif |
|
228 | 228 | </span> |
|
229 | 229 | |
|
230 | 230 | <a class="pill filediff-anchor" href="#a_${h.FID(commit and commit.raw_id or '', filediff.patch['filename'])}">¶</a>
|
231 | 231 | |
|
232 | 232 | <span class="pill-group" style="float: right"> |
|
233 | 233 | %if BIN_FILENODE in stats['ops']: |
|
234 | 234 | <span class="pill" op="binary">binary</span> |
|
235 | 235 | %if MOD_FILENODE in stats['ops']: |
|
236 | 236 | <span class="pill" op="modified">modified</span> |
|
237 | 237 | %endif |
|
238 | 238 | %endif |
|
239 | 239 | %if stats['added']: |
|
240 | 240 | <span class="pill" op="added">+${stats['added']}</span> |
|
241 | 241 | %endif |
|
242 | 242 | %if stats['deleted']: |
|
243 | 243 | <span class="pill" op="deleted">-${stats['deleted']}</span> |
|
244 | 244 | %endif |
|
245 | 245 | </span> |
|
246 | 246 | |
|
247 | 247 | </%def> |
|
248 | 248 | |
|
249 | 249 | <%def name="nice_mode(filemode)"> |
|
250 | 250 | ${filemode.startswith('100') and filemode[3:] or filemode} |
|
251 | 251 | </%def> |
|
252 | 252 | |
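
nice_mode strips the leading '100' from git-style regular-file modes so 100644 renders as 644, while symlinks (120000) and other special modes render in full. The same expression as plain Python:

def nice_mode(filemode):
    # git-style regular files are 100644/100755; show just the permission bits
    return filemode[3:] if filemode.startswith('100') else filemode

assert nice_mode('100644') == '644'
assert nice_mode('120000') == '120000'  # symlink mode is shown unchanged
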
|
253 | 253 | <%def name="diff_menu(filediff)"> |
|
254 | 254 | <div class="filediff-menu"> |
|
255 | 255 | %if filediff.diffset.source_ref: |
|
256 | 256 | %if filediff.patch['operation'] in ['D', 'M']: |
|
257 | 257 | <a |
|
258 | 258 | class="tooltip" |
|
259 |    | href="${h.url('files_home',repo_name=…

   | 259 | href="${h.url('files_home',repo_name=filediff.diffset.repo_name,f_path=filediff.source_file_path,revision=filediff.diffset.source_ref)}"
|
260 | 260 | title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}" |
|
261 | 261 | > |
|
262 | 262 | ${_('Show file before')} |
|
263 | 263 | </a> |
|
264 | 264 | %else: |
|
265 | 265 | <span |
|
266 | 266 | class="tooltip" |
|
267 | 267 | title="${h.tooltip(_('File no longer present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}" |
|
268 | 268 | > |
|
269 | 269 | ${_('Show file before')} |
|
270 | 270 | </span> |
|
271 | 271 | %endif |
|
272 | 272 | %if filediff.patch['operation'] in ['A', 'M']: |
|
273 | 273 | <a |
|
274 | 274 | class="tooltip" |
|
275 |    | href="${h.url('files_home',repo_name=…

   | 275 | href="${h.url('files_home',repo_name=filediff.diffset.repo_name,f_path=filediff.target_file_path,revision=filediff.diffset.target_ref)}"
|
276 | 276 | title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}" |
|
277 | 277 | > |
|
278 | 278 | ${_('Show file after')} |
|
279 | 279 | </a> |
|
280 | 280 | %else: |
|
281 | 281 | <span |
|
282 | 282 | class="tooltip" |
|
283 | 283 | title="${h.tooltip(_('File no longer present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}" |
|
284 | 284 | > |
|
285 | 285 | ${_('Show file after')} |
|
286 | 286 | </span> |
|
287 | 287 | %endif |
|
288 | 288 | <a |
|
289 | 289 | class="tooltip" |
|
290 | 290 | title="${h.tooltip(_('Raw diff'))}" |
|
291 |    | href="${h.url('files_diff_home',repo_name=…

   | 291 | href="${h.url('files_diff_home',repo_name=filediff.diffset.repo_name,f_path=filediff.target_file_path,diff2=filediff.diffset.target_ref,diff1=filediff.diffset.source_ref,diff='raw')}"
|
292 | 292 | > |
|
293 | 293 | ${_('Raw diff')} |
|
294 | 294 | </a> |
|
295 | 295 | <a |
|
296 | 296 | class="tooltip" |
|
297 | 297 | title="${h.tooltip(_('Download diff'))}" |
|
298 |    | href="${h.url('files_diff_home',repo_name=…

   | 298 | href="${h.url('files_diff_home',repo_name=filediff.diffset.repo_name,f_path=filediff.target_file_path,diff2=filediff.diffset.target_ref,diff1=filediff.diffset.source_ref,diff='download')}"
|
299 | 299 | > |
|
300 | 300 | ${_('Download diff')} |
|
301 | 301 | </a> |
|
302 | 302 | %endif |
|
303 | 303 | </div> |
|
304 | 304 | </%def> |
|
305 | 305 | |
|
306 | 306 | |
|
307 | 307 | <%def name="render_hunk_lines_sideside(hunk)"> |
|
308 | 308 | %for i, line in enumerate(hunk.sideside): |
|
309 | 309 | <% |
|
310 | 310 | old_line_anchor, new_line_anchor = None, None |
|
311 | 311 | if line.original.lineno: |
|
312 | 312 | old_line_anchor = diff_line_anchor(hunk.filediff.source_file_path, line.original.lineno, 'o') |
|
313 | 313 | if line.modified.lineno: |
|
314 | 314 | new_line_anchor = diff_line_anchor(hunk.filediff.target_file_path, line.modified.lineno, 'n') |
|
315 | 315 | %> |
|
316 | 316 | <tr class="cb-line"> |
|
317 | 317 | <td class="cb-lineno ${action_class(line.original.action)}" |
|
318 | 318 | data-line-number="${line.original.lineno}" |
|
319 | 319 | %if old_line_anchor: |
|
320 | 320 | id="${old_line_anchor}" |
|
321 | 321 | %endif |
|
322 | 322 | > |
|
323 | 323 | %if line.original.lineno: |
|
324 | 324 | <a name="${old_line_anchor}" href="#${old_line_anchor}">${line.original.lineno}</a> |
|
325 | 325 | %endif |
|
326 | 326 | </td> |
|
327 | 327 | <td class="cb-content ${action_class(line.original.action)}" |
|
328 | 328 | data-line-number="o${line.original.lineno}" |
|
329 | 329 | ><span class="cb-code">${line.original.action} ${line.original.content or '' | n}</span> |
|
330 | 330 | </td> |
|
331 | 331 | <td class="cb-lineno ${action_class(line.modified.action)}" |
|
332 | 332 | data-line-number="${line.modified.lineno}" |
|
333 | 333 | %if new_line_anchor: |
|
334 | 334 | id="${new_line_anchor}" |
|
335 | 335 | %endif |
|
336 | 336 | > |
|
337 | 337 | %if line.modified.lineno: |
|
338 | 338 | <a name="${new_line_anchor}" href="#${new_line_anchor}">${line.modified.lineno}</a> |
|
339 | 339 | %endif |
|
340 | 340 | </td> |
|
341 | 341 | <td class="cb-content ${action_class(line.modified.action)}" |
|
342 | 342 | data-line-number="n${line.modified.lineno}" |
|
343 | 343 | > |
|
344 | 344 | <span class="cb-code">${line.modified.action} ${line.modified.content or '' | n}</span> |
|
345 | 345 | </td> |
|
346 | 346 | </tr> |
|
347 | 347 | %endfor |
|
348 | 348 | </%def> |
|
349 | 349 | |
|
350 | 350 | |
|
351 | 351 | <%def name="render_hunk_lines_unified(hunk)"> |
|
352 | 352 | %for old_line_no, new_line_no, action, content in hunk.unified: |
|
353 | 353 | <% |
|
354 | 354 | old_line_anchor, new_line_anchor = None, None |
|
355 | 355 | if old_line_no: |
|
356 | 356 | old_line_anchor = diff_line_anchor(hunk.filediff.source_file_path, old_line_no, 'o') |
|
357 | 357 | if new_line_no: |
|
358 | 358 | new_line_anchor = diff_line_anchor(hunk.filediff.target_file_path, new_line_no, 'n') |
|
359 | 359 | %> |
|
360 | 360 | <tr class="cb-line"> |
|
361 | 361 | <td class="cb-lineno ${action_class(action)}" |
|
362 | 362 | data-line-number="${old_line_no}" |
|
363 | 363 | %if old_line_anchor: |
|
364 | 364 | id="${old_line_anchor}" |
|
365 | 365 | %endif |
|
366 | 366 | > |
|
367 | 367 | %if old_line_anchor: |
|
368 | 368 | <a name="${old_line_anchor}" href="#${old_line_anchor}">${old_line_no}</a> |
|
369 | 369 | %endif |
|
370 | 370 | </td> |
|
371 | 371 | <td class="cb-lineno ${action_class(action)}" |
|
372 | 372 | data-line-number="${new_line_no}" |
|
373 | 373 | %if new_line_anchor: |
|
374 | 374 | id="${new_line_anchor}" |
|
375 | 375 | %endif |
|
376 | 376 | > |
|
377 | 377 | %if new_line_anchor: |
|
378 | 378 | <a name="${new_line_anchor}" href="#${new_line_anchor}">${new_line_no}</a> |
|
379 | 379 | %endif |
|
380 | 380 | </td> |
|
381 | 381 | <td class="cb-content ${action_class(action)}" |
|
382 | 382 | data-line-number="${new_line_no and 'n' or 'o'}${new_line_no or old_line_no}" |
|
383 | 383 | ><span class="cb-code">${action} ${content or '' | n}</span> |
|
384 | 384 | </td> |
|
385 | 385 | </tr> |
|
386 | 386 | %endfor |
|
387 | 387 | </%def> |
|
388 | 388 | |
|
389 | 389 | |
|
390 | 390 | <%def name="render_diffset_menu()"> |
|
391 | 391 | <div class="diffset-menu clearinner"> |
|
392 | 392 | <div class="pull-right"> |
|
393 | 393 | <div class="btn-group"> |
|
394 | 394 | <a |
|
395 | 395 | class="btn ${c.diffmode == 'sideside' and 'btn-primary' or ''} tooltip"
|
396 | 396 | title="${_('View side by side')}" |
|
397 | 397 | href="${h.url_replace(diffmode='sideside')}"> |
|
398 | 398 | <span>${_('Side by Side')}</span> |
|
399 | 399 | </a> |
|
400 | 400 | <a |
|
401 | 401 | class="btn ${c.diffmode == 'unified' and 'btn-primary' or ''} tooltip"
|
402 | 402 | title="${_('View unified')}" href="${h.url_replace(diffmode='unified')}"> |
|
403 | 403 | <span>${_('Unified')}</span> |
|
404 | 404 | </a> |
|
405 | 405 | </div> |
|
406 | 406 | </div> |
|
407 | 407 | <div class="pull-left"> |
|
408 | 408 | <div class="btn-group"> |
|
409 | 409 | <a |
|
410 | 410 | class="btn" |
|
411 | 411 | href="#" |
|
412 | 412 | onclick="$('input[class=filediff-collapse-state]').prop('checked', false); return false">${_('Expand All')}</a> |
|
413 | 413 | <a |
|
414 | 414 | class="btn" |
|
415 | 415 | href="#" |
|
416 | 416 | onclick="$('input[class=filediff-collapse-state]').prop('checked', true); return false">${_('Collapse All')}</a> |
|
417 | 417 | </div> |
|
418 | 418 | </div> |
|
419 | 419 | </div> |
|
420 | 420 | </%def> |