##// END OF EJS Templates
diffs: add repo_name as parameter of diffset - fixes bug...
dan -
r1142:c6b41371 default
parent child Browse files
Show More
@@ -1,469 +1,470 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2010-2016 RhodeCode GmbH
3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 """
21 """
22 commit controller for RhodeCode showing changes between commits
22 commit controller for RhodeCode showing changes between commits
23 """
23 """
24
24
25 import logging
25 import logging
26
26
27 from collections import defaultdict
27 from collections import defaultdict
28 from webob.exc import HTTPForbidden, HTTPBadRequest, HTTPNotFound
28 from webob.exc import HTTPForbidden, HTTPBadRequest, HTTPNotFound
29
29
30 from pylons import tmpl_context as c, request, response
30 from pylons import tmpl_context as c, request, response
31 from pylons.i18n.translation import _
31 from pylons.i18n.translation import _
32 from pylons.controllers.util import redirect
32 from pylons.controllers.util import redirect
33
33
34 from rhodecode.lib import auth
34 from rhodecode.lib import auth
35 from rhodecode.lib import diffs, codeblocks
35 from rhodecode.lib import diffs, codeblocks
36 from rhodecode.lib.auth import (
36 from rhodecode.lib.auth import (
37 LoginRequired, HasRepoPermissionAnyDecorator, NotAnonymous)
37 LoginRequired, HasRepoPermissionAnyDecorator, NotAnonymous)
38 from rhodecode.lib.base import BaseRepoController, render
38 from rhodecode.lib.base import BaseRepoController, render
39 from rhodecode.lib.compat import OrderedDict
39 from rhodecode.lib.compat import OrderedDict
40 from rhodecode.lib.exceptions import StatusChangeOnClosedPullRequestError
40 from rhodecode.lib.exceptions import StatusChangeOnClosedPullRequestError
41 import rhodecode.lib.helpers as h
41 import rhodecode.lib.helpers as h
42 from rhodecode.lib.utils import action_logger, jsonify
42 from rhodecode.lib.utils import action_logger, jsonify
43 from rhodecode.lib.utils2 import safe_unicode
43 from rhodecode.lib.utils2 import safe_unicode
44 from rhodecode.lib.vcs.backends.base import EmptyCommit
44 from rhodecode.lib.vcs.backends.base import EmptyCommit
45 from rhodecode.lib.vcs.exceptions import (
45 from rhodecode.lib.vcs.exceptions import (
46 RepositoryError, CommitDoesNotExistError)
46 RepositoryError, CommitDoesNotExistError)
47 from rhodecode.model.db import ChangesetComment, ChangesetStatus
47 from rhodecode.model.db import ChangesetComment, ChangesetStatus
48 from rhodecode.model.changeset_status import ChangesetStatusModel
48 from rhodecode.model.changeset_status import ChangesetStatusModel
49 from rhodecode.model.comment import ChangesetCommentsModel
49 from rhodecode.model.comment import ChangesetCommentsModel
50 from rhodecode.model.meta import Session
50 from rhodecode.model.meta import Session
51 from rhodecode.model.repo import RepoModel
51 from rhodecode.model.repo import RepoModel
52
52
53
53
54 log = logging.getLogger(__name__)
54 log = logging.getLogger(__name__)
55
55
56
56
57 def _update_with_GET(params, GET):
57 def _update_with_GET(params, GET):
58 for k in ['diff1', 'diff2', 'diff']:
58 for k in ['diff1', 'diff2', 'diff']:
59 params[k] += GET.getall(k)
59 params[k] += GET.getall(k)
60
60
61
61
62 def get_ignore_ws(fid, GET):
62 def get_ignore_ws(fid, GET):
63 ig_ws_global = GET.get('ignorews')
63 ig_ws_global = GET.get('ignorews')
64 ig_ws = filter(lambda k: k.startswith('WS'), GET.getall(fid))
64 ig_ws = filter(lambda k: k.startswith('WS'), GET.getall(fid))
65 if ig_ws:
65 if ig_ws:
66 try:
66 try:
67 return int(ig_ws[0].split(':')[-1])
67 return int(ig_ws[0].split(':')[-1])
68 except Exception:
68 except Exception:
69 pass
69 pass
70 return ig_ws_global
70 return ig_ws_global
71
71
72
72
73 def _ignorews_url(GET, fileid=None):
73 def _ignorews_url(GET, fileid=None):
74 fileid = str(fileid) if fileid else None
74 fileid = str(fileid) if fileid else None
75 params = defaultdict(list)
75 params = defaultdict(list)
76 _update_with_GET(params, GET)
76 _update_with_GET(params, GET)
77 label = _('Show whitespace')
77 label = _('Show whitespace')
78 tooltiplbl = _('Show whitespace for all diffs')
78 tooltiplbl = _('Show whitespace for all diffs')
79 ig_ws = get_ignore_ws(fileid, GET)
79 ig_ws = get_ignore_ws(fileid, GET)
80 ln_ctx = get_line_ctx(fileid, GET)
80 ln_ctx = get_line_ctx(fileid, GET)
81
81
82 if ig_ws is None:
82 if ig_ws is None:
83 params['ignorews'] += [1]
83 params['ignorews'] += [1]
84 label = _('Ignore whitespace')
84 label = _('Ignore whitespace')
85 tooltiplbl = _('Ignore whitespace for all diffs')
85 tooltiplbl = _('Ignore whitespace for all diffs')
86 ctx_key = 'context'
86 ctx_key = 'context'
87 ctx_val = ln_ctx
87 ctx_val = ln_ctx
88
88
89 # if we have passed in ln_ctx pass it along to our params
89 # if we have passed in ln_ctx pass it along to our params
90 if ln_ctx:
90 if ln_ctx:
91 params[ctx_key] += [ctx_val]
91 params[ctx_key] += [ctx_val]
92
92
93 if fileid:
93 if fileid:
94 params['anchor'] = 'a_' + fileid
94 params['anchor'] = 'a_' + fileid
95 return h.link_to(label, h.url.current(**params), title=tooltiplbl, class_='tooltip')
95 return h.link_to(label, h.url.current(**params), title=tooltiplbl, class_='tooltip')
96
96
97
97
98 def get_line_ctx(fid, GET):
98 def get_line_ctx(fid, GET):
99 ln_ctx_global = GET.get('context')
99 ln_ctx_global = GET.get('context')
100 if fid:
100 if fid:
101 ln_ctx = filter(lambda k: k.startswith('C'), GET.getall(fid))
101 ln_ctx = filter(lambda k: k.startswith('C'), GET.getall(fid))
102 else:
102 else:
103 _ln_ctx = filter(lambda k: k.startswith('C'), GET)
103 _ln_ctx = filter(lambda k: k.startswith('C'), GET)
104 ln_ctx = GET.get(_ln_ctx[0]) if _ln_ctx else ln_ctx_global
104 ln_ctx = GET.get(_ln_ctx[0]) if _ln_ctx else ln_ctx_global
105 if ln_ctx:
105 if ln_ctx:
106 ln_ctx = [ln_ctx]
106 ln_ctx = [ln_ctx]
107
107
108 if ln_ctx:
108 if ln_ctx:
109 retval = ln_ctx[0].split(':')[-1]
109 retval = ln_ctx[0].split(':')[-1]
110 else:
110 else:
111 retval = ln_ctx_global
111 retval = ln_ctx_global
112
112
113 try:
113 try:
114 return int(retval)
114 return int(retval)
115 except Exception:
115 except Exception:
116 return 3
116 return 3
117
117
118
118
119 def _context_url(GET, fileid=None):
119 def _context_url(GET, fileid=None):
120 """
120 """
121 Generates a url for context lines.
121 Generates a url for context lines.
122
122
123 :param fileid:
123 :param fileid:
124 """
124 """
125
125
126 fileid = str(fileid) if fileid else None
126 fileid = str(fileid) if fileid else None
127 ig_ws = get_ignore_ws(fileid, GET)
127 ig_ws = get_ignore_ws(fileid, GET)
128 ln_ctx = (get_line_ctx(fileid, GET) or 3) * 2
128 ln_ctx = (get_line_ctx(fileid, GET) or 3) * 2
129
129
130 params = defaultdict(list)
130 params = defaultdict(list)
131 _update_with_GET(params, GET)
131 _update_with_GET(params, GET)
132
132
133 if ln_ctx > 0:
133 if ln_ctx > 0:
134 params['context'] += [ln_ctx]
134 params['context'] += [ln_ctx]
135
135
136 if ig_ws:
136 if ig_ws:
137 ig_ws_key = 'ignorews'
137 ig_ws_key = 'ignorews'
138 ig_ws_val = 1
138 ig_ws_val = 1
139 params[ig_ws_key] += [ig_ws_val]
139 params[ig_ws_key] += [ig_ws_val]
140
140
141 lbl = _('Increase context')
141 lbl = _('Increase context')
142 tooltiplbl = _('Increase context for all diffs')
142 tooltiplbl = _('Increase context for all diffs')
143
143
144 if fileid:
144 if fileid:
145 params['anchor'] = 'a_' + fileid
145 params['anchor'] = 'a_' + fileid
146 return h.link_to(lbl, h.url.current(**params), title=tooltiplbl, class_='tooltip')
146 return h.link_to(lbl, h.url.current(**params), title=tooltiplbl, class_='tooltip')
147
147
148
148
149 class ChangesetController(BaseRepoController):
149 class ChangesetController(BaseRepoController):
150
150
151 def __before__(self):
151 def __before__(self):
152 super(ChangesetController, self).__before__()
152 super(ChangesetController, self).__before__()
153 c.affected_files_cut_off = 60
153 c.affected_files_cut_off = 60
154
154
155 def _index(self, commit_id_range, method):
155 def _index(self, commit_id_range, method):
156 c.ignorews_url = _ignorews_url
156 c.ignorews_url = _ignorews_url
157 c.context_url = _context_url
157 c.context_url = _context_url
158 c.fulldiff = fulldiff = request.GET.get('fulldiff')
158 c.fulldiff = fulldiff = request.GET.get('fulldiff')
159 # get ranges of commit ids if preset
159 # get ranges of commit ids if preset
160 commit_range = commit_id_range.split('...')[:2]
160 commit_range = commit_id_range.split('...')[:2]
161 enable_comments = True
161 enable_comments = True
162 try:
162 try:
163 pre_load = ['affected_files', 'author', 'branch', 'date',
163 pre_load = ['affected_files', 'author', 'branch', 'date',
164 'message', 'parents']
164 'message', 'parents']
165
165
166 if len(commit_range) == 2:
166 if len(commit_range) == 2:
167 enable_comments = False
167 enable_comments = False
168 commits = c.rhodecode_repo.get_commits(
168 commits = c.rhodecode_repo.get_commits(
169 start_id=commit_range[0], end_id=commit_range[1],
169 start_id=commit_range[0], end_id=commit_range[1],
170 pre_load=pre_load)
170 pre_load=pre_load)
171 commits = list(commits)
171 commits = list(commits)
172 else:
172 else:
173 commits = [c.rhodecode_repo.get_commit(
173 commits = [c.rhodecode_repo.get_commit(
174 commit_id=commit_id_range, pre_load=pre_load)]
174 commit_id=commit_id_range, pre_load=pre_load)]
175
175
176 c.commit_ranges = commits
176 c.commit_ranges = commits
177 if not c.commit_ranges:
177 if not c.commit_ranges:
178 raise RepositoryError(
178 raise RepositoryError(
179 'The commit range returned an empty result')
179 'The commit range returned an empty result')
180 except CommitDoesNotExistError:
180 except CommitDoesNotExistError:
181 msg = _('No such commit exists for this repository')
181 msg = _('No such commit exists for this repository')
182 h.flash(msg, category='error')
182 h.flash(msg, category='error')
183 raise HTTPNotFound()
183 raise HTTPNotFound()
184 except Exception:
184 except Exception:
185 log.exception("General failure")
185 log.exception("General failure")
186 raise HTTPNotFound()
186 raise HTTPNotFound()
187
187
188 c.changes = OrderedDict()
188 c.changes = OrderedDict()
189 c.lines_added = 0
189 c.lines_added = 0
190 c.lines_deleted = 0
190 c.lines_deleted = 0
191
191
192 c.commit_statuses = ChangesetStatus.STATUSES
192 c.commit_statuses = ChangesetStatus.STATUSES
193 c.comments = []
193 c.comments = []
194 c.statuses = []
194 c.statuses = []
195 c.inline_comments = []
195 c.inline_comments = []
196 c.inline_cnt = 0
196 c.inline_cnt = 0
197 c.files = []
197 c.files = []
198
198
199 # Iterate over ranges (default commit view is always one commit)
199 # Iterate over ranges (default commit view is always one commit)
200 for commit in c.commit_ranges:
200 for commit in c.commit_ranges:
201 if method == 'show':
201 if method == 'show':
202 c.statuses.extend([ChangesetStatusModel().get_status(
202 c.statuses.extend([ChangesetStatusModel().get_status(
203 c.rhodecode_db_repo.repo_id, commit.raw_id)])
203 c.rhodecode_db_repo.repo_id, commit.raw_id)])
204
204
205 c.comments.extend(ChangesetCommentsModel().get_comments(
205 c.comments.extend(ChangesetCommentsModel().get_comments(
206 c.rhodecode_db_repo.repo_id,
206 c.rhodecode_db_repo.repo_id,
207 revision=commit.raw_id))
207 revision=commit.raw_id))
208
208
209 # comments from PR
209 # comments from PR
210 st = ChangesetStatusModel().get_statuses(
210 st = ChangesetStatusModel().get_statuses(
211 c.rhodecode_db_repo.repo_id, commit.raw_id,
211 c.rhodecode_db_repo.repo_id, commit.raw_id,
212 with_revisions=True)
212 with_revisions=True)
213
213
214 # from associated statuses, check the pull requests, and
214 # from associated statuses, check the pull requests, and
215 # show comments from them
215 # show comments from them
216
216
217 prs = set(x.pull_request for x in
217 prs = set(x.pull_request for x in
218 filter(lambda x: x.pull_request is not None, st))
218 filter(lambda x: x.pull_request is not None, st))
219 for pr in prs:
219 for pr in prs:
220 c.comments.extend(pr.comments)
220 c.comments.extend(pr.comments)
221
221
222 inlines = ChangesetCommentsModel().get_inline_comments(
222 inlines = ChangesetCommentsModel().get_inline_comments(
223 c.rhodecode_db_repo.repo_id, revision=commit.raw_id)
223 c.rhodecode_db_repo.repo_id, revision=commit.raw_id)
224 c.inline_comments.extend(inlines.iteritems())
224 c.inline_comments.extend(inlines.iteritems())
225
225
226 c.changes[commit.raw_id] = []
226 c.changes[commit.raw_id] = []
227
227
228 commit2 = commit
228 commit2 = commit
229 commit1 = commit.parents[0] if commit.parents else EmptyCommit()
229 commit1 = commit.parents[0] if commit.parents else EmptyCommit()
230
230
231 # fetch global flags of ignore ws or context lines
231 # fetch global flags of ignore ws or context lines
232 context_lcl = get_line_ctx('', request.GET)
232 context_lcl = get_line_ctx('', request.GET)
233 ign_whitespace_lcl = get_ignore_ws('', request.GET)
233 ign_whitespace_lcl = get_ignore_ws('', request.GET)
234
234
235 _diff = c.rhodecode_repo.get_diff(
235 _diff = c.rhodecode_repo.get_diff(
236 commit1, commit2,
236 commit1, commit2,
237 ignore_whitespace=ign_whitespace_lcl, context=context_lcl)
237 ignore_whitespace=ign_whitespace_lcl, context=context_lcl)
238
238
239 # diff_limit will cut off the whole diff if the limit is applied
239 # diff_limit will cut off the whole diff if the limit is applied
240 # otherwise it will just hide the big files from the front-end
240 # otherwise it will just hide the big files from the front-end
241 diff_limit = self.cut_off_limit_diff
241 diff_limit = self.cut_off_limit_diff
242 file_limit = self.cut_off_limit_file
242 file_limit = self.cut_off_limit_file
243
243
244 diff_processor = diffs.DiffProcessor(
244 diff_processor = diffs.DiffProcessor(
245 _diff, format='newdiff', diff_limit=diff_limit,
245 _diff, format='newdiff', diff_limit=diff_limit,
246 file_limit=file_limit, show_full_diff=fulldiff)
246 file_limit=file_limit, show_full_diff=fulldiff)
247 commit_changes = OrderedDict()
247 commit_changes = OrderedDict()
248 if method == 'show':
248 if method == 'show':
249 _parsed = diff_processor.prepare()
249 _parsed = diff_processor.prepare()
250 c.limited_diff = isinstance(_parsed, diffs.LimitedDiffContainer)
250 c.limited_diff = isinstance(_parsed, diffs.LimitedDiffContainer)
251
251
252 _parsed = diff_processor.prepare()
252 _parsed = diff_processor.prepare()
253
253
254 def _node_getter(commit):
254 def _node_getter(commit):
255 def get_node(fname):
255 def get_node(fname):
256 try:
256 try:
257 return commit.get_node(fname)
257 return commit.get_node(fname)
258 except NodeDoesNotExistError:
258 except NodeDoesNotExistError:
259 return None
259 return None
260 return get_node
260 return get_node
261
261
262 diffset = codeblocks.DiffSet(
262 diffset = codeblocks.DiffSet(
263 repo_name=c.repo_name,
263 source_node_getter=_node_getter(commit1),
264 source_node_getter=_node_getter(commit1),
264 target_node_getter=_node_getter(commit2),
265 target_node_getter=_node_getter(commit2),
265 ).render_patchset(_parsed, commit1.raw_id, commit2.raw_id)
266 ).render_patchset(_parsed, commit1.raw_id, commit2.raw_id)
266 c.changes[commit.raw_id] = diffset
267 c.changes[commit.raw_id] = diffset
267 else:
268 else:
268 # downloads/raw we only need RAW diff nothing else
269 # downloads/raw we only need RAW diff nothing else
269 diff = diff_processor.as_raw()
270 diff = diff_processor.as_raw()
270 c.changes[commit.raw_id] = [None, None, None, None, diff, None, None]
271 c.changes[commit.raw_id] = [None, None, None, None, diff, None, None]
271
272
272 # sort comments by how they were generated
273 # sort comments by how they were generated
273 c.comments = sorted(c.comments, key=lambda x: x.comment_id)
274 c.comments = sorted(c.comments, key=lambda x: x.comment_id)
274
275
275 # count inline comments
276 # count inline comments
276 for __, lines in c.inline_comments:
277 for __, lines in c.inline_comments:
277 for comments in lines.values():
278 for comments in lines.values():
278 c.inline_cnt += len(comments)
279 c.inline_cnt += len(comments)
279
280
280 if len(c.commit_ranges) == 1:
281 if len(c.commit_ranges) == 1:
281 c.commit = c.commit_ranges[0]
282 c.commit = c.commit_ranges[0]
282 c.parent_tmpl = ''.join(
283 c.parent_tmpl = ''.join(
283 '# Parent %s\n' % x.raw_id for x in c.commit.parents)
284 '# Parent %s\n' % x.raw_id for x in c.commit.parents)
284 if method == 'download':
285 if method == 'download':
285 response.content_type = 'text/plain'
286 response.content_type = 'text/plain'
286 response.content_disposition = (
287 response.content_disposition = (
287 'attachment; filename=%s.diff' % commit_id_range[:12])
288 'attachment; filename=%s.diff' % commit_id_range[:12])
288 return diff
289 return diff
289 elif method == 'patch':
290 elif method == 'patch':
290 response.content_type = 'text/plain'
291 response.content_type = 'text/plain'
291 c.diff = safe_unicode(diff)
292 c.diff = safe_unicode(diff)
292 return render('changeset/patch_changeset.html')
293 return render('changeset/patch_changeset.html')
293 elif method == 'raw':
294 elif method == 'raw':
294 response.content_type = 'text/plain'
295 response.content_type = 'text/plain'
295 return diff
296 return diff
296 elif method == 'show':
297 elif method == 'show':
297 if len(c.commit_ranges) == 1:
298 if len(c.commit_ranges) == 1:
298 return render('changeset/changeset.html')
299 return render('changeset/changeset.html')
299 else:
300 else:
300 c.ancestor = None
301 c.ancestor = None
301 c.target_repo = c.rhodecode_db_repo
302 c.target_repo = c.rhodecode_db_repo
302 return render('changeset/changeset_range.html')
303 return render('changeset/changeset_range.html')
303
304
304 @LoginRequired()
305 @LoginRequired()
305 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
306 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
306 'repository.admin')
307 'repository.admin')
307 def index(self, revision, method='show'):
308 def index(self, revision, method='show'):
308 return self._index(revision, method=method)
309 return self._index(revision, method=method)
309
310
310 @LoginRequired()
311 @LoginRequired()
311 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
312 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
312 'repository.admin')
313 'repository.admin')
313 def changeset_raw(self, revision):
314 def changeset_raw(self, revision):
314 return self._index(revision, method='raw')
315 return self._index(revision, method='raw')
315
316
316 @LoginRequired()
317 @LoginRequired()
317 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
318 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
318 'repository.admin')
319 'repository.admin')
319 def changeset_patch(self, revision):
320 def changeset_patch(self, revision):
320 return self._index(revision, method='patch')
321 return self._index(revision, method='patch')
321
322
322 @LoginRequired()
323 @LoginRequired()
323 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
324 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
324 'repository.admin')
325 'repository.admin')
325 def changeset_download(self, revision):
326 def changeset_download(self, revision):
326 return self._index(revision, method='download')
327 return self._index(revision, method='download')
327
328
328 @LoginRequired()
329 @LoginRequired()
329 @NotAnonymous()
330 @NotAnonymous()
330 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
331 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
331 'repository.admin')
332 'repository.admin')
332 @auth.CSRFRequired()
333 @auth.CSRFRequired()
333 @jsonify
334 @jsonify
334 def comment(self, repo_name, revision):
335 def comment(self, repo_name, revision):
335 commit_id = revision
336 commit_id = revision
336 status = request.POST.get('changeset_status', None)
337 status = request.POST.get('changeset_status', None)
337 text = request.POST.get('text')
338 text = request.POST.get('text')
338 if status:
339 if status:
339 text = text or (_('Status change %(transition_icon)s %(status)s')
340 text = text or (_('Status change %(transition_icon)s %(status)s')
340 % {'transition_icon': '>',
341 % {'transition_icon': '>',
341 'status': ChangesetStatus.get_status_lbl(status)})
342 'status': ChangesetStatus.get_status_lbl(status)})
342
343
343 multi_commit_ids = filter(
344 multi_commit_ids = filter(
344 lambda s: s not in ['', None],
345 lambda s: s not in ['', None],
345 request.POST.get('commit_ids', '').split(','),)
346 request.POST.get('commit_ids', '').split(','),)
346
347
347 commit_ids = multi_commit_ids or [commit_id]
348 commit_ids = multi_commit_ids or [commit_id]
348 comment = None
349 comment = None
349 for current_id in filter(None, commit_ids):
350 for current_id in filter(None, commit_ids):
350 c.co = comment = ChangesetCommentsModel().create(
351 c.co = comment = ChangesetCommentsModel().create(
351 text=text,
352 text=text,
352 repo=c.rhodecode_db_repo.repo_id,
353 repo=c.rhodecode_db_repo.repo_id,
353 user=c.rhodecode_user.user_id,
354 user=c.rhodecode_user.user_id,
354 revision=current_id,
355 revision=current_id,
355 f_path=request.POST.get('f_path'),
356 f_path=request.POST.get('f_path'),
356 line_no=request.POST.get('line'),
357 line_no=request.POST.get('line'),
357 status_change=(ChangesetStatus.get_status_lbl(status)
358 status_change=(ChangesetStatus.get_status_lbl(status)
358 if status else None),
359 if status else None),
359 status_change_type=status
360 status_change_type=status
360 )
361 )
361 # get status if set !
362 # get status if set !
362 if status:
363 if status:
363 # if latest status was from pull request and it's closed
364 # if latest status was from pull request and it's closed
364 # disallow changing status !
365 # disallow changing status !
365 # dont_allow_on_closed_pull_request = True !
366 # dont_allow_on_closed_pull_request = True !
366
367
367 try:
368 try:
368 ChangesetStatusModel().set_status(
369 ChangesetStatusModel().set_status(
369 c.rhodecode_db_repo.repo_id,
370 c.rhodecode_db_repo.repo_id,
370 status,
371 status,
371 c.rhodecode_user.user_id,
372 c.rhodecode_user.user_id,
372 comment,
373 comment,
373 revision=current_id,
374 revision=current_id,
374 dont_allow_on_closed_pull_request=True
375 dont_allow_on_closed_pull_request=True
375 )
376 )
376 except StatusChangeOnClosedPullRequestError:
377 except StatusChangeOnClosedPullRequestError:
377 msg = _('Changing the status of a commit associated with '
378 msg = _('Changing the status of a commit associated with '
378 'a closed pull request is not allowed')
379 'a closed pull request is not allowed')
379 log.exception(msg)
380 log.exception(msg)
380 h.flash(msg, category='warning')
381 h.flash(msg, category='warning')
381 return redirect(h.url(
382 return redirect(h.url(
382 'changeset_home', repo_name=repo_name,
383 'changeset_home', repo_name=repo_name,
383 revision=current_id))
384 revision=current_id))
384
385
385 # finalize, commit and redirect
386 # finalize, commit and redirect
386 Session().commit()
387 Session().commit()
387
388
388 data = {
389 data = {
389 'target_id': h.safeid(h.safe_unicode(request.POST.get('f_path'))),
390 'target_id': h.safeid(h.safe_unicode(request.POST.get('f_path'))),
390 }
391 }
391 if comment:
392 if comment:
392 data.update(comment.get_dict())
393 data.update(comment.get_dict())
393 data.update({'rendered_text':
394 data.update({'rendered_text':
394 render('changeset/changeset_comment_block.html')})
395 render('changeset/changeset_comment_block.html')})
395
396
396 return data
397 return data
397
398
398 @LoginRequired()
399 @LoginRequired()
399 @NotAnonymous()
400 @NotAnonymous()
400 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
401 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
401 'repository.admin')
402 'repository.admin')
402 @auth.CSRFRequired()
403 @auth.CSRFRequired()
403 def preview_comment(self):
404 def preview_comment(self):
404 # Technically a CSRF token is not needed as no state changes with this
405 # Technically a CSRF token is not needed as no state changes with this
405 # call. However, as this is a POST is better to have it, so automated
406 # call. However, as this is a POST is better to have it, so automated
406 # tools don't flag it as potential CSRF.
407 # tools don't flag it as potential CSRF.
407 # Post is required because the payload could be bigger than the maximum
408 # Post is required because the payload could be bigger than the maximum
408 # allowed by GET.
409 # allowed by GET.
409 if not request.environ.get('HTTP_X_PARTIAL_XHR'):
410 if not request.environ.get('HTTP_X_PARTIAL_XHR'):
410 raise HTTPBadRequest()
411 raise HTTPBadRequest()
411 text = request.POST.get('text')
412 text = request.POST.get('text')
412 renderer = request.POST.get('renderer') or 'rst'
413 renderer = request.POST.get('renderer') or 'rst'
413 if text:
414 if text:
414 return h.render(text, renderer=renderer, mentions=True)
415 return h.render(text, renderer=renderer, mentions=True)
415 return ''
416 return ''
416
417
417 @LoginRequired()
418 @LoginRequired()
418 @NotAnonymous()
419 @NotAnonymous()
419 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
420 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
420 'repository.admin')
421 'repository.admin')
421 @auth.CSRFRequired()
422 @auth.CSRFRequired()
422 @jsonify
423 @jsonify
423 def delete_comment(self, repo_name, comment_id):
424 def delete_comment(self, repo_name, comment_id):
424 comment = ChangesetComment.get(comment_id)
425 comment = ChangesetComment.get(comment_id)
425 owner = (comment.author.user_id == c.rhodecode_user.user_id)
426 owner = (comment.author.user_id == c.rhodecode_user.user_id)
426 is_repo_admin = h.HasRepoPermissionAny('repository.admin')(c.repo_name)
427 is_repo_admin = h.HasRepoPermissionAny('repository.admin')(c.repo_name)
427 if h.HasPermissionAny('hg.admin')() or is_repo_admin or owner:
428 if h.HasPermissionAny('hg.admin')() or is_repo_admin or owner:
428 ChangesetCommentsModel().delete(comment=comment)
429 ChangesetCommentsModel().delete(comment=comment)
429 Session().commit()
430 Session().commit()
430 return True
431 return True
431 else:
432 else:
432 raise HTTPForbidden()
433 raise HTTPForbidden()
433
434
434 @LoginRequired()
435 @LoginRequired()
435 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
436 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
436 'repository.admin')
437 'repository.admin')
437 @jsonify
438 @jsonify
438 def changeset_info(self, repo_name, revision):
439 def changeset_info(self, repo_name, revision):
439 if request.is_xhr:
440 if request.is_xhr:
440 try:
441 try:
441 return c.rhodecode_repo.get_commit(commit_id=revision)
442 return c.rhodecode_repo.get_commit(commit_id=revision)
442 except CommitDoesNotExistError as e:
443 except CommitDoesNotExistError as e:
443 return EmptyCommit(message=str(e))
444 return EmptyCommit(message=str(e))
444 else:
445 else:
445 raise HTTPBadRequest()
446 raise HTTPBadRequest()
446
447
447 @LoginRequired()
448 @LoginRequired()
448 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
449 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
449 'repository.admin')
450 'repository.admin')
450 @jsonify
451 @jsonify
451 def changeset_children(self, repo_name, revision):
452 def changeset_children(self, repo_name, revision):
452 if request.is_xhr:
453 if request.is_xhr:
453 commit = c.rhodecode_repo.get_commit(commit_id=revision)
454 commit = c.rhodecode_repo.get_commit(commit_id=revision)
454 result = {"results": commit.children}
455 result = {"results": commit.children}
455 return result
456 return result
456 else:
457 else:
457 raise HTTPBadRequest()
458 raise HTTPBadRequest()
458
459
459 @LoginRequired()
460 @LoginRequired()
460 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
461 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
461 'repository.admin')
462 'repository.admin')
462 @jsonify
463 @jsonify
463 def changeset_parents(self, repo_name, revision):
464 def changeset_parents(self, repo_name, revision):
464 if request.is_xhr:
465 if request.is_xhr:
465 commit = c.rhodecode_repo.get_commit(commit_id=revision)
466 commit = c.rhodecode_repo.get_commit(commit_id=revision)
466 result = {"results": commit.parents}
467 result = {"results": commit.parents}
467 return result
468 return result
468 else:
469 else:
469 raise HTTPBadRequest()
470 raise HTTPBadRequest()
@@ -1,263 +1,264 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2012-2016 RhodeCode GmbH
3 # Copyright (C) 2012-2016 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 """
21 """
22 Compare controller for showing differences between two commits/refs/tags etc.
22 Compare controller for showing differences between two commits/refs/tags etc.
23 """
23 """
24
24
25 import logging
25 import logging
26
26
27 from webob.exc import HTTPBadRequest
27 from webob.exc import HTTPBadRequest
28 from pylons import request, tmpl_context as c, url
28 from pylons import request, tmpl_context as c, url
29 from pylons.controllers.util import redirect
29 from pylons.controllers.util import redirect
30 from pylons.i18n.translation import _
30 from pylons.i18n.translation import _
31
31
32 from rhodecode.controllers.utils import parse_path_ref, get_commit_from_ref_name
32 from rhodecode.controllers.utils import parse_path_ref, get_commit_from_ref_name
33 from rhodecode.lib import helpers as h
33 from rhodecode.lib import helpers as h
34 from rhodecode.lib import diffs, codeblocks
34 from rhodecode.lib import diffs, codeblocks
35 from rhodecode.lib.auth import LoginRequired, HasRepoPermissionAnyDecorator
35 from rhodecode.lib.auth import LoginRequired, HasRepoPermissionAnyDecorator
36 from rhodecode.lib.base import BaseRepoController, render
36 from rhodecode.lib.base import BaseRepoController, render
37 from rhodecode.lib.utils import safe_str
37 from rhodecode.lib.utils import safe_str
38 from rhodecode.lib.utils2 import safe_unicode, str2bool
38 from rhodecode.lib.utils2 import safe_unicode, str2bool
39 from rhodecode.lib.vcs.exceptions import (
39 from rhodecode.lib.vcs.exceptions import (
40 EmptyRepositoryError, RepositoryError, RepositoryRequirementError,
40 EmptyRepositoryError, RepositoryError, RepositoryRequirementError,
41 NodeDoesNotExistError)
41 NodeDoesNotExistError)
42 from rhodecode.model.db import Repository, ChangesetStatus
42 from rhodecode.model.db import Repository, ChangesetStatus
43
43
44 log = logging.getLogger(__name__)
44 log = logging.getLogger(__name__)
45
45
46
46
47 class CompareController(BaseRepoController):
47 class CompareController(BaseRepoController):
48
48
49 def __before__(self):
49 def __before__(self):
50 super(CompareController, self).__before__()
50 super(CompareController, self).__before__()
51
51
52 def _get_commit_or_redirect(
52 def _get_commit_or_redirect(
53 self, ref, ref_type, repo, redirect_after=True, partial=False):
53 self, ref, ref_type, repo, redirect_after=True, partial=False):
54 """
54 """
55 This is a safe way to get a commit. If an error occurs it
55 This is a safe way to get a commit. If an error occurs it
56 redirects to a commit with a proper message. If partial is set
56 redirects to a commit with a proper message. If partial is set
57 then it does not do redirect raise and throws an exception instead.
57 then it does not do redirect raise and throws an exception instead.
58 """
58 """
59 try:
59 try:
60 return get_commit_from_ref_name(repo, safe_str(ref), ref_type)
60 return get_commit_from_ref_name(repo, safe_str(ref), ref_type)
61 except EmptyRepositoryError:
61 except EmptyRepositoryError:
62 if not redirect_after:
62 if not redirect_after:
63 return repo.scm_instance().EMPTY_COMMIT
63 return repo.scm_instance().EMPTY_COMMIT
64 h.flash(h.literal(_('There are no commits yet')),
64 h.flash(h.literal(_('There are no commits yet')),
65 category='warning')
65 category='warning')
66 redirect(url('summary_home', repo_name=repo.repo_name))
66 redirect(url('summary_home', repo_name=repo.repo_name))
67
67
68 except RepositoryError as e:
68 except RepositoryError as e:
69 msg = safe_str(e)
69 msg = safe_str(e)
70 log.exception(msg)
70 log.exception(msg)
71 h.flash(msg, category='warning')
71 h.flash(msg, category='warning')
72 if not partial:
72 if not partial:
73 redirect(h.url('summary_home', repo_name=repo.repo_name))
73 redirect(h.url('summary_home', repo_name=repo.repo_name))
74 raise HTTPBadRequest()
74 raise HTTPBadRequest()
75
75
76 @LoginRequired()
76 @LoginRequired()
77 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
77 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
78 'repository.admin')
78 'repository.admin')
79 def index(self, repo_name):
79 def index(self, repo_name):
80 c.compare_home = True
80 c.compare_home = True
81 c.commit_ranges = []
81 c.commit_ranges = []
82 c.diffset = None
82 c.diffset = None
83 c.limited_diff = False
83 c.limited_diff = False
84 source_repo = c.rhodecode_db_repo.repo_name
84 source_repo = c.rhodecode_db_repo.repo_name
85 target_repo = request.GET.get('target_repo', source_repo)
85 target_repo = request.GET.get('target_repo', source_repo)
86 c.source_repo = Repository.get_by_repo_name(source_repo)
86 c.source_repo = Repository.get_by_repo_name(source_repo)
87 c.target_repo = Repository.get_by_repo_name(target_repo)
87 c.target_repo = Repository.get_by_repo_name(target_repo)
88 c.source_ref = c.target_ref = _('Select commit')
88 c.source_ref = c.target_ref = _('Select commit')
89 c.source_ref_type = ""
89 c.source_ref_type = ""
90 c.target_ref_type = ""
90 c.target_ref_type = ""
91 c.commit_statuses = ChangesetStatus.STATUSES
91 c.commit_statuses = ChangesetStatus.STATUSES
92 c.preview_mode = False
92 c.preview_mode = False
93 return render('compare/compare_diff.html')
93 return render('compare/compare_diff.html')
94
94
95 @LoginRequired()
95 @LoginRequired()
96 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
96 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
97 'repository.admin')
97 'repository.admin')
98 def compare(self, repo_name, source_ref_type, source_ref,
98 def compare(self, repo_name, source_ref_type, source_ref,
99 target_ref_type, target_ref):
99 target_ref_type, target_ref):
100 # source_ref will be evaluated in source_repo
100 # source_ref will be evaluated in source_repo
101 source_repo_name = c.rhodecode_db_repo.repo_name
101 source_repo_name = c.rhodecode_db_repo.repo_name
102 source_path, source_id = parse_path_ref(source_ref)
102 source_path, source_id = parse_path_ref(source_ref)
103
103
104 # target_ref will be evaluated in target_repo
104 # target_ref will be evaluated in target_repo
105 target_repo_name = request.GET.get('target_repo', source_repo_name)
105 target_repo_name = request.GET.get('target_repo', source_repo_name)
106 target_path, target_id = parse_path_ref(target_ref)
106 target_path, target_id = parse_path_ref(target_ref)
107
107
108 c.commit_statuses = ChangesetStatus.STATUSES
108 c.commit_statuses = ChangesetStatus.STATUSES
109
109
110 # if merge is True
110 # if merge is True
111 # Show what changes since the shared ancestor commit of target/source
111 # Show what changes since the shared ancestor commit of target/source
112 # the source would get if it was merged with target. Only commits
112 # the source would get if it was merged with target. Only commits
113 # which are in target but not in source will be shown.
113 # which are in target but not in source will be shown.
114 merge = str2bool(request.GET.get('merge'))
114 merge = str2bool(request.GET.get('merge'))
115 # if merge is False
115 # if merge is False
116 # Show a raw diff of source/target refs even if no ancestor exists
116 # Show a raw diff of source/target refs even if no ancestor exists
117
117
118
118
119 # c.fulldiff disables cut_off_limit
119 # c.fulldiff disables cut_off_limit
120 c.fulldiff = str2bool(request.GET.get('fulldiff'))
120 c.fulldiff = str2bool(request.GET.get('fulldiff'))
121
121
122 # if partial, returns just compare_commits.html (commits log)
122 # if partial, returns just compare_commits.html (commits log)
123 partial = request.is_xhr
123 partial = request.is_xhr
124
124
125 # swap url for compare_diff page
125 # swap url for compare_diff page
126 c.swap_url = h.url(
126 c.swap_url = h.url(
127 'compare_url',
127 'compare_url',
128 repo_name=target_repo_name,
128 repo_name=target_repo_name,
129 source_ref_type=target_ref_type,
129 source_ref_type=target_ref_type,
130 source_ref=target_ref,
130 source_ref=target_ref,
131 target_repo=source_repo_name,
131 target_repo=source_repo_name,
132 target_ref_type=source_ref_type,
132 target_ref_type=source_ref_type,
133 target_ref=source_ref,
133 target_ref=source_ref,
134 merge=merge and '1' or '')
134 merge=merge and '1' or '')
135
135
136 source_repo = Repository.get_by_repo_name(source_repo_name)
136 source_repo = Repository.get_by_repo_name(source_repo_name)
137 target_repo = Repository.get_by_repo_name(target_repo_name)
137 target_repo = Repository.get_by_repo_name(target_repo_name)
138
138
139 if source_repo is None:
139 if source_repo is None:
140 msg = _('Could not find the original repo: %(repo)s') % {
140 msg = _('Could not find the original repo: %(repo)s') % {
141 'repo': source_repo}
141 'repo': source_repo}
142
142
143 log.error(msg)
143 log.error(msg)
144 h.flash(msg, category='error')
144 h.flash(msg, category='error')
145 return redirect(url('compare_home', repo_name=c.repo_name))
145 return redirect(url('compare_home', repo_name=c.repo_name))
146
146
147 if target_repo is None:
147 if target_repo is None:
148 msg = _('Could not find the other repo: %(repo)s') % {
148 msg = _('Could not find the other repo: %(repo)s') % {
149 'repo': target_repo_name}
149 'repo': target_repo_name}
150 log.error(msg)
150 log.error(msg)
151 h.flash(msg, category='error')
151 h.flash(msg, category='error')
152 return redirect(url('compare_home', repo_name=c.repo_name))
152 return redirect(url('compare_home', repo_name=c.repo_name))
153
153
154 source_alias = source_repo.scm_instance().alias
154 source_alias = source_repo.scm_instance().alias
155 target_alias = target_repo.scm_instance().alias
155 target_alias = target_repo.scm_instance().alias
156 if source_alias != target_alias:
156 if source_alias != target_alias:
157 msg = _('The comparison of two different kinds of remote repos '
157 msg = _('The comparison of two different kinds of remote repos '
158 'is not available')
158 'is not available')
159 log.error(msg)
159 log.error(msg)
160 h.flash(msg, category='error')
160 h.flash(msg, category='error')
161 return redirect(url('compare_home', repo_name=c.repo_name))
161 return redirect(url('compare_home', repo_name=c.repo_name))
162
162
163 source_commit = self._get_commit_or_redirect(
163 source_commit = self._get_commit_or_redirect(
164 ref=source_id, ref_type=source_ref_type, repo=source_repo,
164 ref=source_id, ref_type=source_ref_type, repo=source_repo,
165 partial=partial)
165 partial=partial)
166 target_commit = self._get_commit_or_redirect(
166 target_commit = self._get_commit_or_redirect(
167 ref=target_id, ref_type=target_ref_type, repo=target_repo,
167 ref=target_id, ref_type=target_ref_type, repo=target_repo,
168 partial=partial)
168 partial=partial)
169
169
170 c.compare_home = False
170 c.compare_home = False
171 c.source_repo = source_repo
171 c.source_repo = source_repo
172 c.target_repo = target_repo
172 c.target_repo = target_repo
173 c.source_ref = source_ref
173 c.source_ref = source_ref
174 c.target_ref = target_ref
174 c.target_ref = target_ref
175 c.source_ref_type = source_ref_type
175 c.source_ref_type = source_ref_type
176 c.target_ref_type = target_ref_type
176 c.target_ref_type = target_ref_type
177
177
178 source_scm = source_repo.scm_instance()
178 source_scm = source_repo.scm_instance()
179 target_scm = target_repo.scm_instance()
179 target_scm = target_repo.scm_instance()
180
180
181 pre_load = ["author", "branch", "date", "message"]
181 pre_load = ["author", "branch", "date", "message"]
182 c.ancestor = None
182 c.ancestor = None
183 try:
183 try:
184 c.commit_ranges = source_scm.compare(
184 c.commit_ranges = source_scm.compare(
185 source_commit.raw_id, target_commit.raw_id,
185 source_commit.raw_id, target_commit.raw_id,
186 target_scm, merge, pre_load=pre_load)
186 target_scm, merge, pre_load=pre_load)
187 if merge:
187 if merge:
188 c.ancestor = source_scm.get_common_ancestor(
188 c.ancestor = source_scm.get_common_ancestor(
189 source_commit.raw_id, target_commit.raw_id, target_scm)
189 source_commit.raw_id, target_commit.raw_id, target_scm)
190 except RepositoryRequirementError:
190 except RepositoryRequirementError:
191 msg = _('Could not compare repos with different '
191 msg = _('Could not compare repos with different '
192 'large file settings')
192 'large file settings')
193 log.error(msg)
193 log.error(msg)
194 if partial:
194 if partial:
195 return msg
195 return msg
196 h.flash(msg, category='error')
196 h.flash(msg, category='error')
197 return redirect(url('compare_home', repo_name=c.repo_name))
197 return redirect(url('compare_home', repo_name=c.repo_name))
198
198
199 c.statuses = c.rhodecode_db_repo.statuses(
199 c.statuses = c.rhodecode_db_repo.statuses(
200 [x.raw_id for x in c.commit_ranges])
200 [x.raw_id for x in c.commit_ranges])
201
201
202 if partial: # for PR ajax commits loader
202 if partial: # for PR ajax commits loader
203 if not c.ancestor:
203 if not c.ancestor:
204 return '' # cannot merge if there is no ancestor
204 return '' # cannot merge if there is no ancestor
205 return render('compare/compare_commits.html')
205 return render('compare/compare_commits.html')
206
206
207 if c.ancestor:
207 if c.ancestor:
208 # case we want a simple diff without incoming commits,
208 # case we want a simple diff without incoming commits,
209 # previewing what will be merged.
209 # previewing what will be merged.
210 # Make the diff on target repo (which is known to have target_ref)
210 # Make the diff on target repo (which is known to have target_ref)
211 log.debug('Using ancestor %s as source_ref instead of %s'
211 log.debug('Using ancestor %s as source_ref instead of %s'
212 % (c.ancestor, source_ref))
212 % (c.ancestor, source_ref))
213 source_repo = target_repo
213 source_repo = target_repo
214 source_commit = target_repo.get_commit(commit_id=c.ancestor)
214 source_commit = target_repo.get_commit(commit_id=c.ancestor)
215
215
216 # diff_limit will cut off the whole diff if the limit is applied
216 # diff_limit will cut off the whole diff if the limit is applied
217 # otherwise it will just hide the big files from the front-end
217 # otherwise it will just hide the big files from the front-end
218 diff_limit = self.cut_off_limit_diff
218 diff_limit = self.cut_off_limit_diff
219 file_limit = self.cut_off_limit_file
219 file_limit = self.cut_off_limit_file
220
220
221 log.debug('calculating diff between '
221 log.debug('calculating diff between '
222 'source_ref:%s and target_ref:%s for repo `%s`',
222 'source_ref:%s and target_ref:%s for repo `%s`',
223 source_commit, target_commit,
223 source_commit, target_commit,
224 safe_unicode(source_repo.scm_instance().path))
224 safe_unicode(source_repo.scm_instance().path))
225
225
226 if source_commit.repository != target_commit.repository:
226 if source_commit.repository != target_commit.repository:
227 msg = _(
227 msg = _(
228 "Repositories unrelated. "
228 "Repositories unrelated. "
229 "Cannot compare commit %(commit1)s from repository %(repo1)s "
229 "Cannot compare commit %(commit1)s from repository %(repo1)s "
230 "with commit %(commit2)s from repository %(repo2)s.") % {
230 "with commit %(commit2)s from repository %(repo2)s.") % {
231 'commit1': h.show_id(source_commit),
231 'commit1': h.show_id(source_commit),
232 'repo1': source_repo.repo_name,
232 'repo1': source_repo.repo_name,
233 'commit2': h.show_id(target_commit),
233 'commit2': h.show_id(target_commit),
234 'repo2': target_repo.repo_name,
234 'repo2': target_repo.repo_name,
235 }
235 }
236 h.flash(msg, category='error')
236 h.flash(msg, category='error')
237 raise HTTPBadRequest()
237 raise HTTPBadRequest()
238
238
239 txtdiff = source_repo.scm_instance().get_diff(
239 txtdiff = source_repo.scm_instance().get_diff(
240 commit1=source_commit, commit2=target_commit,
240 commit1=source_commit, commit2=target_commit,
241 path1=source_path, path=target_path)
241 path1=source_path, path=target_path)
242 diff_processor = diffs.DiffProcessor(
242 diff_processor = diffs.DiffProcessor(
243 txtdiff, format='newdiff', diff_limit=diff_limit,
243 txtdiff, format='newdiff', diff_limit=diff_limit,
244 file_limit=file_limit, show_full_diff=c.fulldiff)
244 file_limit=file_limit, show_full_diff=c.fulldiff)
245 _parsed = diff_processor.prepare()
245 _parsed = diff_processor.prepare()
246
246
247 def _node_getter(commit):
247 def _node_getter(commit):
248 """ Returns a function that returns a node for a commit or None """
248 """ Returns a function that returns a node for a commit or None """
249 def get_node(fname):
249 def get_node(fname):
250 try:
250 try:
251 return commit.get_node(fname)
251 return commit.get_node(fname)
252 except NodeDoesNotExistError:
252 except NodeDoesNotExistError:
253 return None
253 return None
254 return get_node
254 return get_node
255
255
256 c.diffset = codeblocks.DiffSet(
256 c.diffset = codeblocks.DiffSet(
257 repo_name=source_repo.repo_name,
257 source_node_getter=_node_getter(source_commit),
258 source_node_getter=_node_getter(source_commit),
258 target_node_getter=_node_getter(target_commit),
259 target_node_getter=_node_getter(target_commit),
259 ).render_patchset(_parsed, source_ref, target_ref)
260 ).render_patchset(_parsed, source_ref, target_ref)
260
261
261 c.preview_mode = merge
262 c.preview_mode = merge
262
263
263 return render('compare/compare_diff.html')
264 return render('compare/compare_diff.html')
@@ -1,641 +1,642 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2011-2016 RhodeCode GmbH
3 # Copyright (C) 2011-2016 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 import logging
21 import logging
22 import difflib
22 import difflib
23 from itertools import groupby
23 from itertools import groupby
24
24
25 from pygments import lex
25 from pygments import lex
26 from pygments.formatters.html import _get_ttype_class as pygment_token_class
26 from pygments.formatters.html import _get_ttype_class as pygment_token_class
27 from rhodecode.lib.helpers import (
27 from rhodecode.lib.helpers import (
28 get_lexer_for_filenode, get_lexer_safe, html_escape)
28 get_lexer_for_filenode, get_lexer_safe, html_escape)
29 from rhodecode.lib.utils2 import AttributeDict
29 from rhodecode.lib.utils2 import AttributeDict
30 from rhodecode.lib.vcs.nodes import FileNode
30 from rhodecode.lib.vcs.nodes import FileNode
31 from rhodecode.lib.diff_match_patch import diff_match_patch
31 from rhodecode.lib.diff_match_patch import diff_match_patch
32 from rhodecode.lib.diffs import LimitedDiffContainer
32 from rhodecode.lib.diffs import LimitedDiffContainer
33 from pygments.lexers import get_lexer_by_name
33 from pygments.lexers import get_lexer_by_name
34
34
35 plain_text_lexer = get_lexer_by_name(
35 plain_text_lexer = get_lexer_by_name(
36 'text', stripall=False, stripnl=False, ensurenl=False)
36 'text', stripall=False, stripnl=False, ensurenl=False)
37
37
38
38
39 log = logging.getLogger()
39 log = logging.getLogger()
40
40
41
41
42 def filenode_as_lines_tokens(filenode, lexer=None):
42 def filenode_as_lines_tokens(filenode, lexer=None):
43 lexer = lexer or get_lexer_for_filenode(filenode)
43 lexer = lexer or get_lexer_for_filenode(filenode)
44 log.debug('Generating file node pygment tokens for %s, %s', lexer, filenode)
44 log.debug('Generating file node pygment tokens for %s, %s', lexer, filenode)
45 tokens = tokenize_string(filenode.content, lexer)
45 tokens = tokenize_string(filenode.content, lexer)
46 lines = split_token_stream(tokens, split_string='\n')
46 lines = split_token_stream(tokens, split_string='\n')
47 rv = list(lines)
47 rv = list(lines)
48 return rv
48 return rv
49
49
50
50
51 def tokenize_string(content, lexer):
51 def tokenize_string(content, lexer):
52 """
52 """
53 Use pygments to tokenize some content based on a lexer
53 Use pygments to tokenize some content based on a lexer
54 ensuring all original new lines and whitespace is preserved
54 ensuring all original new lines and whitespace is preserved
55 """
55 """
56
56
57 lexer.stripall = False
57 lexer.stripall = False
58 lexer.stripnl = False
58 lexer.stripnl = False
59 lexer.ensurenl = False
59 lexer.ensurenl = False
60 for token_type, token_text in lex(content, lexer):
60 for token_type, token_text in lex(content, lexer):
61 yield pygment_token_class(token_type), token_text
61 yield pygment_token_class(token_type), token_text
62
62
63
63
64 def split_token_stream(tokens, split_string=u'\n'):
64 def split_token_stream(tokens, split_string=u'\n'):
65 """
65 """
66 Take a list of (TokenType, text) tuples and split them by a string
66 Take a list of (TokenType, text) tuples and split them by a string
67
67
68 >>> split_token_stream([(TEXT, 'some\ntext'), (TEXT, 'more\n')])
68 >>> split_token_stream([(TEXT, 'some\ntext'), (TEXT, 'more\n')])
69 [(TEXT, 'some'), (TEXT, 'text'),
69 [(TEXT, 'some'), (TEXT, 'text'),
70 (TEXT, 'more'), (TEXT, 'text')]
70 (TEXT, 'more'), (TEXT, 'text')]
71 """
71 """
72
72
73 buffer = []
73 buffer = []
74 for token_class, token_text in tokens:
74 for token_class, token_text in tokens:
75 parts = token_text.split(split_string)
75 parts = token_text.split(split_string)
76 for part in parts[:-1]:
76 for part in parts[:-1]:
77 buffer.append((token_class, part))
77 buffer.append((token_class, part))
78 yield buffer
78 yield buffer
79 buffer = []
79 buffer = []
80
80
81 buffer.append((token_class, parts[-1]))
81 buffer.append((token_class, parts[-1]))
82
82
83 if buffer:
83 if buffer:
84 yield buffer
84 yield buffer
85
85
86
86
87 def filenode_as_annotated_lines_tokens(filenode):
87 def filenode_as_annotated_lines_tokens(filenode):
88 """
88 """
89 Take a file node and return a list of annotations => lines, if no annotation
89 Take a file node and return a list of annotations => lines, if no annotation
90 is found, it will be None.
90 is found, it will be None.
91
91
92 eg:
92 eg:
93
93
94 [
94 [
95 (annotation1, [
95 (annotation1, [
96 (1, line1_tokens_list),
96 (1, line1_tokens_list),
97 (2, line2_tokens_list),
97 (2, line2_tokens_list),
98 ]),
98 ]),
99 (annotation2, [
99 (annotation2, [
100 (3, line1_tokens_list),
100 (3, line1_tokens_list),
101 ]),
101 ]),
102 (None, [
102 (None, [
103 (4, line1_tokens_list),
103 (4, line1_tokens_list),
104 ]),
104 ]),
105 (annotation1, [
105 (annotation1, [
106 (5, line1_tokens_list),
106 (5, line1_tokens_list),
107 (6, line2_tokens_list),
107 (6, line2_tokens_list),
108 ])
108 ])
109 ]
109 ]
110 """
110 """
111
111
112 commit_cache = {} # cache commit_getter lookups
112 commit_cache = {} # cache commit_getter lookups
113
113
114 def _get_annotation(commit_id, commit_getter):
114 def _get_annotation(commit_id, commit_getter):
115 if commit_id not in commit_cache:
115 if commit_id not in commit_cache:
116 commit_cache[commit_id] = commit_getter()
116 commit_cache[commit_id] = commit_getter()
117 return commit_cache[commit_id]
117 return commit_cache[commit_id]
118
118
119 annotation_lookup = {
119 annotation_lookup = {
120 line_no: _get_annotation(commit_id, commit_getter)
120 line_no: _get_annotation(commit_id, commit_getter)
121 for line_no, commit_id, commit_getter, line_content
121 for line_no, commit_id, commit_getter, line_content
122 in filenode.annotate
122 in filenode.annotate
123 }
123 }
124
124
125 annotations_lines = ((annotation_lookup.get(line_no), line_no, tokens)
125 annotations_lines = ((annotation_lookup.get(line_no), line_no, tokens)
126 for line_no, tokens
126 for line_no, tokens
127 in enumerate(filenode_as_lines_tokens(filenode), 1))
127 in enumerate(filenode_as_lines_tokens(filenode), 1))
128
128
129 grouped_annotations_lines = groupby(annotations_lines, lambda x: x[0])
129 grouped_annotations_lines = groupby(annotations_lines, lambda x: x[0])
130
130
131 for annotation, group in grouped_annotations_lines:
131 for annotation, group in grouped_annotations_lines:
132 yield (
132 yield (
133 annotation, [(line_no, tokens)
133 annotation, [(line_no, tokens)
134 for (_, line_no, tokens) in group]
134 for (_, line_no, tokens) in group]
135 )
135 )
136
136
137
137
138 def render_tokenstream(tokenstream):
138 def render_tokenstream(tokenstream):
139 result = []
139 result = []
140 for token_class, token_ops_texts in rollup_tokenstream(tokenstream):
140 for token_class, token_ops_texts in rollup_tokenstream(tokenstream):
141
141
142 if token_class:
142 if token_class:
143 result.append(u'<span class="%s">' % token_class)
143 result.append(u'<span class="%s">' % token_class)
144 else:
144 else:
145 result.append(u'<span>')
145 result.append(u'<span>')
146
146
147 for op_tag, token_text in token_ops_texts:
147 for op_tag, token_text in token_ops_texts:
148
148
149 if op_tag:
149 if op_tag:
150 result.append(u'<%s>' % op_tag)
150 result.append(u'<%s>' % op_tag)
151
151
152 escaped_text = html_escape(token_text)
152 escaped_text = html_escape(token_text)
153
153
154 # TODO: dan: investigate showing hidden characters like space/nl/tab
154 # TODO: dan: investigate showing hidden characters like space/nl/tab
155 # escaped_text = escaped_text.replace(' ', '<sp> </sp>')
155 # escaped_text = escaped_text.replace(' ', '<sp> </sp>')
156 # escaped_text = escaped_text.replace('\n', '<nl>\n</nl>')
156 # escaped_text = escaped_text.replace('\n', '<nl>\n</nl>')
157 # escaped_text = escaped_text.replace('\t', '<tab>\t</tab>')
157 # escaped_text = escaped_text.replace('\t', '<tab>\t</tab>')
158
158
159 result.append(escaped_text)
159 result.append(escaped_text)
160
160
161 if op_tag:
161 if op_tag:
162 result.append(u'</%s>' % op_tag)
162 result.append(u'</%s>' % op_tag)
163
163
164 result.append(u'</span>')
164 result.append(u'</span>')
165
165
166 html = ''.join(result)
166 html = ''.join(result)
167 return html
167 return html
168
168
169
169
170 def rollup_tokenstream(tokenstream):
170 def rollup_tokenstream(tokenstream):
171 """
171 """
172 Group a token stream of the format:
172 Group a token stream of the format:
173
173
174 ('class', 'op', 'text')
174 ('class', 'op', 'text')
175 or
175 or
176 ('class', 'text')
176 ('class', 'text')
177
177
178 into
178 into
179
179
180 [('class1',
180 [('class1',
181 [('op1', 'text'),
181 [('op1', 'text'),
182 ('op2', 'text')]),
182 ('op2', 'text')]),
183 ('class2',
183 ('class2',
184 [('op3', 'text')])]
184 [('op3', 'text')])]
185
185
186 This is used to get the minimal tags necessary when
186 This is used to get the minimal tags necessary when
187 rendering to html eg for a token stream ie.
187 rendering to html eg for a token stream ie.
188
188
189 <span class="A"><ins>he</ins>llo</span>
189 <span class="A"><ins>he</ins>llo</span>
190 vs
190 vs
191 <span class="A"><ins>he</ins></span><span class="A">llo</span>
191 <span class="A"><ins>he</ins></span><span class="A">llo</span>
192
192
193 If a 2 tuple is passed in, the output op will be an empty string.
193 If a 2 tuple is passed in, the output op will be an empty string.
194
194
195 eg:
195 eg:
196
196
197 >>> rollup_tokenstream([('classA', '', 'h'),
197 >>> rollup_tokenstream([('classA', '', 'h'),
198 ('classA', 'del', 'ell'),
198 ('classA', 'del', 'ell'),
199 ('classA', '', 'o'),
199 ('classA', '', 'o'),
200 ('classB', '', ' '),
200 ('classB', '', ' '),
201 ('classA', '', 'the'),
201 ('classA', '', 'the'),
202 ('classA', '', 're'),
202 ('classA', '', 're'),
203 ])
203 ])
204
204
205 [('classA', [('', 'h'), ('del', 'ell'), ('', 'o')],
205 [('classA', [('', 'h'), ('del', 'ell'), ('', 'o')],
206 ('classB', [('', ' ')],
206 ('classB', [('', ' ')],
207 ('classA', [('', 'there')]]
207 ('classA', [('', 'there')]]
208
208
209 """
209 """
210 if tokenstream and len(tokenstream[0]) == 2:
210 if tokenstream and len(tokenstream[0]) == 2:
211 tokenstream = ((t[0], '', t[1]) for t in tokenstream)
211 tokenstream = ((t[0], '', t[1]) for t in tokenstream)
212
212
213 result = []
213 result = []
214 for token_class, op_list in groupby(tokenstream, lambda t: t[0]):
214 for token_class, op_list in groupby(tokenstream, lambda t: t[0]):
215 ops = []
215 ops = []
216 for token_op, token_text_list in groupby(op_list, lambda o: o[1]):
216 for token_op, token_text_list in groupby(op_list, lambda o: o[1]):
217 text_buffer = []
217 text_buffer = []
218 for t_class, t_op, t_text in token_text_list:
218 for t_class, t_op, t_text in token_text_list:
219 text_buffer.append(t_text)
219 text_buffer.append(t_text)
220 ops.append((token_op, ''.join(text_buffer)))
220 ops.append((token_op, ''.join(text_buffer)))
221 result.append((token_class, ops))
221 result.append((token_class, ops))
222 return result
222 return result
223
223
224
224
def tokens_diff(old_tokens, new_tokens, use_diff_match_patch=True):
    """
    Converts a list of (token_class, token_text) tuples to a list of
    (token_class, token_op, token_text) tuples where token_op is one of
    ('ins', 'del', '')

    :param old_tokens: list of (token_class, token_text) tuples of old line
    :param new_tokens: list of (token_class, token_text) tuples of new line
    :param use_diff_match_patch: boolean, will use google's diff match patch
        library which has options to 'smooth' out the character by character
        differences making nicer ins/del blocks
    :return: tuple of (old_tokens_result, new_tokens_result, similarity)
        where similarity is the difflib ratio of the two raw line texts
    """

    old_tokens_result = []
    new_tokens_result = []

    # compare the raw text of both lines first: if they are too different
    # there is no point rendering an intra-line (ins/del) diff at all
    similarity = difflib.SequenceMatcher(
        None,
        ''.join(token_text for token_class, token_text in old_tokens),
        ''.join(token_text for token_class, token_text in new_tokens)
    ).ratio()

    if similarity < 0.6:  # return, the blocks are too different
        for token_class, token_text in old_tokens:
            old_tokens_result.append((token_class, '', token_text))
        for token_class, token_text in new_tokens:
            new_tokens_result.append((token_class, '', token_text))
        return old_tokens_result, new_tokens_result, similarity

    token_sequence_matcher = difflib.SequenceMatcher(
        None,
        [x[1] for x in old_tokens],
        [x[1] for x in new_tokens])

    for tag, o1, o2, n1, n2 in token_sequence_matcher.get_opcodes():
        # check the differences by token block types first to give a
        # nicer "block" level replacement vs character diffs

        if tag == 'equal':
            for token_class, token_text in old_tokens[o1:o2]:
                old_tokens_result.append((token_class, '', token_text))
            for token_class, token_text in new_tokens[n1:n2]:
                new_tokens_result.append((token_class, '', token_text))
        elif tag == 'delete':
            for token_class, token_text in old_tokens[o1:o2]:
                old_tokens_result.append((token_class, 'del', token_text))
        elif tag == 'insert':
            for token_class, token_text in new_tokens[n1:n2]:
                new_tokens_result.append((token_class, 'ins', token_text))
        elif tag == 'replace':
            # if same type token blocks must be replaced, do a diff on the
            # characters in the token blocks to show individual changes

            old_char_tokens = []
            new_char_tokens = []
            for token_class, token_text in old_tokens[o1:o2]:
                for char in token_text:
                    old_char_tokens.append((token_class, char))

            for token_class, token_text in new_tokens[n1:n2]:
                for char in token_text:
                    new_char_tokens.append((token_class, char))

            old_string = ''.join([token_text for
                token_class, token_text in old_char_tokens])
            new_string = ''.join([token_text for
                token_class, token_text in new_char_tokens])

            obuffer, nbuffer = [], []

            if use_diff_match_patch:
                dmp = diff_match_patch()
                dmp.Diff_EditCost = 11  # TODO: dan: extract this to a setting
                reps = dmp.diff_main(old_string, new_string)
                dmp.diff_cleanupEfficiency(reps)

                a, b = 0, 0
                for op, rep in reps:
                    length = len(rep)
                    if op == 0:  # equal run: copy chars, keep token classes
                        for i, c in enumerate(rep):
                            obuffer.append((old_char_tokens[a + i][0], '', c))
                            nbuffer.append((new_char_tokens[b + i][0], '', c))
                        a += length
                        b += length
                    elif op == -1:  # deletion: chars only in the old line
                        for i, c in enumerate(rep):
                            obuffer.append(
                                (old_char_tokens[a + i][0], 'del', c))
                        a += length
                    elif op == 1:  # insertion: chars only in the new line
                        for i, c in enumerate(rep):
                            nbuffer.append(
                                (new_char_tokens[b + i][0], 'ins', c))
                        b += length
            else:
                # plain difflib character diff; opcodes only computed on
                # this path since the dmp branch above never uses them
                char_sequence = difflib.SequenceMatcher(
                    None, old_string, new_string)
                for ctag, co1, co2, cn1, cn2 in char_sequence.get_opcodes():
                    if ctag == 'equal':
                        for token_class, token_text in \
                                old_char_tokens[co1:co2]:
                            obuffer.append((token_class, '', token_text))
                        for token_class, token_text in \
                                new_char_tokens[cn1:cn2]:
                            nbuffer.append((token_class, '', token_text))
                    elif ctag == 'delete':
                        for token_class, token_text in \
                                old_char_tokens[co1:co2]:
                            obuffer.append((token_class, 'del', token_text))
                    elif ctag == 'insert':
                        for token_class, token_text in \
                                new_char_tokens[cn1:cn2]:
                            nbuffer.append((token_class, 'ins', token_text))
                    elif ctag == 'replace':
                        for token_class, token_text in \
                                old_char_tokens[co1:co2]:
                            obuffer.append((token_class, 'del', token_text))
                        for token_class, token_text in \
                                new_char_tokens[cn1:cn2]:
                            nbuffer.append((token_class, 'ins', token_text))

            old_tokens_result.extend(obuffer)
            new_tokens_result.extend(nbuffer)

    return old_tokens_result, new_tokens_result, similarity
342
342
343
343
class DiffSet(object):
    """
    An object for parsing the diff result from diffs.DiffProcessor and
    adding highlighting, side by side/unified renderings and line diffs
    """

    HL_REAL = 'REAL'  # highlights using original file, slow
    HL_FAST = 'FAST'  # highlights using just the line, fast but not correct
                      # in the case of multiline code
    HL_NONE = 'NONE'  # no highlighting, fastest

    def __init__(self, highlight_mode=HL_REAL, repo_name=None,
                 source_node_getter=lambda filename: None,
                 target_node_getter=lambda filename: None,
                 source_nodes=None, target_nodes=None,
                 max_file_size_limit=150 * 1024,  # files over this size will
                                                  # use fast highlighting
                 ):
        """
        :param highlight_mode: one of HL_REAL/HL_FAST/HL_NONE
        :param repo_name: repository name, carried onto the rendered
            diffset so templates can build repo-scoped links
        :param source_node_getter: callable(filename) -> FileNode or None
        :param target_node_getter: callable(filename) -> FileNode or None
        :param source_nodes: optional pre-filled {filename: FileNode} cache
        :param target_nodes: optional pre-filled {filename: FileNode} cache
        :param max_file_size_limit: files over this size fall back to fast
            highlighting
        """
        self.highlight_mode = highlight_mode
        self.highlighted_filenodes = {}  # per-filenode tokenized-line cache
        self.source_node_getter = source_node_getter
        self.target_node_getter = target_node_getter
        self.source_nodes = source_nodes or {}
        self.target_nodes = target_nodes or {}
        self.repo_name = repo_name

        self.max_file_size_limit = max_file_size_limit

    def render_patchset(self, patchset, source_ref=None, target_ref=None):
        """
        Render a whole patchset into an AttributeDict diffset with
        aggregated line/file statistics and one filediff per patch.
        """
        diffset = AttributeDict(dict(
            lines_added=0,
            lines_deleted=0,
            changed_files=0,
            files=[],
            limited_diff=isinstance(patchset, LimitedDiffContainer),
            repo_name=self.repo_name,
            source_ref=source_ref,
            target_ref=target_ref,
        ))
        for patch in patchset:
            filediff = self.render_patch(patch)
            filediff.diffset = diffset
            diffset.files.append(filediff)
            diffset.changed_files += 1
            # binary patches carry no meaningful added/deleted counts
            if not patch['stats']['binary']:
                diffset.lines_added += patch['stats']['added']
                diffset.lines_deleted += patch['stats']['deleted']

        return diffset

    _lexer_cache = {}

    def _get_lexer_for_filename(self, filename):
        # cached because we might need to call it twice for source/target
        if filename not in self._lexer_cache:
            self._lexer_cache[filename] = get_lexer_safe(filepath=filename)
        return self._lexer_cache[filename]

    def render_patch(self, patch):
        """
        Render a single patch dict into an AttributeDict filediff with
        highlighted, parsed hunks.
        """
        log.debug('rendering diff for %r', patch['filename'])

        source_filename = patch['original_filename']
        target_filename = patch['filename']

        source_lexer = plain_text_lexer
        target_lexer = plain_text_lexer

        if not patch['stats']['binary']:
            if self.highlight_mode == self.HL_REAL:
                # resolve full file nodes only for operations where the
                # respective side actually exists (D/M has a source,
                # A/M has a target)
                if (source_filename and patch['operation'] in ('D', 'M')
                    and source_filename not in self.source_nodes):
                    self.source_nodes[source_filename] = (
                        self.source_node_getter(source_filename))

                if (target_filename and patch['operation'] in ('A', 'M')
                    and target_filename not in self.target_nodes):
                    self.target_nodes[target_filename] = (
                        self.target_node_getter(target_filename))

            elif self.highlight_mode == self.HL_FAST:
                source_lexer = self._get_lexer_for_filename(source_filename)
                target_lexer = self._get_lexer_for_filename(target_filename)

        source_file = self.source_nodes.get(source_filename, source_filename)
        target_file = self.target_nodes.get(target_filename, target_filename)

        source_filenode, target_filenode = None, None

        # TODO: dan: FileNode.lexer works on the content of the file - which
        # can be slow - issue #4289 explains a lexer clean up - which once
        # done can allow caching a lexer for a filenode to avoid the file lookup
        if isinstance(source_file, FileNode):
            source_filenode = source_file
            source_lexer = source_file.lexer
        if isinstance(target_file, FileNode):
            target_filenode = target_file
            target_lexer = target_file.lexer

        source_file_path, target_file_path = None, None

        # '/dev/null' marks an absent side (added/deleted file)
        if source_filename != '/dev/null':
            source_file_path = source_filename
        if target_filename != '/dev/null':
            target_file_path = target_filename

        source_file_type = source_lexer.name
        target_file_type = target_lexer.name

        # first chunk describes the operation header, the rest are hunks
        op_hunks = patch['chunks'][0]
        hunks = patch['chunks'][1:]

        filediff = AttributeDict({
            'source_file_path': source_file_path,
            'target_file_path': target_file_path,
            'source_filenode': source_filenode,
            'target_filenode': target_filenode,
            'hunks': [],
            # NOTE(review): source/target file types appear swapped here
            # (source gets the target lexer name and vice versa); kept
            # as-is to preserve existing behavior - confirm intent.
            'source_file_type': target_file_type,
            'target_file_type': source_file_type,
            'patch': patch,
            'source_mode': patch['stats']['old_mode'],
            'target_mode': patch['stats']['new_mode'],
            'limited_diff': isinstance(patch, LimitedDiffContainer),
            'diffset': self,
        })

        for hunk in hunks:
            hunkbit = self.parse_hunk(hunk, source_file, target_file)
            hunkbit.filediff = filediff
            filediff.hunks.append(hunkbit)
        return filediff

    def parse_hunk(self, hunk, source_file, target_file):
        """
        Parse one hunk dict into an AttributeDict with paired
        before/after lines plus a unified rendering.
        """
        result = AttributeDict(dict(
            source_start=hunk['source_start'],
            source_length=hunk['source_length'],
            target_start=hunk['target_start'],
            target_length=hunk['target_length'],
            section_header=hunk['section_header'],
            lines=[],
        ))
        before, after = [], []

        for line in hunk['lines']:
            if line['action'] == 'unmod':
                # an unmodified line closes the current add/del run:
                # flush the buffered lines, then buffer the context line
                # on both sides
                result.lines.extend(
                    self.parse_lines(before, after, source_file, target_file))
                after.append(line)
                before.append(line)
            elif line['action'] == 'add':
                after.append(line)
            elif line['action'] == 'del':
                before.append(line)
            elif line['action'] == 'old-no-nl':
                before.append(line)
            elif line['action'] == 'new-no-nl':
                after.append(line)

        result.lines.extend(
            self.parse_lines(before, after, source_file, target_file))
        result.unified = self.as_unified(result.lines)
        result.sideside = result.lines
        return result

    def parse_lines(self, before_lines, after_lines, source_file, target_file):
        """
        Pair up buffered before/after lines, tokenize and intra-line-diff
        them, and return a list of AttributeDicts with 'original' and
        'modified' entries.
        """
        # TODO: dan: investigate doing the diff comparison and fast highlighting
        # on the entire before and after buffered block lines rather than by
        # line, this means we can get better 'fast' highlighting if the context
        # allows it - eg.
        # line 4: """
        # line 5: this gets highlighted as a string
        # line 6: """

        lines = []
        while before_lines or after_lines:
            before, after = None, None
            before_tokens, after_tokens = None, None

            if before_lines:
                before = before_lines.pop(0)
            if after_lines:
                after = after_lines.pop(0)

            original = AttributeDict()
            modified = AttributeDict()

            if before:
                if before['action'] == 'old-no-nl':
                    # "\ No newline at end of file" marker, not real content
                    before_tokens = [('nonl', before['line'])]
                else:
                    before_tokens = self.get_line_tokens(
                        line_text=before['line'],
                        line_number=before['old_lineno'],
                        file=source_file)
                original.lineno = before['old_lineno']
                original.content = before['line']
                original.action = self.action_to_op(before['action'])

            if after:
                if after['action'] == 'new-no-nl':
                    after_tokens = [('nonl', after['line'])]
                else:
                    after_tokens = self.get_line_tokens(
                        line_text=after['line'],
                        line_number=after['new_lineno'],
                        file=target_file)
                modified.lineno = after['new_lineno']
                modified.content = after['line']
                modified.action = self.action_to_op(after['action'])

            # diff the lines
            if before_tokens and after_tokens:
                o_tokens, m_tokens, similarity = tokens_diff(
                    before_tokens, after_tokens)
                original.content = render_tokenstream(o_tokens)
                modified.content = render_tokenstream(m_tokens)
            elif before_tokens:
                original.content = render_tokenstream(
                    [(x[0], '', x[1]) for x in before_tokens])
            elif after_tokens:
                modified.content = render_tokenstream(
                    [(x[0], '', x[1]) for x in after_tokens])

            lines.append(AttributeDict({
                'original': original,
                'modified': modified,
            }))

        return lines

    def get_line_tokens(self, line_text, line_number, file=None):
        """
        Tokenize a single line, honoring the configured highlight mode;
        falls back to plain text tokens when no highlighting applies.
        """
        filenode = None
        filename = None

        if isinstance(file, basestring):
            filename = file
        elif isinstance(file, FileNode):
            filenode = file
            filename = file.unicode_path

        if self.highlight_mode == self.HL_REAL and filenode:
            # real highlighting only pays off below the size limit
            if line_number and file.size < self.max_file_size_limit:
                return self.get_tokenized_filenode_line(file, line_number)

        if self.highlight_mode in (self.HL_REAL, self.HL_FAST) and filename:
            lexer = self._get_lexer_for_filename(filename)
            return list(tokenize_string(line_text, lexer))

        return list(tokenize_string(line_text, plain_text_lexer))

    def get_tokenized_filenode_line(self, filenode, line_number):
        """
        Return the tokenized line at 1-based ``line_number``, highlighting
        the whole filenode once and caching the result per filenode.
        """
        if filenode not in self.highlighted_filenodes:
            tokenized_lines = filenode_as_lines_tokens(filenode, filenode.lexer)
            self.highlighted_filenodes[filenode] = tokenized_lines
        return self.highlighted_filenodes[filenode][line_number - 1]

    def action_to_op(self, action):
        """Map a diff-parser action string to its one-char diff op."""
        return {
            'add': '+',
            'del': '-',
            'unmod': ' ',
            'old-no-nl': ' ',
            'new-no-nl': ' ',
        }.get(action, action)

    def as_unified(self, lines):
        """ Return a generator that yields the lines of a diff in unified order """
        def generator():
            buf = []
            for line in lines:

                if buf and not line.original or line.original.action == ' ':
                    for b in buf:
                        yield b
                    buf = []

                if line.original:
                    if line.original.action == ' ':
                        yield (line.original.lineno, line.modified.lineno,
                               line.original.action, line.original.content)
                        continue

                    if line.original.action == '-':
                        yield (line.original.lineno, None,
                               line.original.action, line.original.content)

                    if line.modified.action == '+':
                        # additions are buffered so they render after the
                        # whole run of deletions, like classic unified diff
                        buf.append((
                            None, line.modified.lineno,
                            line.modified.action, line.modified.content))
                        continue

                if line.modified:
                    yield (None, line.modified.lineno,
                           line.modified.action, line.modified.content)

            for b in buf:
                yield b

        return generator()
@@ -1,420 +1,420 b''
1 <%def name="diff_line_anchor(filename, line, type)"><%
1 <%def name="diff_line_anchor(filename, line, type)"><%
2 return '%s_%s_%i' % (h.safeid(filename), type, line)
2 return '%s_%s_%i' % (h.safeid(filename), type, line)
3 %></%def>
3 %></%def>
4
4
5 <%def name="action_class(action)"><%
5 <%def name="action_class(action)"><%
6 return {
6 return {
7 '-': 'cb-deletion',
7 '-': 'cb-deletion',
8 '+': 'cb-addition',
8 '+': 'cb-addition',
9 ' ': 'cb-context',
9 ' ': 'cb-context',
10 }.get(action, 'cb-empty')
10 }.get(action, 'cb-empty')
11 %></%def>
11 %></%def>
12
12
13 <%def name="op_class(op_id)"><%
13 <%def name="op_class(op_id)"><%
14 return {
14 return {
15 DEL_FILENODE: 'deletion', # file deleted
15 DEL_FILENODE: 'deletion', # file deleted
16 BIN_FILENODE: 'warning' # binary diff hidden
16 BIN_FILENODE: 'warning' # binary diff hidden
17 }.get(op_id, 'addition')
17 }.get(op_id, 'addition')
18 %></%def>
18 %></%def>
19
19
20 <%def name="link_for(**kw)"><%
20 <%def name="link_for(**kw)"><%
21 new_args = request.GET.mixed()
21 new_args = request.GET.mixed()
22 new_args.update(kw)
22 new_args.update(kw)
23 return h.url('', **new_args)
23 return h.url('', **new_args)
24 %></%def>
24 %></%def>
25
25
26 <%def name="render_diffset(diffset, commit=None,
26 <%def name="render_diffset(diffset, commit=None,
27
27
28 # collapse all file diff entries when there are more than this amount of files in the diff
28 # collapse all file diff entries when there are more than this amount of files in the diff
29 collapse_when_files_over=20,
29 collapse_when_files_over=20,
30
30
31 # collapse lines in the diff when more than this amount of lines changed in the file diff
31 # collapse lines in the diff when more than this amount of lines changed in the file diff
32 lines_changed_limit=500,
32 lines_changed_limit=500,
33
33
34 # add a ruler at to the output
34 # add a ruler at to the output
35 ruler_at_chars=0,
35 ruler_at_chars=0,
36
36
37 )">
37 )">
38 <%
38 <%
39 collapse_all = len(diffset.files) > collapse_when_files_over
39 collapse_all = len(diffset.files) > collapse_when_files_over
40 %>
40 %>
41
41
42 %if c.diffmode == 'sideside':
42 %if c.diffmode == 'sideside':
43 <style>
43 <style>
44 .wrapper {
44 .wrapper {
45 max-width: 1600px !important;
45 max-width: 1600px !important;
46 }
46 }
47 </style>
47 </style>
48 %endif
48 %endif
49 %if ruler_at_chars:
49 %if ruler_at_chars:
50 <style>
50 <style>
51 .diff table.cb .cb-content:after {
51 .diff table.cb .cb-content:after {
52 content: "";
52 content: "";
53 border-left: 1px solid blue;
53 border-left: 1px solid blue;
54 position: absolute;
54 position: absolute;
55 top: 0;
55 top: 0;
56 height: 18px;
56 height: 18px;
57 opacity: .2;
57 opacity: .2;
58 z-index: 10;
58 z-index: 10;
59 ## +5 to account for diff action (+/-)
59 ## +5 to account for diff action (+/-)
60 left: ${ruler_at_chars + 5}ch;
60 left: ${ruler_at_chars + 5}ch;
61 </style>
61 </style>
62 %endif
62 %endif
63
63
64 <div class="diffset">
64 <div class="diffset">
65 <div class="diffset-heading ${diffset.limited_diff and 'diffset-heading-warning' or ''}">
65 <div class="diffset-heading ${diffset.limited_diff and 'diffset-heading-warning' or ''}">
66 %if commit:
66 %if commit:
67 <div class="pull-right">
67 <div class="pull-right">
68 <a class="btn tooltip" title="${_('Browse Files at revision {}').format(commit.raw_id)}" href="${h.url('files_home',repo_name=c.repo_name, revision=commit.raw_id, f_path='')}">
68 <a class="btn tooltip" title="${_('Browse Files at revision {}').format(commit.raw_id)}" href="${h.url('files_home',repo_name=diffset.repo_name, revision=commit.raw_id, f_path='')}">
69 ${_('Browse Files')}
69 ${_('Browse Files')}
70 </a>
70 </a>
71 </div>
71 </div>
72 %endif
72 %endif
73 <h2 class="clearinner">
73 <h2 class="clearinner">
74 %if commit:
74 %if commit:
75 <a class="tooltip revision" title="${h.tooltip(commit.message)}" href="${h.url('changeset_home',repo_name=c.repo_name,revision=commit.raw_id)}">${'r%s:%s' % (commit.revision,h.short_id(commit.raw_id))}</a> -
75 <a class="tooltip revision" title="${h.tooltip(commit.message)}" href="${h.url('changeset_home',repo_name=c.repo_name,revision=commit.raw_id)}">${'r%s:%s' % (commit.revision,h.short_id(commit.raw_id))}</a> -
76 ${h.age_component(commit.date)} -
76 ${h.age_component(commit.date)} -
77 %endif
77 %endif
78 %if diffset.limited_diff:
78 %if diffset.limited_diff:
79 ${_('The requested commit is too big and content was truncated.')}
79 ${_('The requested commit is too big and content was truncated.')}
80
80
81 ${ungettext('%(num)s file changed.', '%(num)s files changed.', diffset.changed_files) % {'num': diffset.changed_files}}
81 ${ungettext('%(num)s file changed.', '%(num)s files changed.', diffset.changed_files) % {'num': diffset.changed_files}}
82 <a href="${link_for(fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a>
82 <a href="${link_for(fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a>
83 %else:
83 %else:
84 ${ungettext('%(num)s file changed: %(linesadd)s inserted, ''%(linesdel)s deleted',
84 ${ungettext('%(num)s file changed: %(linesadd)s inserted, ''%(linesdel)s deleted',
85 '%(num)s files changed: %(linesadd)s inserted, %(linesdel)s deleted', diffset.changed_files) % {'num': diffset.changed_files, 'linesadd': diffset.lines_added, 'linesdel': diffset.lines_deleted}}
85 '%(num)s files changed: %(linesadd)s inserted, %(linesdel)s deleted', diffset.changed_files) % {'num': diffset.changed_files, 'linesadd': diffset.lines_added, 'linesdel': diffset.lines_deleted}}
86 %endif
86 %endif
87 </h2>
87 </h2>
88 </div>
88 </div>
89
89
90 %if not diffset.files:
90 %if not diffset.files:
91 <p class="empty_data">${_('No files')}</p>
91 <p class="empty_data">${_('No files')}</p>
92 %endif
92 %endif
93
93
94 <div class="filediffs">
94 <div class="filediffs">
95 %for i, filediff in enumerate(diffset.files):
95 %for i, filediff in enumerate(diffset.files):
96 <%
96 <%
97 lines_changed = filediff['patch']['stats']['added'] + filediff['patch']['stats']['deleted']
97 lines_changed = filediff['patch']['stats']['added'] + filediff['patch']['stats']['deleted']
98 over_lines_changed_limit = lines_changed > lines_changed_limit
98 over_lines_changed_limit = lines_changed > lines_changed_limit
99 %>
99 %>
100 <input ${collapse_all and 'checked' or ''} class="filediff-collapse-state" id="filediff-collapse-${id(filediff)}" type="checkbox">
100 <input ${collapse_all and 'checked' or ''} class="filediff-collapse-state" id="filediff-collapse-${id(filediff)}" type="checkbox">
101 <div
101 <div
102 class="filediff"
102 class="filediff"
103 data-f-path="${filediff['patch']['filename']}"
103 data-f-path="${filediff['patch']['filename']}"
104 id="a_${h.FID(commit and commit.raw_id or '', filediff['patch']['filename'])}">
104 id="a_${h.FID(commit and commit.raw_id or '', filediff['patch']['filename'])}">
105 <label for="filediff-collapse-${id(filediff)}" class="filediff-heading">
105 <label for="filediff-collapse-${id(filediff)}" class="filediff-heading">
106 <div class="filediff-collapse-indicator"></div>
106 <div class="filediff-collapse-indicator"></div>
107 ${diff_ops(filediff)}
107 ${diff_ops(filediff)}
108 </label>
108 </label>
109 ${diff_menu(filediff)}
109 ${diff_menu(filediff)}
110 <table class="cb cb-diff-${c.diffmode} code-highlight ${over_lines_changed_limit and 'cb-collapsed' or ''}">
110 <table class="cb cb-diff-${c.diffmode} code-highlight ${over_lines_changed_limit and 'cb-collapsed' or ''}">
111 %if not filediff.hunks:
111 %if not filediff.hunks:
112 %for op_id, op_text in filediff['patch']['stats']['ops'].items():
112 %for op_id, op_text in filediff['patch']['stats']['ops'].items():
113 <tr>
113 <tr>
114 <td class="cb-text cb-${op_class(op_id)}" ${c.diffmode == 'unified' and 'colspan=3' or 'colspan=4'}>
114 <td class="cb-text cb-${op_class(op_id)}" ${c.diffmode == 'unified' and 'colspan=3' or 'colspan=4'}>
115 %if op_id == DEL_FILENODE:
115 %if op_id == DEL_FILENODE:
116 ${_('File was deleted')}
116 ${_('File was deleted')}
117 %elif op_id == BIN_FILENODE:
117 %elif op_id == BIN_FILENODE:
118 ${_('Binary file hidden')}
118 ${_('Binary file hidden')}
119 %else:
119 %else:
120 ${op_text}
120 ${op_text}
121 %endif
121 %endif
122 </td>
122 </td>
123 </tr>
123 </tr>
124 %endfor
124 %endfor
125 %endif
125 %endif
126 %if over_lines_changed_limit:
126 %if over_lines_changed_limit:
127 <tr class="cb-warning cb-collapser">
127 <tr class="cb-warning cb-collapser">
128 <td class="cb-text" ${c.diffmode == 'unified' and 'colspan=3' or 'colspan=4'}>
128 <td class="cb-text" ${c.diffmode == 'unified' and 'colspan=3' or 'colspan=4'}>
129 ${_('This diff has been collapsed as it changes many lines, (%i lines changed)' % lines_changed)}
129 ${_('This diff has been collapsed as it changes many lines, (%i lines changed)' % lines_changed)}
130 <a href="#" class="cb-expand"
130 <a href="#" class="cb-expand"
131 onclick="$(this).closest('table').removeClass('cb-collapsed'); return false;">${_('Show them')}
131 onclick="$(this).closest('table').removeClass('cb-collapsed'); return false;">${_('Show them')}
132 </a>
132 </a>
133 <a href="#" class="cb-collapse"
133 <a href="#" class="cb-collapse"
134 onclick="$(this).closest('table').addClass('cb-collapsed'); return false;">${_('Hide them')}
134 onclick="$(this).closest('table').addClass('cb-collapsed'); return false;">${_('Hide them')}
135 </a>
135 </a>
136 </td>
136 </td>
137 </tr>
137 </tr>
138 %endif
138 %endif
139 %if filediff.patch['is_limited_diff']:
139 %if filediff.patch['is_limited_diff']:
140 <tr class="cb-warning cb-collapser">
140 <tr class="cb-warning cb-collapser">
141 <td class="cb-text" ${c.diffmode == 'unified' and 'colspan=3' or 'colspan=4'}>
141 <td class="cb-text" ${c.diffmode == 'unified' and 'colspan=3' or 'colspan=4'}>
142 ${_('The requested commit is too big and content was truncated.')} <a href="${link_for(fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a>
142 ${_('The requested commit is too big and content was truncated.')} <a href="${link_for(fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a>
143 </td>
143 </td>
144 </tr>
144 </tr>
145 %endif
145 %endif
146 %for hunk in filediff.hunks:
146 %for hunk in filediff.hunks:
147 <tr class="cb-hunk">
147 <tr class="cb-hunk">
148 <td ${c.diffmode == 'unified' and 'colspan=2' or ''}>
148 <td ${c.diffmode == 'unified' and 'colspan=2' or ''}>
149 ## TODO: dan: add ajax loading of more context here
149 ## TODO: dan: add ajax loading of more context here
150 ## <a href="#">
150 ## <a href="#">
151 <i class="icon-more"></i>
151 <i class="icon-more"></i>
152 ## </a>
152 ## </a>
153 </td>
153 </td>
154 <td ${c.diffmode == 'sideside' and 'colspan=3' or ''}>
154 <td ${c.diffmode == 'sideside' and 'colspan=3' or ''}>
155 @@
155 @@
156 -${hunk.source_start},${hunk.source_length}
156 -${hunk.source_start},${hunk.source_length}
157 +${hunk.target_start},${hunk.target_length}
157 +${hunk.target_start},${hunk.target_length}
158 ${hunk.section_header}
158 ${hunk.section_header}
159 </td>
159 </td>
160 </tr>
160 </tr>
161 %if c.diffmode == 'unified':
161 %if c.diffmode == 'unified':
162 ${render_hunk_lines_unified(hunk)}
162 ${render_hunk_lines_unified(hunk)}
163 %elif c.diffmode == 'sideside':
163 %elif c.diffmode == 'sideside':
164 ${render_hunk_lines_sideside(hunk)}
164 ${render_hunk_lines_sideside(hunk)}
165 %else:
165 %else:
166 <tr class="cb-line">
166 <tr class="cb-line">
167 <td>unknown diff mode</td>
167 <td>unknown diff mode</td>
168 </tr>
168 </tr>
169 %endif
169 %endif
170 %endfor
170 %endfor
171 </table>
171 </table>
172 </div>
172 </div>
173 %endfor
173 %endfor
174 </div>
174 </div>
175 </div>
175 </div>
176 </%def>
176 </%def>
177
177
178 <%def name="diff_ops(filediff)">
178 <%def name="diff_ops(filediff)">
179 <%
179 <%
180 stats = filediff['patch']['stats']
180 stats = filediff['patch']['stats']
181 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
181 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
182 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
182 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
183 %>
183 %>
184 <span class="pill">
184 <span class="pill">
185 %if filediff.source_file_path and filediff.target_file_path:
185 %if filediff.source_file_path and filediff.target_file_path:
186 %if filediff.source_file_path != filediff.target_file_path: # file was renamed
186 %if filediff.source_file_path != filediff.target_file_path: # file was renamed
187 <strong>${filediff.target_file_path}</strong> β¬… <del>${filediff.source_file_path}</del>
187 <strong>${filediff.target_file_path}</strong> β¬… <del>${filediff.source_file_path}</del>
188 %else:
188 %else:
189 ## file was modified
189 ## file was modified
190 <strong>${filediff.source_file_path}</strong>
190 <strong>${filediff.source_file_path}</strong>
191 %endif
191 %endif
192 %else:
192 %else:
193 %if filediff.source_file_path:
193 %if filediff.source_file_path:
194 ## file was deleted
194 ## file was deleted
195 <strong>${filediff.source_file_path}</strong>
195 <strong>${filediff.source_file_path}</strong>
196 %else:
196 %else:
197 ## file was added
197 ## file was added
198 <strong>${filediff.target_file_path}</strong>
198 <strong>${filediff.target_file_path}</strong>
199 %endif
199 %endif
200 %endif
200 %endif
201 </span>
201 </span>
202 <span class="pill-group" style="float: left">
202 <span class="pill-group" style="float: left">
203 %if filediff.patch['is_limited_diff']:
203 %if filediff.patch['is_limited_diff']:
204 <span class="pill tooltip" op="limited" title="The stats for this diff are not complete">limited diff</span>
204 <span class="pill tooltip" op="limited" title="The stats for this diff are not complete">limited diff</span>
205 %endif
205 %endif
206 %if RENAMED_FILENODE in stats['ops']:
206 %if RENAMED_FILENODE in stats['ops']:
207 <span class="pill" op="renamed">renamed</span>
207 <span class="pill" op="renamed">renamed</span>
208 %endif
208 %endif
209
209
210 %if NEW_FILENODE in stats['ops']:
210 %if NEW_FILENODE in stats['ops']:
211 <span class="pill" op="created">created</span>
211 <span class="pill" op="created">created</span>
212 %if filediff['target_mode'].startswith('120'):
212 %if filediff['target_mode'].startswith('120'):
213 <span class="pill" op="symlink">symlink</span>
213 <span class="pill" op="symlink">symlink</span>
214 %else:
214 %else:
215 <span class="pill" op="mode">${nice_mode(filediff['target_mode'])}</span>
215 <span class="pill" op="mode">${nice_mode(filediff['target_mode'])}</span>
216 %endif
216 %endif
217 %endif
217 %endif
218
218
219 %if DEL_FILENODE in stats['ops']:
219 %if DEL_FILENODE in stats['ops']:
220 <span class="pill" op="removed">removed</span>
220 <span class="pill" op="removed">removed</span>
221 %endif
221 %endif
222
222
223 %if CHMOD_FILENODE in stats['ops']:
223 %if CHMOD_FILENODE in stats['ops']:
224 <span class="pill" op="mode">
224 <span class="pill" op="mode">
225 ${nice_mode(filediff['source_mode'])} ➑ ${nice_mode(filediff['target_mode'])}
225 ${nice_mode(filediff['source_mode'])} ➑ ${nice_mode(filediff['target_mode'])}
226 </span>
226 </span>
227 %endif
227 %endif
228 </span>
228 </span>
229
229
230 <a class="pill filediff-anchor" href="#a_${h.FID(commit and commit.raw_id or '', filediff.patch['filename'])}">ΒΆ</a>
230 <a class="pill filediff-anchor" href="#a_${h.FID(commit and commit.raw_id or '', filediff.patch['filename'])}">ΒΆ</a>
231
231
232 <span class="pill-group" style="float: right">
232 <span class="pill-group" style="float: right">
233 %if BIN_FILENODE in stats['ops']:
233 %if BIN_FILENODE in stats['ops']:
234 <span class="pill" op="binary">binary</span>
234 <span class="pill" op="binary">binary</span>
235 %if MOD_FILENODE in stats['ops']:
235 %if MOD_FILENODE in stats['ops']:
236 <span class="pill" op="modified">modified</span>
236 <span class="pill" op="modified">modified</span>
237 %endif
237 %endif
238 %endif
238 %endif
239 %if stats['added']:
239 %if stats['added']:
240 <span class="pill" op="added">+${stats['added']}</span>
240 <span class="pill" op="added">+${stats['added']}</span>
241 %endif
241 %endif
242 %if stats['deleted']:
242 %if stats['deleted']:
243 <span class="pill" op="deleted">-${stats['deleted']}</span>
243 <span class="pill" op="deleted">-${stats['deleted']}</span>
244 %endif
244 %endif
245 </span>
245 </span>
246
246
247 </%def>
247 </%def>
248
248
249 <%def name="nice_mode(filemode)">
249 <%def name="nice_mode(filemode)">
250 ${filemode.startswith('100') and filemode[3:] or filemode}
250 ${filemode.startswith('100') and filemode[3:] or filemode}
251 </%def>
251 </%def>
252
252
253 <%def name="diff_menu(filediff)">
253 <%def name="diff_menu(filediff)">
254 <div class="filediff-menu">
254 <div class="filediff-menu">
255 %if filediff.diffset.source_ref:
255 %if filediff.diffset.source_ref:
256 %if filediff.patch['operation'] in ['D', 'M']:
256 %if filediff.patch['operation'] in ['D', 'M']:
257 <a
257 <a
258 class="tooltip"
258 class="tooltip"
259 href="${h.url('files_home',repo_name=c.repo_name,f_path=filediff.source_file_path,revision=filediff.diffset.source_ref)}"
259 href="${h.url('files_home',repo_name=filediff.diffset.repo_name,f_path=filediff.source_file_path,revision=filediff.diffset.source_ref)}"
260 title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}"
260 title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}"
261 >
261 >
262 ${_('Show file before')}
262 ${_('Show file before')}
263 </a>
263 </a>
264 %else:
264 %else:
265 <span
265 <span
266 class="tooltip"
266 class="tooltip"
267 title="${h.tooltip(_('File no longer present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}"
267 title="${h.tooltip(_('File no longer present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}"
268 >
268 >
269 ${_('Show file before')}
269 ${_('Show file before')}
270 </span>
270 </span>
271 %endif
271 %endif
272 %if filediff.patch['operation'] in ['A', 'M']:
272 %if filediff.patch['operation'] in ['A', 'M']:
273 <a
273 <a
274 class="tooltip"
274 class="tooltip"
275 href="${h.url('files_home',repo_name=c.repo_name,f_path=filediff.target_file_path,revision=filediff.diffset.target_ref)}"
275 href="${h.url('files_home',repo_name=filediff.diffset.repo_name,f_path=filediff.target_file_path,revision=filediff.diffset.target_ref)}"
276 title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}"
276 title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}"
277 >
277 >
278 ${_('Show file after')}
278 ${_('Show file after')}
279 </a>
279 </a>
280 %else:
280 %else:
281 <span
281 <span
282 class="tooltip"
282 class="tooltip"
283 title="${h.tooltip(_('File no longer present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}"
283 title="${h.tooltip(_('File no longer present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}"
284 >
284 >
285 ${_('Show file after')}
285 ${_('Show file after')}
286 </span>
286 </span>
287 %endif
287 %endif
288 <a
288 <a
289 class="tooltip"
289 class="tooltip"
290 title="${h.tooltip(_('Raw diff'))}"
290 title="${h.tooltip(_('Raw diff'))}"
291 href="${h.url('files_diff_home',repo_name=c.repo_name,f_path=filediff.target_file_path,diff2=filediff.diffset.target_ref,diff1=filediff.diffset.source_ref,diff='raw')}"
291 href="${h.url('files_diff_home',repo_name=filediff.diffset.repo_name,f_path=filediff.target_file_path,diff2=filediff.diffset.target_ref,diff1=filediff.diffset.source_ref,diff='raw')}"
292 >
292 >
293 ${_('Raw diff')}
293 ${_('Raw diff')}
294 </a>
294 </a>
295 <a
295 <a
296 class="tooltip"
296 class="tooltip"
297 title="${h.tooltip(_('Download diff'))}"
297 title="${h.tooltip(_('Download diff'))}"
298 href="${h.url('files_diff_home',repo_name=c.repo_name,f_path=filediff.target_file_path,diff2=filediff.diffset.target_ref,diff1=filediff.diffset.source_ref,diff='download')}"
298 href="${h.url('files_diff_home',repo_name=filediff.diffset.repo_name,f_path=filediff.target_file_path,diff2=filediff.diffset.target_ref,diff1=filediff.diffset.source_ref,diff='download')}"
299 >
299 >
300 ${_('Download diff')}
300 ${_('Download diff')}
301 </a>
301 </a>
302 %endif
302 %endif
303 </div>
303 </div>
304 </%def>
304 </%def>
305
305
306
306
307 <%def name="render_hunk_lines_sideside(hunk)">
307 <%def name="render_hunk_lines_sideside(hunk)">
308 %for i, line in enumerate(hunk.sideside):
308 %for i, line in enumerate(hunk.sideside):
309 <%
309 <%
310 old_line_anchor, new_line_anchor = None, None
310 old_line_anchor, new_line_anchor = None, None
311 if line.original.lineno:
311 if line.original.lineno:
312 old_line_anchor = diff_line_anchor(hunk.filediff.source_file_path, line.original.lineno, 'o')
312 old_line_anchor = diff_line_anchor(hunk.filediff.source_file_path, line.original.lineno, 'o')
313 if line.modified.lineno:
313 if line.modified.lineno:
314 new_line_anchor = diff_line_anchor(hunk.filediff.target_file_path, line.modified.lineno, 'n')
314 new_line_anchor = diff_line_anchor(hunk.filediff.target_file_path, line.modified.lineno, 'n')
315 %>
315 %>
316 <tr class="cb-line">
316 <tr class="cb-line">
317 <td class="cb-lineno ${action_class(line.original.action)}"
317 <td class="cb-lineno ${action_class(line.original.action)}"
318 data-line-number="${line.original.lineno}"
318 data-line-number="${line.original.lineno}"
319 %if old_line_anchor:
319 %if old_line_anchor:
320 id="${old_line_anchor}"
320 id="${old_line_anchor}"
321 %endif
321 %endif
322 >
322 >
323 %if line.original.lineno:
323 %if line.original.lineno:
324 <a name="${old_line_anchor}" href="#${old_line_anchor}">${line.original.lineno}</a>
324 <a name="${old_line_anchor}" href="#${old_line_anchor}">${line.original.lineno}</a>
325 %endif
325 %endif
326 </td>
326 </td>
327 <td class="cb-content ${action_class(line.original.action)}"
327 <td class="cb-content ${action_class(line.original.action)}"
328 data-line-number="o${line.original.lineno}"
328 data-line-number="o${line.original.lineno}"
329 ><span class="cb-code">${line.original.action} ${line.original.content or '' | n}</span>
329 ><span class="cb-code">${line.original.action} ${line.original.content or '' | n}</span>
330 </td>
330 </td>
331 <td class="cb-lineno ${action_class(line.modified.action)}"
331 <td class="cb-lineno ${action_class(line.modified.action)}"
332 data-line-number="${line.modified.lineno}"
332 data-line-number="${line.modified.lineno}"
333 %if new_line_anchor:
333 %if new_line_anchor:
334 id="${new_line_anchor}"
334 id="${new_line_anchor}"
335 %endif
335 %endif
336 >
336 >
337 %if line.modified.lineno:
337 %if line.modified.lineno:
338 <a name="${new_line_anchor}" href="#${new_line_anchor}">${line.modified.lineno}</a>
338 <a name="${new_line_anchor}" href="#${new_line_anchor}">${line.modified.lineno}</a>
339 %endif
339 %endif
340 </td>
340 </td>
341 <td class="cb-content ${action_class(line.modified.action)}"
341 <td class="cb-content ${action_class(line.modified.action)}"
342 data-line-number="n${line.modified.lineno}"
342 data-line-number="n${line.modified.lineno}"
343 >
343 >
344 <span class="cb-code">${line.modified.action} ${line.modified.content or '' | n}</span>
344 <span class="cb-code">${line.modified.action} ${line.modified.content or '' | n}</span>
345 </td>
345 </td>
346 </tr>
346 </tr>
347 %endfor
347 %endfor
348 </%def>
348 </%def>
349
349
350
350
351 <%def name="render_hunk_lines_unified(hunk)">
351 <%def name="render_hunk_lines_unified(hunk)">
352 %for old_line_no, new_line_no, action, content in hunk.unified:
352 %for old_line_no, new_line_no, action, content in hunk.unified:
353 <%
353 <%
354 old_line_anchor, new_line_anchor = None, None
354 old_line_anchor, new_line_anchor = None, None
355 if old_line_no:
355 if old_line_no:
356 old_line_anchor = diff_line_anchor(hunk.filediff.source_file_path, old_line_no, 'o')
356 old_line_anchor = diff_line_anchor(hunk.filediff.source_file_path, old_line_no, 'o')
357 if new_line_no:
357 if new_line_no:
358 new_line_anchor = diff_line_anchor(hunk.filediff.target_file_path, new_line_no, 'n')
358 new_line_anchor = diff_line_anchor(hunk.filediff.target_file_path, new_line_no, 'n')
359 %>
359 %>
360 <tr class="cb-line">
360 <tr class="cb-line">
361 <td class="cb-lineno ${action_class(action)}"
361 <td class="cb-lineno ${action_class(action)}"
362 data-line-number="${old_line_no}"
362 data-line-number="${old_line_no}"
363 %if old_line_anchor:
363 %if old_line_anchor:
364 id="${old_line_anchor}"
364 id="${old_line_anchor}"
365 %endif
365 %endif
366 >
366 >
367 %if old_line_anchor:
367 %if old_line_anchor:
368 <a name="${old_line_anchor}" href="#${old_line_anchor}">${old_line_no}</a>
368 <a name="${old_line_anchor}" href="#${old_line_anchor}">${old_line_no}</a>
369 %endif
369 %endif
370 </td>
370 </td>
371 <td class="cb-lineno ${action_class(action)}"
371 <td class="cb-lineno ${action_class(action)}"
372 data-line-number="${new_line_no}"
372 data-line-number="${new_line_no}"
373 %if new_line_anchor:
373 %if new_line_anchor:
374 id="${new_line_anchor}"
374 id="${new_line_anchor}"
375 %endif
375 %endif
376 >
376 >
377 %if new_line_anchor:
377 %if new_line_anchor:
378 <a name="${new_line_anchor}" href="#${new_line_anchor}">${new_line_no}</a>
378 <a name="${new_line_anchor}" href="#${new_line_anchor}">${new_line_no}</a>
379 %endif
379 %endif
380 </td>
380 </td>
381 <td class="cb-content ${action_class(action)}"
381 <td class="cb-content ${action_class(action)}"
382 data-line-number="${new_line_no and 'n' or 'o'}${new_line_no or old_line_no}"
382 data-line-number="${new_line_no and 'n' or 'o'}${new_line_no or old_line_no}"
383 ><span class="cb-code">${action} ${content or '' | n}</span>
383 ><span class="cb-code">${action} ${content or '' | n}</span>
384 </td>
384 </td>
385 </tr>
385 </tr>
386 %endfor
386 %endfor
387 </%def>
387 </%def>
388
388
389
389
390 <%def name="render_diffset_menu()">
390 <%def name="render_diffset_menu()">
391 <div class="diffset-menu clearinner">
391 <div class="diffset-menu clearinner">
392 <div class="pull-right">
392 <div class="pull-right">
393 <div class="btn-group">
393 <div class="btn-group">
394 <a
394 <a
395 class="btn ${c.diffmode == 'sideside' and 'btn-primary'} tooltip"
395 class="btn ${c.diffmode == 'sideside' and 'btn-primary'} tooltip"
396 title="${_('View side by side')}"
396 title="${_('View side by side')}"
397 href="${h.url_replace(diffmode='sideside')}">
397 href="${h.url_replace(diffmode='sideside')}">
398 <span>${_('Side by Side')}</span>
398 <span>${_('Side by Side')}</span>
399 </a>
399 </a>
400 <a
400 <a
401 class="btn ${c.diffmode == 'unified' and 'btn-primary'} tooltip"
401 class="btn ${c.diffmode == 'unified' and 'btn-primary'} tooltip"
402 title="${_('View unified')}" href="${h.url_replace(diffmode='unified')}">
402 title="${_('View unified')}" href="${h.url_replace(diffmode='unified')}">
403 <span>${_('Unified')}</span>
403 <span>${_('Unified')}</span>
404 </a>
404 </a>
405 </div>
405 </div>
406 </div>
406 </div>
407 <div class="pull-left">
407 <div class="pull-left">
408 <div class="btn-group">
408 <div class="btn-group">
409 <a
409 <a
410 class="btn"
410 class="btn"
411 href="#"
411 href="#"
412 onclick="$('input[class=filediff-collapse-state]').prop('checked', false); return false">${_('Expand All')}</a>
412 onclick="$('input[class=filediff-collapse-state]').prop('checked', false); return false">${_('Expand All')}</a>
413 <a
413 <a
414 class="btn"
414 class="btn"
415 href="#"
415 href="#"
416 onclick="$('input[class=filediff-collapse-state]').prop('checked', true); return false">${_('Collapse All')}</a>
416 onclick="$('input[class=filediff-collapse-state]').prop('checked', true); return false">${_('Collapse All')}</a>
417 </div>
417 </div>
418 </div>
418 </div>
419 </div>
419 </div>
420 </%def>
420 </%def>
General Comments 0
You need to be logged in to leave comments. Login now