@@ -1,490 +1,492 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2010-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

"""
commit controller for RhodeCode showing changes between commits
"""

import logging

from collections import defaultdict
from webob.exc import HTTPForbidden, HTTPBadRequest, HTTPNotFound

from pylons import tmpl_context as c, request, response
from pylons.i18n.translation import _
from pylons.controllers.util import redirect

from rhodecode.lib import auth
from rhodecode.lib import diffs, codeblocks
from rhodecode.lib.auth import (
    LoginRequired, HasRepoPermissionAnyDecorator, NotAnonymous)
from rhodecode.lib.base import BaseRepoController, render
from rhodecode.lib.compat import OrderedDict
from rhodecode.lib.exceptions import StatusChangeOnClosedPullRequestError
import rhodecode.lib.helpers as h
from rhodecode.lib.utils import jsonify
from rhodecode.lib.utils2 import safe_unicode, safe_int
from rhodecode.lib.vcs.backends.base import EmptyCommit
from rhodecode.lib.vcs.exceptions import (
    RepositoryError, CommitDoesNotExistError, NodeDoesNotExistError)
from rhodecode.model.db import ChangesetComment, ChangesetStatus
from rhodecode.model.changeset_status import ChangesetStatusModel
from rhodecode.model.comment import CommentsModel
from rhodecode.model.meta import Session


log = logging.getLogger(__name__)


def _update_with_GET(params, GET):
    for k in ['diff1', 'diff2', 'diff']:
        params[k] += GET.getall(k)


def get_ignore_ws(fid, GET):
    ig_ws_global = GET.get('ignorews')
    ig_ws = filter(lambda k: k.startswith('WS'), GET.getall(fid))
    if ig_ws:
        try:
            return int(ig_ws[0].split(':')[-1])
        except Exception:
            pass
    return ig_ws_global


def _ignorews_url(GET, fileid=None):
    fileid = str(fileid) if fileid else None
    params = defaultdict(list)
    _update_with_GET(params, GET)
    label = _('Show whitespace')
    tooltiplbl = _('Show whitespace for all diffs')
    ig_ws = get_ignore_ws(fileid, GET)
    ln_ctx = get_line_ctx(fileid, GET)

    if ig_ws is None:
        params['ignorews'] += [1]
        label = _('Ignore whitespace')
        tooltiplbl = _('Ignore whitespace for all diffs')
    ctx_key = 'context'
    ctx_val = ln_ctx

    # if we have passed in ln_ctx pass it along to our params
    if ln_ctx:
        params[ctx_key] += [ctx_val]

    if fileid:
        params['anchor'] = 'a_' + fileid
    return h.link_to(label, h.url.current(**params), title=tooltiplbl, class_='tooltip')


def get_line_ctx(fid, GET):
    ln_ctx_global = GET.get('context')
    if fid:
        ln_ctx = filter(lambda k: k.startswith('C'), GET.getall(fid))
    else:
        _ln_ctx = filter(lambda k: k.startswith('C'), GET)
        ln_ctx = GET.get(_ln_ctx[0]) if _ln_ctx else ln_ctx_global
        if ln_ctx:
            ln_ctx = [ln_ctx]

    if ln_ctx:
        retval = ln_ctx[0].split(':')[-1]
    else:
        retval = ln_ctx_global

    try:
        return int(retval)
    except Exception:
        return 3


def _context_url(GET, fileid=None):
    """
    Generates a url for context lines.

    :param fileid:
    """

    fileid = str(fileid) if fileid else None
    ig_ws = get_ignore_ws(fileid, GET)
    ln_ctx = (get_line_ctx(fileid, GET) or 3) * 2

    params = defaultdict(list)
    _update_with_GET(params, GET)

    if ln_ctx > 0:
        params['context'] += [ln_ctx]

    if ig_ws:
        ig_ws_key = 'ignorews'
        ig_ws_val = 1
        params[ig_ws_key] += [ig_ws_val]

    lbl = _('Increase context')
    tooltiplbl = _('Increase context for all diffs')

    if fileid:
        params['anchor'] = 'a_' + fileid
    return h.link_to(lbl, h.url.current(**params), title=tooltiplbl, class_='tooltip')


class ChangesetController(BaseRepoController):

    def __before__(self):
        super(ChangesetController, self).__before__()
        c.affected_files_cut_off = 60

    def _index(self, commit_id_range, method):
        c.ignorews_url = _ignorews_url
        c.context_url = _context_url
        c.fulldiff = fulldiff = request.GET.get('fulldiff')

        # fetch global flags of ignore ws or context lines
        context_lcl = get_line_ctx('', request.GET)
        ign_whitespace_lcl = get_ignore_ws('', request.GET)

        # diff_limit will cut off the whole diff if the limit is applied
        # otherwise it will just hide the big files from the front-end
        diff_limit = self.cut_off_limit_diff
        file_limit = self.cut_off_limit_file

        # get ranges of commit ids if preset
        commit_range = commit_id_range.split('...')[:2]

        try:
            pre_load = ['affected_files', 'author', 'branch', 'date',
                        'message', 'parents']

            if len(commit_range) == 2:
                commits = c.rhodecode_repo.get_commits(
                    start_id=commit_range[0], end_id=commit_range[1],
                    pre_load=pre_load)
                commits = list(commits)
            else:
                commits = [c.rhodecode_repo.get_commit(
                    commit_id=commit_id_range, pre_load=pre_load)]

            c.commit_ranges = commits
            if not c.commit_ranges:
                raise RepositoryError(
                    'The commit range returned an empty result')
        except CommitDoesNotExistError:
            msg = _('No such commit exists for this repository')
            h.flash(msg, category='error')
            raise HTTPNotFound()
        except Exception:
            log.exception("General failure")
            raise HTTPNotFound()

        c.changes = OrderedDict()
        c.lines_added = 0
        c.lines_deleted = 0

        # auto collapse if we have more than limit
        collapse_limit = diffs.DiffProcessor._collapse_commits_over
        c.collapse_all_commits = len(c.commit_ranges) > collapse_limit

        c.commit_statuses = ChangesetStatus.STATUSES
        c.inline_comments = []
        c.files = []

        c.statuses = []
        c.comments = []
        c.unresolved_comments = []
        if len(c.commit_ranges) == 1:
            commit = c.commit_ranges[0]
            c.comments = CommentsModel().get_comments(
                c.rhodecode_db_repo.repo_id,
                revision=commit.raw_id)
            c.statuses.append(ChangesetStatusModel().get_status(
                c.rhodecode_db_repo.repo_id, commit.raw_id))
            # comments from PR
            statuses = ChangesetStatusModel().get_statuses(
                c.rhodecode_db_repo.repo_id, commit.raw_id,
                with_revisions=True)
            prs = set(st.pull_request for st in statuses
                      if st.pull_request is not None)
            # from associated statuses, check the pull requests, and
            # show comments from them
            for pr in prs:
                c.comments.extend(pr.comments)

            c.unresolved_comments = CommentsModel()\
                .get_commit_unresolved_todos(commit.raw_id)

        # Iterate over ranges (default commit view is always one commit)
        for commit in c.commit_ranges:
            c.changes[commit.raw_id] = []

            commit2 = commit
            commit1 = commit.parents[0] if commit.parents else EmptyCommit()

            _diff = c.rhodecode_repo.get_diff(
                commit1, commit2,
                ignore_whitespace=ign_whitespace_lcl, context=context_lcl)
            diff_processor = diffs.DiffProcessor(
                _diff, format='newdiff', diff_limit=diff_limit,
                file_limit=file_limit, show_full_diff=fulldiff)

            commit_changes = OrderedDict()
            if method == 'show':
                _parsed = diff_processor.prepare()
                c.limited_diff = isinstance(_parsed, diffs.LimitedDiffContainer)

                _parsed = diff_processor.prepare()

                def _node_getter(commit):
                    def get_node(fname):
                        try:
                            return commit.get_node(fname)
                        except NodeDoesNotExistError:
                            return None
                    return get_node

                inline_comments = CommentsModel().get_inline_comments(
                    c.rhodecode_db_repo.repo_id, revision=commit.raw_id)
                c.inline_cnt = CommentsModel().get_inline_comments_count(
                    inline_comments)

                diffset = codeblocks.DiffSet(
                    repo_name=c.repo_name,
                    source_node_getter=_node_getter(commit1),
                    target_node_getter=_node_getter(commit2),
-                    comments=inline_comments
-                ).render_patchset(_parsed, commit1.raw_id, commit2.raw_id)
+                    comments=inline_comments)
+                diffset = diffset.render_patchset(
+                    _parsed, commit1.raw_id, commit2.raw_id)
+
                c.changes[commit.raw_id] = diffset
            else:
                # downloads/raw we only need RAW diff nothing else
                diff = diff_processor.as_raw()
                c.changes[commit.raw_id] = [None, None, None, None, diff, None, None]

        # sort comments by how they were generated
        c.comments = sorted(c.comments, key=lambda x: x.comment_id)

        if len(c.commit_ranges) == 1:
            c.commit = c.commit_ranges[0]
            c.parent_tmpl = ''.join(
                '# Parent %s\n' % x.raw_id for x in c.commit.parents)
        if method == 'download':
            response.content_type = 'text/plain'
            response.content_disposition = (
                'attachment; filename=%s.diff' % commit_id_range[:12])
            return diff
        elif method == 'patch':
            response.content_type = 'text/plain'
            c.diff = safe_unicode(diff)
            return render('changeset/patch_changeset.mako')
        elif method == 'raw':
            response.content_type = 'text/plain'
            return diff
        elif method == 'show':
            if len(c.commit_ranges) == 1:
                return render('changeset/changeset.mako')
            else:
                c.ancestor = None
                c.target_repo = c.rhodecode_db_repo
                return render('changeset/changeset_range.mako')

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    def index(self, revision, method='show'):
        return self._index(revision, method=method)

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    def changeset_raw(self, revision):
        return self._index(revision, method='raw')

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    def changeset_patch(self, revision):
        return self._index(revision, method='patch')

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    def changeset_download(self, revision):
        return self._index(revision, method='download')

    @LoginRequired()
    @NotAnonymous()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    @auth.CSRFRequired()
    @jsonify
    def comment(self, repo_name, revision):
        commit_id = revision
        status = request.POST.get('changeset_status', None)
        text = request.POST.get('text')
        comment_type = request.POST.get('comment_type')
        resolves_comment_id = request.POST.get('resolves_comment_id', None)

        if status:
            text = text or (_('Status change %(transition_icon)s %(status)s')
                            % {'transition_icon': '>',
                               'status': ChangesetStatus.get_status_lbl(status)})

        multi_commit_ids = []
        for _commit_id in request.POST.get('commit_ids', '').split(','):
            if _commit_id not in ['', None, EmptyCommit.raw_id]:
                if _commit_id not in multi_commit_ids:
                    multi_commit_ids.append(_commit_id)

        commit_ids = multi_commit_ids or [commit_id]

        comment = None
        for current_id in filter(None, commit_ids):
            c.co = comment = CommentsModel().create(
                text=text,
                repo=c.rhodecode_db_repo.repo_id,
                user=c.rhodecode_user.user_id,
                commit_id=current_id,
                f_path=request.POST.get('f_path'),
                line_no=request.POST.get('line'),
                status_change=(ChangesetStatus.get_status_lbl(status)
                               if status else None),
                status_change_type=status,
                comment_type=comment_type,
                resolves_comment_id=resolves_comment_id
            )

            # get status if set !
            if status:
                # if latest status was from pull request and it's closed
                # disallow changing status !
                # dont_allow_on_closed_pull_request = True !

                try:
                    ChangesetStatusModel().set_status(
                        c.rhodecode_db_repo.repo_id,
                        status,
                        c.rhodecode_user.user_id,
                        comment,
                        revision=current_id,
                        dont_allow_on_closed_pull_request=True
                    )
                except StatusChangeOnClosedPullRequestError:
                    msg = _('Changing the status of a commit associated with '
                            'a closed pull request is not allowed')
                    log.exception(msg)
                    h.flash(msg, category='warning')
                    return redirect(h.url(
                        'changeset_home', repo_name=repo_name,
                        revision=current_id))

        # finalize, commit and redirect
        Session().commit()

        data = {
            'target_id': h.safeid(h.safe_unicode(request.POST.get('f_path'))),
        }
        if comment:
            data.update(comment.get_dict())
            data.update({'rendered_text':
                         render('changeset/changeset_comment_block.mako')})

        return data

    @LoginRequired()
    @NotAnonymous()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    @auth.CSRFRequired()
    def preview_comment(self):
        # Technically a CSRF token is not needed as no state changes with this
        # call. However, as this is a POST is better to have it, so automated
        # tools don't flag it as potential CSRF.
        # Post is required because the payload could be bigger than the maximum
        # allowed by GET.
        if not request.environ.get('HTTP_X_PARTIAL_XHR'):
            raise HTTPBadRequest()
        text = request.POST.get('text')
        renderer = request.POST.get('renderer') or 'rst'
        if text:
            return h.render(text, renderer=renderer, mentions=True)
        return ''

    @LoginRequired()
    @NotAnonymous()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    @auth.CSRFRequired()
    @jsonify
    def delete_comment(self, repo_name, comment_id):
        comment = ChangesetComment.get_or_404(safe_int(comment_id))
        if not comment:
            log.debug('Comment with id:%s not found, skipping', comment_id)
            # comment already deleted in another call probably
            return True

        is_repo_admin = h.HasRepoPermissionAny('repository.admin')(c.repo_name)
        super_admin = h.HasPermissionAny('hg.admin')()
        comment_owner = (comment.author.user_id == c.rhodecode_user.user_id)
        is_repo_comment = comment.repo.repo_name == c.repo_name
        comment_repo_admin = is_repo_admin and is_repo_comment

        if super_admin or comment_owner or comment_repo_admin:
            CommentsModel().delete(comment=comment, user=c.rhodecode_user)
            Session().commit()
            return True
        else:
            log.warning('No permissions for user %s to delete comment_id: %s',
                        c.rhodecode_user, comment_id)
            raise HTTPNotFound()

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    @jsonify
    def changeset_info(self, repo_name, revision):
        if request.is_xhr:
            try:
                return c.rhodecode_repo.get_commit(commit_id=revision)
            except CommitDoesNotExistError as e:
                return EmptyCommit(message=str(e))
        else:
            raise HTTPBadRequest()

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    @jsonify
    def changeset_children(self, repo_name, revision):
        if request.is_xhr:
            commit = c.rhodecode_repo.get_commit(commit_id=revision)
            result = {"results": commit.children}
            return result
        else:
            raise HTTPBadRequest()

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    @jsonify
    def changeset_parents(self, repo_name, revision):
        if request.is_xhr:
            commit = c.rhodecode_repo.get_commit(commit_id=revision)
            result = {"results": commit.parents}
            return result
        else:
            raise HTTPBadRequest()
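
The only functional change in the hunk above is at new lines 270-273: the chained codeblocks.DiffSet(...).render_patchset(...) call is split so the DiffSet instance is bound to a local name before render_patchset() runs. The rest is context, including the whitespace and context-line helpers at the top of the module. Those helpers read per-file overrides encoded as 'WS:<n>' and 'C:<n>' values stored under the file-id key of the GET MultiDict, falling back to the global ignorews/context parameters. Below is a minimal sketch of that parameter format; the file id 'f1' and the parse_flag() helper are hypothetical stand-ins for illustration, not names from this codebase.

# Sketch of the per-file diff flags parsed by get_ignore_ws()/get_line_ctx()
# above. The file id 'f1' and parse_flag() are hypothetical illustrations;
# the controller itself runs under Python 2 / Pylons.
from webob.multidict import MultiDict

GET = MultiDict()
GET.add('ignorews', '0')   # global whitespace flag
GET.add('context', '3')    # global context-line count
GET.add('f1', 'WS:1')      # per-file: ignore whitespace for file id 'f1'
GET.add('f1', 'C:6')       # per-file: six context lines for file id 'f1'


def parse_flag(prefix, fid, GET, default):
    # per-file values live under the file-id key as '<prefix>:<int>'
    vals = [k for k in GET.getall(fid) if k.startswith(prefix)]
    if vals:
        try:
            return int(vals[0].split(':')[-1])
        except ValueError:
            pass
    return default


print(parse_flag('WS', 'f1', GET, GET.get('ignorews')))  # -> 1
print(parse_flag('C', 'f1', GET, GET.get('context')))    # -> 6
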
@@ -1,284 +1,286 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2012-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

"""
Compare controller for showing differences between two commits/refs/tags etc.
"""

import logging

from webob.exc import HTTPBadRequest, HTTPNotFound
from pylons import request, tmpl_context as c, url
from pylons.controllers.util import redirect
from pylons.i18n.translation import _

from rhodecode.controllers.utils import parse_path_ref, get_commit_from_ref_name
from rhodecode.lib import helpers as h
from rhodecode.lib import diffs, codeblocks
from rhodecode.lib.auth import LoginRequired, HasRepoPermissionAnyDecorator
from rhodecode.lib.base import BaseRepoController, render
from rhodecode.lib.utils import safe_str
from rhodecode.lib.utils2 import safe_unicode, str2bool
from rhodecode.lib.vcs.exceptions import (
    EmptyRepositoryError, RepositoryError, RepositoryRequirementError,
    NodeDoesNotExistError)
from rhodecode.model.db import Repository, ChangesetStatus

log = logging.getLogger(__name__)


class CompareController(BaseRepoController):

    def __before__(self):
        super(CompareController, self).__before__()

    def _get_commit_or_redirect(
            self, ref, ref_type, repo, redirect_after=True, partial=False):
        """
        This is a safe way to get a commit. If an error occurs it
        redirects to a commit with a proper message. If partial is set
        then it does not do redirect raise and throws an exception instead.
        """
        try:
            return get_commit_from_ref_name(repo, safe_str(ref), ref_type)
        except EmptyRepositoryError:
            if not redirect_after:
                return repo.scm_instance().EMPTY_COMMIT
            h.flash(h.literal(_('There are no commits yet')),
                    category='warning')
            redirect(h.route_path('repo_summary', repo_name=repo.repo_name))

        except RepositoryError as e:
            log.exception(safe_str(e))
            h.flash(safe_str(h.escape(e)), category='warning')
            if not partial:
                redirect(h.route_path('repo_summary', repo_name=repo.repo_name))
            raise HTTPBadRequest()

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    def index(self, repo_name):
        c.compare_home = True
        c.commit_ranges = []
        c.collapse_all_commits = False
        c.diffset = None
        c.limited_diff = False
        source_repo = c.rhodecode_db_repo.repo_name
        target_repo = request.GET.get('target_repo', source_repo)
        c.source_repo = Repository.get_by_repo_name(source_repo)
        c.target_repo = Repository.get_by_repo_name(target_repo)

        if c.source_repo is None or c.target_repo is None:
            raise HTTPNotFound()

        c.source_ref = c.target_ref = _('Select commit')
        c.source_ref_type = ""
        c.target_ref_type = ""
        c.commit_statuses = ChangesetStatus.STATUSES
        c.preview_mode = False
        c.file_path = None
        return render('compare/compare_diff.mako')

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    def compare(self, repo_name, source_ref_type, source_ref,
                target_ref_type, target_ref):
        # source_ref will be evaluated in source_repo
        source_repo_name = c.rhodecode_db_repo.repo_name
        source_path, source_id = parse_path_ref(source_ref)

        # target_ref will be evaluated in target_repo
        target_repo_name = request.GET.get('target_repo', source_repo_name)
        target_path, target_id = parse_path_ref(
            target_ref, default_path=request.GET.get('f_path', ''))

        c.file_path = target_path
        c.commit_statuses = ChangesetStatus.STATUSES

        # if merge is True
        # Show what changes since the shared ancestor commit of target/source
        # the source would get if it was merged with target. Only commits
        # which are in target but not in source will be shown.
        merge = str2bool(request.GET.get('merge'))
        # if merge is False
        # Show a raw diff of source/target refs even if no ancestor exists

        # c.fulldiff disables cut_off_limit
        c.fulldiff = str2bool(request.GET.get('fulldiff'))

        # if partial, returns just compare_commits.html (commits log)
        partial = request.is_xhr

        # swap url for compare_diff page
        c.swap_url = h.url(
            'compare_url',
            repo_name=target_repo_name,
            source_ref_type=target_ref_type,
            source_ref=target_ref,
            target_repo=source_repo_name,
            target_ref_type=source_ref_type,
            target_ref=source_ref,
            merge=merge and '1' or '',
            f_path=target_path)

        source_repo = Repository.get_by_repo_name(source_repo_name)
        target_repo = Repository.get_by_repo_name(target_repo_name)

        if source_repo is None:
            log.error('Could not find the source repo: {}'
                      .format(source_repo_name))
            h.flash(_('Could not find the source repo: `{}`')
                    .format(h.escape(source_repo_name)), category='error')
            return redirect(url('compare_home', repo_name=c.repo_name))

        if target_repo is None:
            log.error('Could not find the target repo: {}'
                      .format(source_repo_name))
            h.flash(_('Could not find the target repo: `{}`')
                    .format(h.escape(target_repo_name)), category='error')
            return redirect(url('compare_home', repo_name=c.repo_name))

        source_scm = source_repo.scm_instance()
        target_scm = target_repo.scm_instance()

        source_alias = source_scm.alias
        target_alias = target_scm.alias
        if source_alias != target_alias:
            msg = _('The comparison of two different kinds of remote repos '
                    'is not available')
            log.error(msg)
            h.flash(msg, category='error')
            return redirect(url('compare_home', repo_name=c.repo_name))

        source_commit = self._get_commit_or_redirect(
            ref=source_id, ref_type=source_ref_type, repo=source_repo,
            partial=partial)
        target_commit = self._get_commit_or_redirect(
            ref=target_id, ref_type=target_ref_type, repo=target_repo,
            partial=partial)

        c.compare_home = False
        c.source_repo = source_repo
        c.target_repo = target_repo
        c.source_ref = source_ref
        c.target_ref = target_ref
        c.source_ref_type = source_ref_type
        c.target_ref_type = target_ref_type

        pre_load = ["author", "branch", "date", "message"]
        c.ancestor = None

        if c.file_path:
            if source_commit == target_commit:
                c.commit_ranges = []
            else:
                c.commit_ranges = [target_commit]
        else:
            try:
                c.commit_ranges = source_scm.compare(
                    source_commit.raw_id, target_commit.raw_id,
                    target_scm, merge, pre_load=pre_load)
                if merge:
                    c.ancestor = source_scm.get_common_ancestor(
                        source_commit.raw_id, target_commit.raw_id, target_scm)
            except RepositoryRequirementError:
                msg = _('Could not compare repos with different '
                        'large file settings')
                log.error(msg)
                if partial:
                    return msg
                h.flash(msg, category='error')
                return redirect(url('compare_home', repo_name=c.repo_name))

        c.statuses = c.rhodecode_db_repo.statuses(
            [x.raw_id for x in c.commit_ranges])

        # auto collapse if we have more than limit
        collapse_limit = diffs.DiffProcessor._collapse_commits_over
        c.collapse_all_commits = len(c.commit_ranges) > collapse_limit

        if partial:  # for PR ajax commits loader
            if not c.ancestor:
                return ''  # cannot merge if there is no ancestor
            return render('compare/compare_commits.mako')

        if c.ancestor:
            # case we want a simple diff without incoming commits,
            # previewing what will be merged.
            # Make the diff on target repo (which is known to have target_ref)
            log.debug('Using ancestor %s as source_ref instead of %s'
                      % (c.ancestor, source_ref))
            source_repo = target_repo
            source_commit = target_repo.get_commit(commit_id=c.ancestor)

        # diff_limit will cut off the whole diff if the limit is applied
        # otherwise it will just hide the big files from the front-end
        diff_limit = self.cut_off_limit_diff
        file_limit = self.cut_off_limit_file

        log.debug('calculating diff between '
                  'source_ref:%s and target_ref:%s for repo `%s`',
                  source_commit, target_commit,
                  safe_unicode(source_repo.scm_instance().path))

        if source_commit.repository != target_commit.repository:
            msg = _(
                "Repositories unrelated. "
                "Cannot compare commit %(commit1)s from repository %(repo1)s "
                "with commit %(commit2)s from repository %(repo2)s.") % {
                    'commit1': h.show_id(source_commit),
                    'repo1': source_repo.repo_name,
                    'commit2': h.show_id(target_commit),
                    'repo2': target_repo.repo_name,
                }
            h.flash(msg, category='error')
            raise HTTPBadRequest()

        txtdiff = source_repo.scm_instance().get_diff(
            commit1=source_commit, commit2=target_commit,
            path=target_path, path1=source_path)

        diff_processor = diffs.DiffProcessor(
            txtdiff, format='newdiff', diff_limit=diff_limit,
262 | file_limit=file_limit, show_full_diff=c.fulldiff) |
|
262 | file_limit=file_limit, show_full_diff=c.fulldiff) | |
263 | _parsed = diff_processor.prepare() |
|
263 | _parsed = diff_processor.prepare() | |
264 |
|
264 | |||
265 | def _node_getter(commit): |
|
265 | def _node_getter(commit): | |
266 | """ Returns a function that returns a node for a commit or None """ |
|
266 | """ Returns a function that returns a node for a commit or None """ | |
267 | def get_node(fname): |
|
267 | def get_node(fname): | |
268 | try: |
|
268 | try: | |
269 | return commit.get_node(fname) |
|
269 | return commit.get_node(fname) | |
270 | except NodeDoesNotExistError: |
|
270 | except NodeDoesNotExistError: | |
271 | return None |
|
271 | return None | |
272 | return get_node |
|
272 | return get_node | |
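        # Illustrative usage of the closure above (editor sketch; the file
        # name is an assumed example, not taken from the request):
        #   get_source = _node_getter(source_commit)
        #   node = get_source('setup.py')  # FileNode, or None when missing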

-        c.diffset = codeblocks.DiffSet(
+        diffset = codeblocks.DiffSet(
             repo_name=source_repo.repo_name,
             source_node_getter=_node_getter(source_commit),
             target_node_getter=_node_getter(target_commit),
-        ).render_patchset(_parsed, source_ref, target_ref)
+        )
+        c.diffset = diffset.render_patchset(
+            _parsed, source_ref, target_ref)

        c.preview_mode = merge
        c.source_commit = source_commit
        c.target_commit = target_commit

        return render('compare/compare_diff.mako')
@@ -1,707 +1,707 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2011-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

import logging
import difflib
from itertools import groupby

from pygments import lex
from pygments.formatters.html import _get_ttype_class as pygment_token_class
from rhodecode.lib.helpers import (
    get_lexer_for_filenode, html_escape, get_custom_lexer)
from rhodecode.lib.utils2 import AttributeDict
from rhodecode.lib.vcs.nodes import FileNode
from rhodecode.lib.diff_match_patch import diff_match_patch
from rhodecode.lib.diffs import LimitedDiffContainer
from pygments.lexers import get_lexer_by_name

plain_text_lexer = get_lexer_by_name(
    'text', stripall=False, stripnl=False, ensurenl=False)


log = logging.getLogger(__name__)


def filenode_as_lines_tokens(filenode, lexer=None):
    org_lexer = lexer
    lexer = lexer or get_lexer_for_filenode(filenode)
    log.debug('Generating file node pygment tokens for %s, %s, org_lexer:%s',
              lexer, filenode, org_lexer)
    tokens = tokenize_string(filenode.content, lexer)
    lines = split_token_stream(tokens, split_string='\n')
    rv = list(lines)
    return rv

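# Illustrative note (editor sketch, not part of the original module): for a
# python file whose content is u"x = 1\n", filenode_as_lines_tokens() returns
# one token list per line, roughly:
#   [[('n', u'x'), ('', u' '), ('o', u'='), ('', u' '), ('mi', u'1'), ('', u'')],
#    [('', u'')]]
# The exact css classes depend on the pygments lexer picked for the file.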

def tokenize_string(content, lexer):
    """
    Use pygments to tokenize some content based on a lexer
    ensuring all original newlines and whitespace are preserved
    """

    lexer.stripall = False
    lexer.stripnl = False
    lexer.ensurenl = False
    for token_type, token_text in lex(content, lexer):
        yield pygment_token_class(token_type), token_text

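# Editor's doctest-style sketch (assumed, not in the original module): with a
# plain TextLexer the whole content comes back as a single ('', text) pair:
#   >>> from pygments.lexers import TextLexer
#   >>> list(tokenize_string(u'a\nb', TextLexer()))
#   [('', u'a\nb')]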

def split_token_stream(tokens, split_string=u'\n'):
    """
    Take a list of (TokenType, text) tuples and split them by a string

    >>> list(split_token_stream([(TEXT, 'some\ntext'), (TEXT, 'more\n')]))
    [[(TEXT, 'some')],
     [(TEXT, 'text'), (TEXT, 'more')],
     [(TEXT, '')]]
    """

    buffer = []
    for token_class, token_text in tokens:
        parts = token_text.split(split_string)
        for part in parts[:-1]:
            buffer.append((token_class, part))
            yield buffer
            buffer = []

        buffer.append((token_class, parts[-1]))

    if buffer:
        yield buffer

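# Illustrative usage (editor sketch, not in the original module):
#   lines = list(split_token_stream([('', u'one\ntwo')]))
#   # -> [[('', u'one')], [('', u'two')]]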

def filenode_as_annotated_lines_tokens(filenode):
    """
    Take a file node and yield (annotation, lines) pairs; if no annotation
    is found for a line, the annotation is None.

    eg:

    [
        (annotation1, [
            (1, line1_tokens_list),
            (2, line2_tokens_list),
        ]),
        (annotation2, [
            (3, line1_tokens_list),
        ]),
        (None, [
            (4, line1_tokens_list),
        ]),
        (annotation1, [
            (5, line1_tokens_list),
            (6, line2_tokens_list),
        ])
    ]
    """

    commit_cache = {}  # cache commit_getter lookups

    def _get_annotation(commit_id, commit_getter):
        if commit_id not in commit_cache:
            commit_cache[commit_id] = commit_getter()
        return commit_cache[commit_id]

    annotation_lookup = {
        line_no: _get_annotation(commit_id, commit_getter)
        for line_no, commit_id, commit_getter, line_content
        in filenode.annotate
    }

    annotations_lines = ((annotation_lookup.get(line_no), line_no, tokens)
                         for line_no, tokens
                         in enumerate(filenode_as_lines_tokens(filenode), 1))

    grouped_annotations_lines = groupby(annotations_lines, lambda x: x[0])

    for annotation, group in grouped_annotations_lines:
        yield (
            annotation, [(line_no, tokens)
                         for (_, line_no, tokens) in group]
        )

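# Editor note: itertools.groupby only merges *consecutive* lines, which is
# why the same annotation (annotation1 in the docstring above) can appear in
# more than one group when a file later returns to an earlier commit's lines.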

def render_tokenstream(tokenstream):
    result = []
    for token_class, token_ops_texts in rollup_tokenstream(tokenstream):

        if token_class:
            result.append(u'<span class="%s">' % token_class)
        else:
            result.append(u'<span>')

        for op_tag, token_text in token_ops_texts:

            if op_tag:
                result.append(u'<%s>' % op_tag)

            escaped_text = html_escape(token_text)

            # TODO: dan: investigate showing hidden characters like space/nl/tab
            # escaped_text = escaped_text.replace(' ', '<sp> </sp>')
            # escaped_text = escaped_text.replace('\n', '<nl>\n</nl>')
            # escaped_text = escaped_text.replace('\t', '<tab>\t</tab>')

            result.append(escaped_text)

            if op_tag:
                result.append(u'</%s>' % op_tag)

        result.append(u'</span>')

    html = ''.join(result)
    return html

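# Illustrative example (editor sketch, not in the original module):
#   render_tokenstream([('A', 'ins', u'he'), ('A', '', u'llo')])
#   # -> u'<span class="A"><ins>he</ins>llo</span>'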

def rollup_tokenstream(tokenstream):
    """
    Group a token stream of the format:

        ('class', 'op', 'text')
    or
        ('class', 'text')

    into

        [('class1',
            [('op1', 'text'),
             ('op2', 'text')]),
         ('class2',
            [('op3', 'text')])]

    This is used to emit the minimal number of tags necessary when
    rendering to html, e.g. for a token stream this prefers:

        <span class="A"><ins>he</ins>llo</span>
    over
        <span class="A"><ins>he</ins></span><span class="A">llo</span>

    If a 2 tuple is passed in, the output op will be an empty string.

    eg:

    >>> rollup_tokenstream([('classA', '', 'h'),
    ...                     ('classA', 'del', 'ell'),
    ...                     ('classA', '', 'o'),
    ...                     ('classB', '', ' '),
    ...                     ('classA', '', 'the'),
    ...                     ('classA', '', 're')])
    [('classA', [('', 'h'), ('del', 'ell'), ('', 'o')]),
     ('classB', [('', ' ')]),
     ('classA', [('', 'there')])]

    """
    if tokenstream and len(tokenstream[0]) == 2:
        tokenstream = ((t[0], '', t[1]) for t in tokenstream)

    result = []
    for token_class, op_list in groupby(tokenstream, lambda t: t[0]):
        ops = []
        for token_op, token_text_list in groupby(op_list, lambda o: o[1]):
            text_buffer = []
            for t_class, t_op, t_text in token_text_list:
                text_buffer.append(t_text)
            ops.append((token_op, ''.join(text_buffer)))
        result.append((token_class, ops))
    return result


def tokens_diff(old_tokens, new_tokens, use_diff_match_patch=True):
    """
    Converts a list of (token_class, token_text) tuples to a list of
    (token_class, token_op, token_text) tuples where token_op is one of
    ('ins', 'del', '')

    :param old_tokens: list of (token_class, token_text) tuples of old line
    :param new_tokens: list of (token_class, token_text) tuples of new line
    :param use_diff_match_patch: boolean, will use google's diff match patch
        library which has options to 'smooth' out the character by character
        differences making nicer ins/del blocks
    """

    old_tokens_result = []
    new_tokens_result = []

    similarity = difflib.SequenceMatcher(None,
        ''.join(token_text for token_class, token_text in old_tokens),
        ''.join(token_text for token_class, token_text in new_tokens)
    ).ratio()

    if similarity < 0.6:  # return early, the blocks are too different
        for token_class, token_text in old_tokens:
            old_tokens_result.append((token_class, '', token_text))
        for token_class, token_text in new_tokens:
            new_tokens_result.append((token_class, '', token_text))
        return old_tokens_result, new_tokens_result, similarity

    token_sequence_matcher = difflib.SequenceMatcher(None,
        [x[1] for x in old_tokens],
        [x[1] for x in new_tokens])

    for tag, o1, o2, n1, n2 in token_sequence_matcher.get_opcodes():
        # check the differences by token block types first to give a
        # nicer "block" level replacement vs character diffs

        if tag == 'equal':
            for token_class, token_text in old_tokens[o1:o2]:
                old_tokens_result.append((token_class, '', token_text))
            for token_class, token_text in new_tokens[n1:n2]:
                new_tokens_result.append((token_class, '', token_text))
        elif tag == 'delete':
            for token_class, token_text in old_tokens[o1:o2]:
                old_tokens_result.append((token_class, 'del', token_text))
        elif tag == 'insert':
            for token_class, token_text in new_tokens[n1:n2]:
                new_tokens_result.append((token_class, 'ins', token_text))
        elif tag == 'replace':
            # if same type token blocks must be replaced, do a diff on the
            # characters in the token blocks to show individual changes

            old_char_tokens = []
            new_char_tokens = []
            for token_class, token_text in old_tokens[o1:o2]:
                for char in token_text:
                    old_char_tokens.append((token_class, char))

            for token_class, token_text in new_tokens[n1:n2]:
                for char in token_text:
                    new_char_tokens.append((token_class, char))

            old_string = ''.join([token_text for
                token_class, token_text in old_char_tokens])
            new_string = ''.join([token_text for
                token_class, token_text in new_char_tokens])

            char_sequence = difflib.SequenceMatcher(
                None, old_string, new_string)
            copcodes = char_sequence.get_opcodes()
            obuffer, nbuffer = [], []

            if use_diff_match_patch:
                dmp = diff_match_patch()
                dmp.Diff_EditCost = 11  # TODO: dan: extract this to a setting
                reps = dmp.diff_main(old_string, new_string)
                dmp.diff_cleanupEfficiency(reps)

                a, b = 0, 0
                for op, rep in reps:
                    l = len(rep)
                    if op == 0:
                        for i, c in enumerate(rep):
                            obuffer.append((old_char_tokens[a+i][0], '', c))
                            nbuffer.append((new_char_tokens[b+i][0], '', c))
                        a += l
                        b += l
                    elif op == -1:
                        for i, c in enumerate(rep):
                            obuffer.append((old_char_tokens[a+i][0], 'del', c))
                        a += l
                    elif op == 1:
                        for i, c in enumerate(rep):
                            nbuffer.append((new_char_tokens[b+i][0], 'ins', c))
                        b += l
            else:
                for ctag, co1, co2, cn1, cn2 in copcodes:
                    if ctag == 'equal':
                        for token_class, token_text in old_char_tokens[co1:co2]:
                            obuffer.append((token_class, '', token_text))
                        for token_class, token_text in new_char_tokens[cn1:cn2]:
                            nbuffer.append((token_class, '', token_text))
                    elif ctag == 'delete':
                        for token_class, token_text in old_char_tokens[co1:co2]:
                            obuffer.append((token_class, 'del', token_text))
                    elif ctag == 'insert':
                        for token_class, token_text in new_char_tokens[cn1:cn2]:
                            nbuffer.append((token_class, 'ins', token_text))
                    elif ctag == 'replace':
                        for token_class, token_text in old_char_tokens[co1:co2]:
                            obuffer.append((token_class, 'del', token_text))
                        for token_class, token_text in new_char_tokens[cn1:cn2]:
                            nbuffer.append((token_class, 'ins', token_text))

            old_tokens_result.extend(obuffer)
            new_tokens_result.extend(nbuffer)

    return old_tokens_result, new_tokens_result, similarity

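# Illustrative example (editor sketch, not in the original module); with the
# stdlib-only path the per-character ops come back roughly as:
#   old, new, sim = tokens_diff([('', u'hello')], [('', u'hallo')],
#                               use_diff_match_patch=False)
#   # old -> [('', '', u'h'), ('', 'del', u'e'), ('', '', u'l'), ('', '', u'l'), ('', '', u'o')]
#   # new -> [('', '', u'h'), ('', 'ins', u'a'), ('', '', u'l'), ('', '', u'l'), ('', '', u'o')]
#   # sim -> 0.8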

class DiffSet(object):
    """
    An object for parsing the diff result from diffs.DiffProcessor and
    adding highlighting, side by side/unified renderings and line diffs
    """

    HL_REAL = 'REAL'  # highlights using original file, slow
    HL_FAST = 'FAST'  # highlights using just the line, fast but not correct
                      # in the case of multiline code
    HL_NONE = 'NONE'  # no highlighting, fastest

    def __init__(self, highlight_mode=HL_REAL, repo_name=None,
                 source_repo_name=None,
                 source_node_getter=lambda filename: None,
                 target_node_getter=lambda filename: None,
                 source_nodes=None, target_nodes=None,
                 max_file_size_limit=150 * 1024,  # files over this size will
                                                  # use fast highlighting
                 comments=None,
                 ):

        self.highlight_mode = highlight_mode
        self.highlighted_filenodes = {}
        self.source_node_getter = source_node_getter
        self.target_node_getter = target_node_getter
        self.source_nodes = source_nodes or {}
        self.target_nodes = target_nodes or {}
        self.repo_name = repo_name
        self.source_repo_name = source_repo_name or repo_name
        self.comments = comments or {}
        self.comments_store = self.comments.copy()
        self.max_file_size_limit = max_file_size_limit

    def render_patchset(self, patchset, source_ref=None, target_ref=None):
        diffset = AttributeDict(dict(
            lines_added=0,
            lines_deleted=0,
            changed_files=0,
            files=[],
            file_stats={},
            limited_diff=isinstance(patchset, LimitedDiffContainer),
            repo_name=self.repo_name,
            source_repo_name=self.source_repo_name,
            source_ref=source_ref,
            target_ref=target_ref,
        ))
        for patch in patchset:
            diffset.file_stats[patch['filename']] = patch['stats']
            filediff = self.render_patch(patch)
            filediff.diffset = diffset
            diffset.files.append(filediff)
            diffset.changed_files += 1
            if not patch['stats']['binary']:
                diffset.lines_added += patch['stats']['added']
                diffset.lines_deleted += patch['stats']['deleted']

        return diffset

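    # Illustrative usage of render_patchset above (editor sketch; the
    # argument values are assumed examples):
    #   diffset = DiffSet(highlight_mode=DiffSet.HL_FAST,
    #                     repo_name='some-repo',
    #                     source_node_getter=lambda fname: None,
    #                     target_node_getter=lambda fname: None)
    #   rendered = diffset.render_patchset(parsed)  # parsed: DiffProcessor.prepare()
    #   rendered.changed_files, rendered.lines_added, rendered.lines_deleted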
    _lexer_cache = {}
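    # Editor note: the cache above is a class attribute, so writing to
    # self._lexer_cache below shares cached lexers across DiffSet instances.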
    def _get_lexer_for_filename(self, filename, filenode=None):
        # cached because we might need to call it twice for source/target
        if filename not in self._lexer_cache:
            if filenode:
                lexer = filenode.lexer
                extension = filenode.extension
            else:
                lexer = FileNode.get_lexer(filename=filename)
                extension = filename.split('.')[-1]

            lexer = get_custom_lexer(extension) or lexer
            self._lexer_cache[filename] = lexer
        return self._lexer_cache[filename]

    def render_patch(self, patch):
        log.debug('rendering diff for %r', patch['filename'])

        source_filename = patch['original_filename']
        target_filename = patch['filename']

        source_lexer = plain_text_lexer
        target_lexer = plain_text_lexer

        if not patch['stats']['binary']:
            if self.highlight_mode == self.HL_REAL:
                if (source_filename and patch['operation'] in ('D', 'M')
                        and source_filename not in self.source_nodes):
                    self.source_nodes[source_filename] = (
                        self.source_node_getter(source_filename))

                if (target_filename and patch['operation'] in ('A', 'M')
                        and target_filename not in self.target_nodes):
                    self.target_nodes[target_filename] = (
                        self.target_node_getter(target_filename))

            elif self.highlight_mode == self.HL_FAST:
                source_lexer = self._get_lexer_for_filename(source_filename)
                target_lexer = self._get_lexer_for_filename(target_filename)

        source_file = self.source_nodes.get(source_filename, source_filename)
        target_file = self.target_nodes.get(target_filename, target_filename)

        source_filenode, target_filenode = None, None

        # TODO: dan: FileNode.lexer works on the content of the file - which
        # can be slow - issue #4289 explains a lexer clean up - which once
        # done can allow caching a lexer for a filenode to avoid the file lookup
        if isinstance(source_file, FileNode):
            source_filenode = source_file
            #source_lexer = source_file.lexer
            source_lexer = self._get_lexer_for_filename(source_filename)
            source_file.lexer = source_lexer

        if isinstance(target_file, FileNode):
            target_filenode = target_file
            #target_lexer = target_file.lexer
            target_lexer = self._get_lexer_for_filename(target_filename)
            target_file.lexer = target_lexer

        source_file_path, target_file_path = None, None

        if source_filename != '/dev/null':
            source_file_path = source_filename
        if target_filename != '/dev/null':
            target_file_path = target_filename

        source_file_type = source_lexer.name
        target_file_type = target_lexer.name

-        op_hunks = patch['chunks'][0]
-        hunks = patch['chunks'][1:]
-
        filediff = AttributeDict({
            'source_file_path': source_file_path,
            'target_file_path': target_file_path,
            'source_filenode': source_filenode,
            'target_filenode': target_filenode,
-            'hunks': [],
            'source_file_type': target_file_type,
            'target_file_type': source_file_type,
-            'patch': patch,
+            'patch': {'filename': patch['filename'], 'stats': patch['stats']},
+            'operation': patch['operation'],
            'source_mode': patch['stats']['old_mode'],
            'target_mode': patch['stats']['new_mode'],
            'limited_diff': isinstance(patch, LimitedDiffContainer),
+            'hunks': [],
            'diffset': self,
        })

-        for hunk in hunks:
+        for hunk in patch['chunks'][1:]:
            hunkbit = self.parse_hunk(hunk, source_file, target_file)
            hunkbit.source_file_path = source_file_path
+            hunkbit.target_file_path = target_file_path
            filediff.hunks.append(hunkbit)

        left_comments = {}
-
        if source_file_path in self.comments_store:
            for lineno, comments in self.comments_store[source_file_path].items():
                left_comments[lineno] = comments

        if target_file_path in self.comments_store:
            for lineno, comments in self.comments_store[target_file_path].items():
                left_comments[lineno] = comments
+        filediff.left_comments = left_comments

-        filediff.left_comments = left_comments
        return filediff

    def parse_hunk(self, hunk, source_file, target_file):
        result = AttributeDict(dict(
            source_start=hunk['source_start'],
            source_length=hunk['source_length'],
            target_start=hunk['target_start'],
            target_length=hunk['target_length'],
            section_header=hunk['section_header'],
            lines=[],
        ))
        before, after = [], []

        for line in hunk['lines']:
+
            if line['action'] == 'unmod':
                result.lines.extend(
                    self.parse_lines(before, after, source_file, target_file))
                after.append(line)
                before.append(line)
            elif line['action'] == 'add':
                after.append(line)
            elif line['action'] == 'del':
                before.append(line)
            elif line['action'] == 'old-no-nl':
                before.append(line)
            elif line['action'] == 'new-no-nl':
                after.append(line)

        result.lines.extend(
            self.parse_lines(before, after, source_file, target_file))
        result.unified = self.as_unified(result.lines)
        result.sideside = result.lines

        return result

    def parse_lines(self, before_lines, after_lines, source_file, target_file):
        # TODO: dan: investigate doing the diff comparison and fast highlighting
        # on the entire before and after buffered block lines rather than by
        # line, this means we can get better 'fast' highlighting if the context
        # allows it - eg.
        # line 4: """
        # line 5: this gets highlighted as a string
        # line 6: """

        lines = []
        while before_lines or after_lines:
            before, after = None, None
            before_tokens, after_tokens = None, None

            if before_lines:
                before = before_lines.pop(0)
            if after_lines:
                after = after_lines.pop(0)

            original = AttributeDict()
            modified = AttributeDict()

            if before:
                if before['action'] == 'old-no-nl':
                    before_tokens = [('nonl', before['line'])]
                else:
                    before_tokens = self.get_line_tokens(
-                        line_text=before['line'], line_number=before['old_lineno'],
+                        line_text=before['line'],
+                        line_number=before['old_lineno'],
                        file=source_file)
                original.lineno = before['old_lineno']
                original.content = before['line']
                original.action = self.action_to_op(before['action'])
                original.comments = self.get_comments_for('old',
                    source_file, before['old_lineno'])

            if after:
                if after['action'] == 'new-no-nl':
                    after_tokens = [('nonl', after['line'])]
                else:
                    after_tokens = self.get_line_tokens(
                        line_text=after['line'], line_number=after['new_lineno'],
                        file=target_file)
                modified.lineno = after['new_lineno']
                modified.content = after['line']
                modified.action = self.action_to_op(after['action'])
                modified.comments = self.get_comments_for('new',
                    target_file, after['new_lineno'])

            # diff the lines
            if before_tokens and after_tokens:
                o_tokens, m_tokens, similarity = tokens_diff(
                    before_tokens, after_tokens)
                original.content = render_tokenstream(o_tokens)
                modified.content = render_tokenstream(m_tokens)
            elif before_tokens:
                original.content = render_tokenstream(
                    [(x[0], '', x[1]) for x in before_tokens])
            elif after_tokens:
                modified.content = render_tokenstream(
                    [(x[0], '', x[1]) for x in after_tokens])

            lines.append(AttributeDict({
                'original': original,
                'modified': modified,
            }))

        return lines

    def get_comments_for(self, version, file, line_number):
        if hasattr(file, 'unicode_path'):
            file = file.unicode_path

        if not isinstance(file, basestring):
            return None

        line_key = {
            'old': 'o',
            'new': 'n',
        }[version] + str(line_number)

        if file in self.comments_store:
            file_comments = self.comments_store[file]
            if line_key in file_comments:
                return file_comments.pop(line_key)

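    # Editor note on get_comments_for above: line keys combine a version
    # prefix with the line number ('o10' = old line 10, 'n10' = new line 10),
    # and pop() consumes each comment so it attaches to at most one line.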
    def get_line_tokens(self, line_text, line_number, file=None):
        filenode = None
        filename = None

        if isinstance(file, basestring):
            filename = file
        elif isinstance(file, FileNode):
            filenode = file
            filename = file.unicode_path

        if self.highlight_mode == self.HL_REAL and filenode:
            lexer = self._get_lexer_for_filename(filename)
            file_size_allowed = file.size < self.max_file_size_limit
            if line_number and file_size_allowed:
                return self.get_tokenized_filenode_line(
                    file, line_number, lexer)

        if self.highlight_mode in (self.HL_REAL, self.HL_FAST) and filename:
            lexer = self._get_lexer_for_filename(filename)
            return list(tokenize_string(line_text, lexer))

        return list(tokenize_string(line_text, plain_text_lexer))

    def get_tokenized_filenode_line(self, filenode, line_number, lexer=None):

        if filenode not in self.highlighted_filenodes:
            tokenized_lines = filenode_as_lines_tokens(filenode, lexer)
            self.highlighted_filenodes[filenode] = tokenized_lines
        return self.highlighted_filenodes[filenode][line_number - 1]

    def action_to_op(self, action):
        return {
            'add': '+',
            'del': '-',
            'unmod': ' ',
            'old-no-nl': ' ',
            'new-no-nl': ' ',
        }.get(action, action)

    def as_unified(self, lines):
        """
        Return a generator that yields the lines of a diff in unified order
        """
        def generator():
            buf = []
            for line in lines:

675 | if buf and not line.original or line.original.action == ' ': |
|
675 | if buf and not line.original or line.original.action == ' ': | |
676 | for b in buf: |
|
676 | for b in buf: | |
677 | yield b |
|
677 | yield b | |
678 | buf = [] |
|
678 | buf = [] | |
679 |
|
679 | |||
680 | if line.original: |
|
680 | if line.original: | |
681 | if line.original.action == ' ': |
|
681 | if line.original.action == ' ': | |
682 | yield (line.original.lineno, line.modified.lineno, |
|
682 | yield (line.original.lineno, line.modified.lineno, | |
683 | line.original.action, line.original.content, |
|
683 | line.original.action, line.original.content, | |
684 | line.original.comments) |
|
684 | line.original.comments) | |
685 | continue |
|
685 | continue | |
686 |
|
686 | |||
687 | if line.original.action == '-': |
|
687 | if line.original.action == '-': | |
688 | yield (line.original.lineno, None, |
|
688 | yield (line.original.lineno, None, | |
689 | line.original.action, line.original.content, |
|
689 | line.original.action, line.original.content, | |
690 | line.original.comments) |
|
690 | line.original.comments) | |
691 |
|
691 | |||
692 | if line.modified.action == '+': |
|
692 | if line.modified.action == '+': | |
693 | buf.append(( |
|
693 | buf.append(( | |
694 | None, line.modified.lineno, |
|
694 | None, line.modified.lineno, | |
695 | line.modified.action, line.modified.content, |
|
695 | line.modified.action, line.modified.content, | |
696 | line.modified.comments)) |
|
696 | line.modified.comments)) | |
697 | continue |
|
697 | continue | |
698 |
|
698 | |||
699 | if line.modified: |
|
699 | if line.modified: | |
700 | yield (None, line.modified.lineno, |
|
700 | yield (None, line.modified.lineno, | |
701 | line.modified.action, line.modified.content, |
|
701 | line.modified.action, line.modified.content, | |
702 | line.modified.comments) |
|
702 | line.modified.comments) | |
703 |
|
703 | |||
704 | for b in buf: |
|
704 | for b in buf: | |
705 | yield b |
|
705 | yield b | |
706 |
|
706 | |||
707 | return generator() |
|
707 | return generator() |
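Note: as_unified flattens side-by-side line pairs into unified order, buffering '+' lines so a run of additions is emitted after the matching deletions. Each yielded item is a 5-tuple of (old_lineno, new_lineno, action, content, comments). A sketch of how such tuples could be consumed for display (the sample rows are made up for illustration):

    rows = [
        (10, 12, ' ', 'unchanged line', []),
        (11, None, '-', 'removed line', []),
        (None, 13, '+', 'added line', []),
    ]
    for old_no, new_no, action, content, comments in rows:
        gutter = '%4s %4s' % (old_no or '', new_no or '')
        print('%s %s %s' % (gutter, action, content))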
@@ -1,672 +1,671 b''
1 | <%namespace name="commentblock" file="/changeset/changeset_file_comment.mako"/>
2 |
3 | <%def name="diff_line_anchor(filename, line, type)"><%
4 | return '%s_%s_%i' % (h.safeid(filename), type, line)
5 | %></%def>
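Note: diff_line_anchor builds the per-line ids (of the form filename_o_12 for old-side lines and filename_n_42 for new-side lines) that the hunk renderers further down attach to their line-number cells. A Python sketch of the scheme; the real sanitization lives in RhodeCode's h.safeid(), so the stand-in below is an assumption:

    def diff_line_anchor(filename, line, type):
        safe_id = filename.replace('/', '-').replace('.', '-')  # stand-in for h.safeid()
        return '%s_%s_%i' % (safe_id, type, line)

    print(diff_line_anchor('docs/index.rst', 42, 'n'))  # docs-index-rst_n_42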
6 |
7 | <%def name="action_class(action)">
8 | <%
9 | return {
10 | '-': 'cb-deletion',
11 | '+': 'cb-addition',
12 | ' ': 'cb-context',
13 | }.get(action, 'cb-empty')
14 | %>
15 | </%def>
16 |
17 | <%def name="op_class(op_id)">
18 | <%
19 | return {
20 | DEL_FILENODE: 'deletion', # file deleted
21 | BIN_FILENODE: 'warning' # binary diff hidden
22 | }.get(op_id, 'addition')
23 | %>
24 | </%def>
25 |
26 | <%def name="link_for(**kw)">
27 | <%
28 | new_args = request.GET.mixed()
29 | new_args.update(kw)
30 | return h.url('', **new_args)
31 | %>
32 | </%def>
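Note: link_for regenerates the current URL with selected query arguments overridden, which is how the "Show full diff" links below append fulldiff=1 while keeping the rest of the query string intact. A sketch of the idea, with a plain dict standing in for request.GET.mixed() and a hand-rolled query string standing in for h.url():

    def link_for(current_args, **overrides):
        new_args = dict(current_args)
        new_args.update(overrides)
        return '?' + '&'.join('%s=%s' % item for item in sorted(new_args.items()))

    print(link_for({'diffmode': 'unified'}, fulldiff=1))  # ?diffmode=unified&fulldiff=1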
33 |
34 | <%def name="render_diffset(diffset, commit=None,
35 |
36 | # collapse all file diff entries when there are more than this amount of files in the diff
37 | collapse_when_files_over=20,
38 |
39 | # collapse lines in the diff when more than this amount of lines changed in the file diff
40 | lines_changed_limit=500,
41 |
42 | # add a ruler to the output
43 | ruler_at_chars=0,
44 |
45 | # show inline comments
46 | use_comments=False,
47 |
48 | # disable new comments
49 | disable_new_comments=False,
50 |
51 | # special file-comments that were deleted in previous versions
52 | # it's used for showing outdated comments for deleted files in a PR
53 | deleted_files_comments=None
54 |
55 | )">
56 |
57 | %if use_comments:
58 | <div id="cb-comments-inline-container-template" class="js-template">
59 | ${inline_comments_container([])}
60 | </div>
61 | <div class="js-template" id="cb-comment-inline-form-template">
62 | <div class="comment-inline-form ac">
63 |
64 | %if c.rhodecode_user.username != h.DEFAULT_USER:
65 | ## render template for inline comments
66 | ${commentblock.comment_form(form_type='inline')}
67 | %else:
68 | ${h.form('', class_='inline-form comment-form-login', method='get')}
69 | <div class="pull-left">
70 | <div class="comment-help pull-right">
71 | ${_('You need to be logged in to leave comments.')} <a href="${h.route_path('login', _query={'came_from': h.url.current()})}">${_('Login now')}</a>
72 | </div>
73 | </div>
74 | <div class="comment-button pull-right">
75 | <button type="button" class="cb-comment-cancel" onclick="return Rhodecode.comments.cancelComment(this);">
76 | ${_('Cancel')}
77 | </button>
78 | </div>
79 | <div class="clearfix"></div>
80 | ${h.end_form()}
81 | %endif
82 | </div>
83 | </div>
84 |
85 | %endif
86 | <%
87 | collapse_all = len(diffset.files) > collapse_when_files_over
88 | %>
89 |
90 | %if c.diffmode == 'sideside':
91 | <style>
92 | .wrapper {
93 | max-width: 1600px !important;
94 | }
95 | </style>
96 | %endif
97 |
98 | %if ruler_at_chars:
99 | <style>
100 | .diff table.cb .cb-content:after {
101 | content: "";
102 | border-left: 1px solid blue;
103 | position: absolute;
104 | top: 0;
105 | height: 18px;
106 | opacity: .2;
107 | z-index: 10;
108 | //## +5 to account for diff action (+/-)
109 | left: ${ruler_at_chars + 5}ch; }
110 | </style>
111 | %endif
112 |
113 | <div class="diffset ${disable_new_comments and 'diffset-comments-disabled'}">
114 | <div class="diffset-heading ${diffset.limited_diff and 'diffset-heading-warning' or ''}">
115 | %if commit:
116 | <div class="pull-right">
117 | <a class="btn tooltip" title="${h.tooltip(_('Browse Files at revision {}').format(commit.raw_id))}" href="${h.url('files_home',repo_name=diffset.repo_name, revision=commit.raw_id, f_path='')}">
118 | ${_('Browse Files')}
119 | </a>
120 | </div>
121 | %endif
122 | <h2 class="clearinner">
123 | %if commit:
124 | <a class="tooltip revision" title="${h.tooltip(commit.message)}" href="${h.url('changeset_home',repo_name=c.repo_name,revision=commit.raw_id)}">${'r%s:%s' % (commit.revision,h.short_id(commit.raw_id))}</a> -
125 | ${h.age_component(commit.date)} -
126 | %endif
127 | %if diffset.limited_diff:
128 | ${_('The requested commit is too big and content was truncated.')}
129 |
130 | ${ungettext('%(num)s file changed.', '%(num)s files changed.', diffset.changed_files) % {'num': diffset.changed_files}}
131 | <a href="${link_for(fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a>
132 | %else:
133 | ${ungettext('%(num)s file changed: %(linesadd)s inserted, ''%(linesdel)s deleted',
134 | '%(num)s files changed: %(linesadd)s inserted, %(linesdel)s deleted', diffset.changed_files) % {'num': diffset.changed_files, 'linesadd': diffset.lines_added, 'linesdel': diffset.lines_deleted}}
135 | %endif
136 |
137 | </h2>
138 | </div>
139 |
140 | %if not diffset.files:
141 | <p class="empty_data">${_('No files')}</p>
142 | %endif
143 |
144 | <div class="filediffs">
145 | ## initial value could be marked as False later on
146 | <% over_lines_changed_limit = False %>
147 | %for i, filediff in enumerate(diffset.files):
148 |
149 | <%
150 | lines_changed = filediff.patch['stats']['added'] + filediff.patch['stats']['deleted']
151 | over_lines_changed_limit = lines_changed > lines_changed_limit
152 | %>
153 | <input ${collapse_all and 'checked' or ''} class="filediff-collapse-state" id="filediff-collapse-${id(filediff)}" type="checkbox">
154 | <div
155 | class="filediff"
156 | data-f-path="${filediff.patch['filename']}"
157 | id="a_${h.FID('', filediff.patch['filename'])}">
158 | <label for="filediff-collapse-${id(filediff)}" class="filediff-heading">
159 | <div class="filediff-collapse-indicator"></div>
160 | ${diff_ops(filediff)}
161 | </label>
162 | ${diff_menu(filediff, use_comments=use_comments)}
163 | <table class="cb cb-diff-${c.diffmode} code-highlight ${over_lines_changed_limit and 'cb-collapsed' or ''}">
164 | %if not filediff.hunks:
165 | %for op_id, op_text in filediff.patch['stats']['ops'].items():
166 | <tr>
167 | <td class="cb-text cb-${op_class(op_id)}" ${c.diffmode == 'unified' and 'colspan=4' or 'colspan=6'}>
168 | %if op_id == DEL_FILENODE:
169 | ${_('File was deleted')}
170 | %elif op_id == BIN_FILENODE:
171 | ${_('Binary file hidden')}
172 | %else:
173 | ${op_text}
174 | %endif
175 | </td>
176 | </tr>
177 | %endfor
178 | %endif
179 | %if filediff.limited_diff:
180 | <tr class="cb-warning cb-collapser">
181 | <td class="cb-text" ${c.diffmode == 'unified' and 'colspan=4' or 'colspan=6'}>
182 | ${_('The requested commit is too big and content was truncated.')} <a href="${link_for(fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a>
183 | </td>
184 | </tr>
185 | %else:
186 | %if over_lines_changed_limit:
187 | <tr class="cb-warning cb-collapser">
188 | <td class="cb-text" ${c.diffmode == 'unified' and 'colspan=4' or 'colspan=6'}>
189 | ${_('This diff has been collapsed as it changes many lines, (%i lines changed)' % lines_changed)}
190 | <a href="#" class="cb-expand"
191 | onclick="$(this).closest('table').removeClass('cb-collapsed'); return false;">${_('Show them')}
192 | </a>
193 | <a href="#" class="cb-collapse"
194 | onclick="$(this).closest('table').addClass('cb-collapsed'); return false;">${_('Hide them')}
195 | </a>
196 | </td>
197 | </tr>
198 | %endif
199 | %endif
200 |
201 | %for hunk in filediff.hunks:
202 | <tr class="cb-hunk">
203 | <td ${c.diffmode == 'unified' and 'colspan=3' or ''}>
204 | ## TODO: dan: add ajax loading of more context here
205 | ## <a href="#">
206 | <i class="icon-more"></i>
207 | ## </a>
208 | </td>
209 | <td ${c.diffmode == 'sideside' and 'colspan=5' or ''}>
210 | @@
211 | -${hunk.source_start},${hunk.source_length}
212 | +${hunk.target_start},${hunk.target_length}
213 | ${hunk.section_header}
214 | </td>
215 | </tr>
216 | %if c.diffmode == 'unified':
217 | ${render_hunk_lines_unified(hunk, use_comments=use_comments)}
218 | %elif c.diffmode == 'sideside':
219 | ${render_hunk_lines_sideside(hunk, use_comments=use_comments)}
220 | %else:
221 | <tr class="cb-line">
222 | <td>unknown diff mode</td>
223 | </tr>
224 | %endif
225 | %endfor
226 |
227 | ## outdated comments that do not fit into currently displayed lines
228 | % for lineno, comments in filediff.left_comments.items():
229 |
230 | %if c.diffmode == 'unified':
231 | <tr class="cb-line">
232 | <td class="cb-data cb-context"></td>
233 | <td class="cb-lineno cb-context"></td>
234 | <td class="cb-lineno cb-context"></td>
235 | <td class="cb-content cb-context">
236 | ${inline_comments_container(comments)}
237 | </td>
238 | </tr>
239 | %elif c.diffmode == 'sideside':
240 | <tr class="cb-line">
241 | <td class="cb-data cb-context"></td>
242 | <td class="cb-lineno cb-context"></td>
243 | <td class="cb-content cb-context"></td>
244 |
245 | <td class="cb-data cb-context"></td>
246 | <td class="cb-lineno cb-context"></td>
247 | <td class="cb-content cb-context">
248 | ${inline_comments_container(comments)}
249 | </td>
250 | </tr>
251 | %endif
252 |
253 | % endfor
254 |
255 | </table>
256 | </div>
257 | %endfor
258 |
259 | ## outdated comments that are made for a file that has been deleted
260 | % for filename, comments_dict in (deleted_files_comments or {}).items():
261 |
262 | <div class="filediffs filediff-outdated" style="display: none">
263 | <input ${collapse_all and 'checked' or ''} class="filediff-collapse-state" id="filediff-collapse-${id(filename)}" type="checkbox">
264 | <div class="filediff" data-f-path="${filename}" id="a_${h.FID('', filename)}">
265 | <label for="filediff-collapse-${id(filename)}" class="filediff-heading">
266 | <div class="filediff-collapse-indicator"></div>
267 | <span class="pill">
268 | ## file was deleted
269 | <strong>${filename}</strong>
270 | </span>
271 | <span class="pill-group" style="float: left">
272 | ## file op, doesn't need translation
273 | <span class="pill" op="removed">removed in this version</span>
274 | </span>
275 | <a class="pill filediff-anchor" href="#a_${h.FID('', filename)}">¶</a>
276 | <span class="pill-group" style="float: right">
277 | <span class="pill" op="deleted">-${comments_dict['stats']}</span>
278 | </span>
279 | </label>
280 |
281 | <table class="cb cb-diff-${c.diffmode} code-highlight ${over_lines_changed_limit and 'cb-collapsed' or ''}">
282 | <tr>
283 | % if c.diffmode == 'unified':
284 | <td></td>
285 | %endif
286 |
287 | <td></td>
288 | <td class="cb-text cb-${op_class(BIN_FILENODE)}" ${c.diffmode == 'unified' and 'colspan=4' or 'colspan=5'}>
289 | ${_('File was deleted in this version, and outdated comments were made on it')}
290 | </td>
291 | </tr>
292 | %if c.diffmode == 'unified':
293 | <tr class="cb-line">
294 | <td class="cb-data cb-context"></td>
295 | <td class="cb-lineno cb-context"></td>
296 | <td class="cb-lineno cb-context"></td>
297 | <td class="cb-content cb-context">
298 | ${inline_comments_container(comments_dict['comments'])}
299 | </td>
300 | </tr>
301 | %elif c.diffmode == 'sideside':
302 | <tr class="cb-line">
303 | <td class="cb-data cb-context"></td>
304 | <td class="cb-lineno cb-context"></td>
305 | <td class="cb-content cb-context"></td>
306 |
307 | <td class="cb-data cb-context"></td>
308 | <td class="cb-lineno cb-context"></td>
309 | <td class="cb-content cb-context">
310 | ${inline_comments_container(comments_dict['comments'])}
311 | </td>
312 | </tr>
313 | %endif
314 | </table>
315 | </div>
316 | </div>
317 | % endfor
318 |
319 | </div>
320 | </div>
321 | </%def>
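Note: render_diffset drives two independent collapse rules: every file entry collapses when the diffset carries more than collapse_when_files_over files, and a single file's hunks collapse when that file changed more than lines_changed_limit lines. The same logic as a standalone sketch, using the template's default thresholds:

    def collapse_flags(num_files, lines_changed,
                       collapse_when_files_over=20, lines_changed_limit=500):
        collapse_all = num_files > collapse_when_files_over
        over_lines_changed_limit = lines_changed > lines_changed_limit
        return collapse_all, over_lines_changed_limit

    print(collapse_flags(25, 120))   # (True, False)
    print(collapse_flags(3, 1200))   # (False, True)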
322 |
323 | <%def name="diff_ops(filediff)">
324 | <%
  - | stats = filediff['patch']['stats']
325 | from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
326 | MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE, COPIED_FILENODE
327 | %>
328 | <span class="pill">
329 | %if filediff.source_file_path and filediff.target_file_path:
330 | %if filediff.source_file_path != filediff.target_file_path:
331 | ## file was renamed, or copied
332 | %if RENAMED_FILENODE in filediff.patch['stats']['ops']:
333 | <strong>${filediff.target_file_path}</strong> ⬅ <del>${filediff.source_file_path}</del>
334 | %elif COPIED_FILENODE in filediff.patch['stats']['ops']:
335 | <strong>${filediff.target_file_path}</strong> ⬅ ${filediff.source_file_path}
336 | %endif
337 | %else:
338 | ## file was modified
339 | <strong>${filediff.source_file_path}</strong>
340 | %endif
341 | %else:
342 | %if filediff.source_file_path:
343 | ## file was deleted
344 | <strong>${filediff.source_file_path}</strong>
345 | %else:
346 | ## file was added
347 | <strong>${filediff.target_file_path}</strong>
348 | %endif
349 | %endif
350 | </span>
351 | <span class="pill-group" style="float: left">
352 | %if filediff.limited_diff:
353 | <span class="pill tooltip" op="limited" title="The stats for this diff are not complete">limited diff</span>
354 | %endif
355 |
356 | %if RENAMED_FILENODE in filediff.patch['stats']['ops']:
357 | <span class="pill" op="renamed">renamed</span>
358 | %endif
359 |
360 | %if COPIED_FILENODE in filediff.patch['stats']['ops']:
361 | <span class="pill" op="copied">copied</span>
362 | %endif
363 |
364 | %if NEW_FILENODE in filediff.patch['stats']['ops']:
365 | <span class="pill" op="created">created</span>
366 | %if filediff['target_mode'].startswith('120'):
367 | <span class="pill" op="symlink">symlink</span>
368 | %else:
369 | <span class="pill" op="mode">${nice_mode(filediff['target_mode'])}</span>
370 | %endif
371 | %endif
372 |
373 | %if DEL_FILENODE in filediff.patch['stats']['ops']:
374 | <span class="pill" op="removed">removed</span>
375 | %endif
376 |
377 | %if CHMOD_FILENODE in filediff.patch['stats']['ops']:
378 | <span class="pill" op="mode">
379 | ${nice_mode(filediff['source_mode'])} ➡ ${nice_mode(filediff['target_mode'])}
380 | </span>
381 | %endif
382 | </span>
383 |
384 | <a class="pill filediff-anchor" href="#a_${h.FID('', filediff.patch['filename'])}">¶</a>
385 |
386 | <span class="pill-group" style="float: right">
387 | %if BIN_FILENODE in filediff.patch['stats']['ops']:
388 | <span class="pill" op="binary">binary</span>
389 | %if MOD_FILENODE in filediff.patch['stats']['ops']:
390 | <span class="pill" op="modified">modified</span>
391 | %endif
392 | %endif
393 | %if filediff.patch['stats']['added']:
394 | <span class="pill" op="added">+${filediff.patch['stats']['added']}</span>
395 | %endif
396 | %if filediff.patch['stats']['deleted']:
397 | <span class="pill" op="deleted">-${filediff.patch['stats']['deleted']}</span>
398 | %endif
399 | </span>
400 |
401 | </%def>
402 |
403 | <%def name="nice_mode(filemode)">
404 | ${filemode.startswith('100') and filemode[3:] or filemode}
405 | </%def>
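Note: nice_mode trims the '100' prefix Git uses for regular-file modes (100644 becomes 644) and leaves anything else, such as 120000 symlink modes, untouched. The equivalent logic in plain Python:

    def nice_mode(filemode):
        return filemode[3:] if filemode.startswith('100') else filemode

    print(nice_mode('100644'))  # 644
    print(nice_mode('120000'))  # 120000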
406 |
407 | <%def name="diff_menu(filediff, use_comments=False)">
408 | <div class="filediff-menu">
409 | %if filediff.diffset.source_ref:
410 | %if filediff.operation in ['D', 'M']:
411 | <a
412 | class="tooltip"
413 | href="${h.url('files_home',repo_name=filediff.diffset.repo_name,f_path=filediff.source_file_path,revision=filediff.diffset.source_ref)}"
414 | title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}"
415 | >
416 | ${_('Show file before')}
417 | </a> |
418 | %else:
419 | <span
420 | class="tooltip"
421 | title="${h.tooltip(_('File no longer present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}"
422 | >
423 | ${_('Show file before')}
424 | </span> |
425 | %endif
426 | %if filediff.operation in ['A', 'M']:
427 | <a
428 | class="tooltip"
429 | href="${h.url('files_home',repo_name=filediff.diffset.source_repo_name,f_path=filediff.target_file_path,revision=filediff.diffset.target_ref)}"
430 | title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}"
431 | >
432 | ${_('Show file after')}
433 | </a> |
434 | %else:
435 | <span
436 | class="tooltip"
437 | title="${h.tooltip(_('File no longer present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}"
438 | >
439 | ${_('Show file after')}
440 | </span> |
441 | %endif
442 | <a
443 | class="tooltip"
444 | title="${h.tooltip(_('Raw diff'))}"
445 | href="${h.url('files_diff_home',repo_name=filediff.diffset.repo_name,f_path=filediff.target_file_path,diff2=filediff.diffset.target_ref,diff1=filediff.diffset.source_ref,diff='raw')}"
446 | >
447 | ${_('Raw diff')}
448 | </a> |
449 | <a
450 | class="tooltip"
451 | title="${h.tooltip(_('Download diff'))}"
452 | href="${h.url('files_diff_home',repo_name=filediff.diffset.repo_name,f_path=filediff.target_file_path,diff2=filediff.diffset.target_ref,diff1=filediff.diffset.source_ref,diff='download')}"
453 | >
454 | ${_('Download diff')}
455 | </a>
456 | % if use_comments:
457 | |
458 | % endif
459 |
460 | ## TODO: dan: refactor ignorews_url and context_url into the diff renderer same as diffmode=unified/sideside. Also use ajax to load more context (by clicking hunks)
461 | %if hasattr(c, 'ignorews_url'):
462 | ${c.ignorews_url(request.GET, h.FID('', filediff.patch['filename']))}
463 | %endif
464 | %if hasattr(c, 'context_url'):
465 | ${c.context_url(request.GET, h.FID('', filediff.patch['filename']))}
466 | %endif
467 |
468 | %if use_comments:
469 | <a href="#" onclick="return Rhodecode.comments.toggleComments(this);">
470 | <span class="show-comment-button">${_('Show comments')}</span><span class="hide-comment-button">${_('Hide comments')}</span>
471 | </a>
472 | %endif
473 | %endif
474 | </div>
475 | </%def>
476 |
477 |
478 | <%def name="inline_comments_container(comments)">
479 | <div class="inline-comments">
480 | %for comment in comments:
481 | ${commentblock.comment_block(comment, inline=True)}
482 | %endfor
483 |
484 | % if comments and comments[-1].outdated:
485 | <span class="btn btn-secondary cb-comment-add-button comment-outdated"
486 | style="display: none;">
487 | ${_('Add another comment')}
488 | </span>
489 | % else:
490 | <span onclick="return Rhodecode.comments.createComment(this)"
491 | class="btn btn-secondary cb-comment-add-button">
492 | ${_('Add another comment')}
493 | </span>
494 | % endif
495 |
496 | </div>
497 | </%def>
498 |
499 |
500 | <%def name="render_hunk_lines_sideside(hunk, use_comments=False)">
501 | %for i, line in enumerate(hunk.sideside):
502 | <%
503 | old_line_anchor, new_line_anchor = None, None
504 | if line.original.lineno:
505 | old_line_anchor = diff_line_anchor(hunk.source_file_path, line.original.lineno, 'o')
506 | if line.modified.lineno:
507 | new_line_anchor = diff_line_anchor(hunk.target_file_path, line.modified.lineno, 'n')
508 | %>
509 |
510 | <tr class="cb-line">
511 | <td class="cb-data ${action_class(line.original.action)}"
512 | data-line-number="${line.original.lineno}"
513 | >
514 | <div>
515 | %if line.original.comments:
516 | <i class="icon-comment" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
517 | %endif
518 | </div>
519 | </td>
520 | <td class="cb-lineno ${action_class(line.original.action)}"
521 | data-line-number="${line.original.lineno}"
522 | %if old_line_anchor:
523 | id="${old_line_anchor}"
524 | %endif
525 | >
526 | %if line.original.lineno:
527 | <a name="${old_line_anchor}" href="#${old_line_anchor}">${line.original.lineno}</a>
528 | %endif
529 | </td>
530 | <td class="cb-content ${action_class(line.original.action)}"
531 | data-line-number="o${line.original.lineno}"
532 | >
533 | %if use_comments and line.original.lineno:
534 | ${render_add_comment_button()}
535 | %endif
536 | <span class="cb-code">${line.original.action} ${line.original.content or '' | n}</span>
537 | %if use_comments and line.original.lineno and line.original.comments:
538 | ${inline_comments_container(line.original.comments)}
539 | %endif
540 | </td>
541 | <td class="cb-data ${action_class(line.modified.action)}"
542 | data-line-number="${line.modified.lineno}"
543 | >
544 | <div>
545 | %if line.modified.comments:
546 | <i class="icon-comment" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
547 | %endif
548 | </div>
549 | </td>
550 | <td class="cb-lineno ${action_class(line.modified.action)}"
551 | data-line-number="${line.modified.lineno}"
552 | %if new_line_anchor:
553 | id="${new_line_anchor}"
554 | %endif
555 | >
556 | %if line.modified.lineno:
557 | <a name="${new_line_anchor}" href="#${new_line_anchor}">${line.modified.lineno}</a>
558 | %endif
559 | </td>
560 | <td class="cb-content ${action_class(line.modified.action)}"
561 | data-line-number="n${line.modified.lineno}"
562 | >
563 | %if use_comments and line.modified.lineno:
564 | ${render_add_comment_button()}
565 | %endif
566 | <span class="cb-code">${line.modified.action} ${line.modified.content or '' | n}</span>
567 | %if use_comments and line.modified.lineno and line.modified.comments:
568 | ${inline_comments_container(line.modified.comments)}
569 | %endif
570 | </td>
571 | </tr>
572 | %endfor
573 | </%def>
574 |
575 |
576 | <%def name="render_hunk_lines_unified(hunk, use_comments=False)">
577 | %for old_line_no, new_line_no, action, content, comments in hunk.unified:
578 | <%
579 | old_line_anchor, new_line_anchor = None, None
580 | if old_line_no:
581 | old_line_anchor = diff_line_anchor(hunk.source_file_path, old_line_no, 'o')
582 | if new_line_no:
583 | new_line_anchor = diff_line_anchor(hunk.target_file_path, new_line_no, 'n')
584 | %>
585 | <tr class="cb-line">
586 | <td class="cb-data ${action_class(action)}">
587 | <div>
588 | %if comments:
589 | <i class="icon-comment" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
590 | %endif
591 | </div>
592 | </td>
593 | <td class="cb-lineno ${action_class(action)}"
594 | data-line-number="${old_line_no}"
595 | %if old_line_anchor:
596 | id="${old_line_anchor}"
597 | %endif
598 | >
599 | %if old_line_anchor:
600 | <a name="${old_line_anchor}" href="#${old_line_anchor}">${old_line_no}</a>
601 | %endif
602 | </td>
603 | <td class="cb-lineno ${action_class(action)}"
604 | data-line-number="${new_line_no}"
605 | %if new_line_anchor:
606 | id="${new_line_anchor}"
607 | %endif
608 | >
609 | %if new_line_anchor:
610 | <a name="${new_line_anchor}" href="#${new_line_anchor}">${new_line_no}</a>
611 | %endif
612 | </td>
613 | <td class="cb-content ${action_class(action)}"
614 | data-line-number="${new_line_no and 'n' or 'o'}${new_line_no or old_line_no}"
615 | >
616 | %if use_comments:
617 | ${render_add_comment_button()}
618 | %endif
619 | <span class="cb-code">${action} ${content or '' | n}</span>
620 | %if use_comments and comments:
621 | ${inline_comments_container(comments)}
622 | %endif
623 | </td>
624 | </tr>
625 | %endfor
626 | </%def>
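Note: the unified renderer prefixes data-line-number with 'n' when a new-side line number exists and 'o' otherwise, mirroring the {'old': 'o', 'new': 'n'} line keys the Python code at the top of this diff uses to look up stored comments. A sketch of the convention:

    def line_key(old_no, new_no):
        # 'n<new>' when the line exists on the new side, else 'o<old>'.
        return 'n%s' % new_no if new_no else 'o%s' % old_no

    print(line_key(None, 14))  # n14
    print(line_key(9, None))   # o9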
627 |
628 | <%def name="render_add_comment_button()">
629 | <button class="btn btn-small btn-primary cb-comment-box-opener" onclick="return Rhodecode.comments.createComment(this)">
630 | <span><i class="icon-comment"></i></span>
631 | </button>
632 | </%def>
633 |
634 | <%def name="render_diffset_menu()">
635 |
636 | <div class="diffset-menu clearinner">
637 | <div class="pull-right">
638 | <div class="btn-group">
639 |
640 | <a
641 | class="btn ${c.diffmode == 'sideside' and 'btn-primary'} tooltip"
642 | title="${h.tooltip(_('View side by side'))}"
643 | href="${h.url_replace(diffmode='sideside')}">
644 | <span>${_('Side by Side')}</span>
645 | </a>
646 | <a
647 | class="btn ${c.diffmode == 'unified' and 'btn-primary'} tooltip"
648 | title="${h.tooltip(_('View unified'))}" href="${h.url_replace(diffmode='unified')}">
649 | <span>${_('Unified')}</span>
650 | </a>
651 | </div>
652 | </div>
653 |
654 | <div class="pull-left">
655 | <div class="btn-group">
656 | <a
657 | class="btn"
658 | href="#"
659 | onclick="$('input[class=filediff-collapse-state]').prop('checked', false); return false">${_('Expand All Files')}</a>
660 | <a
661 | class="btn"
662 | href="#"
663 | onclick="$('input[class=filediff-collapse-state]').prop('checked', true); return false">${_('Collapse All Files')}</a>
664 | <a
665 | class="btn"
666 | href="#"
667 | onclick="return Rhodecode.comments.toggleWideMode(this)">${_('Wide Mode Diff')}</a>
668 | </div>
669 | </div>
670 | </div>
671 | </%def>