--- a/kallithea/controllers/changeset.py
+++ b/kallithea/controllers/changeset.py
@@ -1,461 +1,460 @@
 # -*- coding: utf-8 -*-
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 kallithea.controllers.changeset
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 changeset controller showing changes between revisions
 
 This file was forked by the Kallithea project in July 2014.
 Original author and date, and relevant copyright and licensing information is below:
 :created_on: Apr 25, 2010
 :author: marcink
 :copyright: (c) 2013 RhodeCode GmbH, and others.
 :license: GPLv3, see LICENSE.md for more details.
 """
 
 import logging
 import traceback
 from collections import defaultdict
 
 from tg import tmpl_context as c, request, response
 from tg.i18n import ugettext as _
 from webob.exc import HTTPFound, HTTPForbidden, HTTPBadRequest, HTTPNotFound
 
 from kallithea.lib.vcs.exceptions import RepositoryError, \
     ChangesetDoesNotExistError, EmptyRepositoryError
 
 import kallithea.lib.helpers as h
 from kallithea.lib.auth import LoginRequired, HasRepoPermissionLevelDecorator, \
     NotAnonymous
 from kallithea.lib.base import BaseRepoController, render, jsonify
 from kallithea.lib.utils import action_logger
 from kallithea.lib.compat import OrderedDict
 from kallithea.lib import diffs
 from kallithea.model.db import ChangesetComment, ChangesetStatus
 from kallithea.model.comment import ChangesetCommentsModel
 from kallithea.model.changeset_status import ChangesetStatusModel
 from kallithea.model.meta import Session
 from kallithea.model.repo import RepoModel
 from kallithea.lib.diffs import LimitedDiffContainer
 from kallithea.lib.exceptions import StatusChangeOnClosedPullRequestError
 from kallithea.lib.vcs.backends.base import EmptyChangeset
 from kallithea.lib.utils2 import safe_unicode
 from kallithea.lib.graphmod import graph_data
 
 log = logging.getLogger(__name__)
 
 
 def _update_with_GET(params, GET):
     for k in ['diff1', 'diff2', 'diff']:
         params[k] += GET.getall(k)
 
 
 def anchor_url(revision, path, GET):
     fid = h.FID(revision, path)
     return h.url.current(anchor=fid, **dict(GET))
 
 
 def get_ignore_ws(fid, GET):
     ig_ws_global = GET.get('ignorews')
     ig_ws = filter(lambda k: k.startswith('WS'), GET.getall(fid))
     if ig_ws:
         try:
             return int(ig_ws[0].split(':')[-1])
         except ValueError:
             raise HTTPBadRequest()
     return ig_ws_global
 
 
 def _ignorews_url(GET, fileid=None):
     fileid = str(fileid) if fileid else None
     params = defaultdict(list)
     _update_with_GET(params, GET)
     lbl = _('Show whitespace')
     ig_ws = get_ignore_ws(fileid, GET)
     ln_ctx = get_line_ctx(fileid, GET)
     # global option
     if fileid is None:
         if ig_ws is None:
             params['ignorews'] += [1]
             lbl = _('Ignore whitespace')
         ctx_key = 'context'
         ctx_val = ln_ctx
     # per file options
     else:
         if ig_ws is None:
             params[fileid] += ['WS:1']
             lbl = _('Ignore whitespace')
 
         ctx_key = fileid
         ctx_val = 'C:%s' % ln_ctx
     # if we have passed in ln_ctx pass it along to our params
     if ln_ctx:
         params[ctx_key] += [ctx_val]
 
     params['anchor'] = fileid
     icon = h.literal('<i class="icon-strike"></i>')
     return h.link_to(icon, h.url.current(**params), title=lbl, **{'data-toggle': 'tooltip'})
 
 
 def get_line_ctx(fid, GET):
     ln_ctx_global = GET.get('context')
     if fid:
         ln_ctx = filter(lambda k: k.startswith('C'), GET.getall(fid))
     else:
         _ln_ctx = filter(lambda k: k.startswith('C'), GET)
         ln_ctx = GET.get(_ln_ctx[0]) if _ln_ctx else ln_ctx_global
         if ln_ctx:
             ln_ctx = [ln_ctx]
 
     if ln_ctx:
         retval = ln_ctx[0].split(':')[-1]
     else:
         retval = ln_ctx_global
 
     try:
         return int(retval)
     except Exception:
         return 3
 
 
 def _context_url(GET, fileid=None):
     """
     Generates url for context lines
 
     :param fileid:
     """
 
     fileid = str(fileid) if fileid else None
     ig_ws = get_ignore_ws(fileid, GET)
     ln_ctx = (get_line_ctx(fileid, GET) or 3) * 2
 
     params = defaultdict(list)
     _update_with_GET(params, GET)
 
     # global option
     if fileid is None:
         if ln_ctx > 0:
             params['context'] += [ln_ctx]
 
         if ig_ws:
             ig_ws_key = 'ignorews'
             ig_ws_val = 1
 
     # per file option
     else:
         params[fileid] += ['C:%s' % ln_ctx]
         ig_ws_key = fileid
         ig_ws_val = 'WS:%s' % 1
 
     if ig_ws:
         params[ig_ws_key] += [ig_ws_val]
 
     lbl = _('Increase diff context to %(num)s lines') % {'num': ln_ctx}
 
     params['anchor'] = fileid
     icon = h.literal('<i class="icon-sort"></i>')
     return h.link_to(icon, h.url.current(**params), title=lbl, **{'data-toggle': 'tooltip'})
 
 
 # Could perhaps be nice to have in the model but is too high level ...
 def create_comment(text, status, f_path, line_no, revision=None, pull_request_id=None, closing_pr=None):
     """Comment functionality shared between changesets and pullrequests"""
     f_path = f_path or None
     line_no = line_no or None
 
     comment = ChangesetCommentsModel().create(
         text=text,
         repo=c.db_repo.repo_id,
         author=request.authuser.user_id,
         revision=revision,
         pull_request=pull_request_id,
         f_path=f_path,
         line_no=line_no,
         status_change=ChangesetStatus.get_status_lbl(status) if status else None,
         closing_pr=closing_pr,
     )
 
     return comment
 
 
 class ChangesetController(BaseRepoController):
 
     def _before(self, *args, **kwargs):
         super(ChangesetController, self)._before(*args, **kwargs)
         c.affected_files_cut_off = 60
 
     def __load_data(self):
         repo_model = RepoModel()
         c.users_array = repo_model.get_users_js()
         c.user_groups_array = repo_model.get_user_groups_js()
 
     def _index(self, revision, method):
         c.pull_request = None
         c.anchor_url = anchor_url
         c.ignorews_url = _ignorews_url
         c.context_url = _context_url
         c.fulldiff = request.GET.get('fulldiff') # for reporting number of changed files
         # get ranges of revisions if preset
         rev_range = revision.split('...')[:2]
         enable_comments = True
         c.cs_repo = c.db_repo
         try:
             if len(rev_range) == 2:
                 enable_comments = False
                 rev_start = rev_range[0]
                 rev_end = rev_range[1]
                 rev_ranges = c.db_repo_scm_instance.get_changesets(start=rev_start,
                                                                    end=rev_end)
             else:
                 rev_ranges = [c.db_repo_scm_instance.get_changeset(revision)]
 
             c.cs_ranges = list(rev_ranges)
             if not c.cs_ranges:
                 raise RepositoryError('Changeset range returned empty result')
 
         except (ChangesetDoesNotExistError, EmptyRepositoryError):
             log.debug(traceback.format_exc())
             msg = _('Such revision does not exist for this repository')
             h.flash(msg, category='error')
             raise HTTPNotFound()
 
         c.changes = OrderedDict()
 
         c.lines_added = 0 # count of lines added
         c.lines_deleted = 0 # count of lines removes
 
         c.changeset_statuses = ChangesetStatus.STATUSES
         comments = dict()
         c.statuses = []
         c.inline_comments = []
         c.inline_cnt = 0
 
         # Iterate over ranges (default changeset view is always one changeset)
         for changeset in c.cs_ranges:
             if method == 'show':
                 c.statuses.extend([ChangesetStatusModel().get_status(
                     c.db_repo.repo_id, changeset.raw_id)])
 
                 # Changeset comments
                 comments.update((com.comment_id, com)
                                 for com in ChangesetCommentsModel()
                                 .get_comments(c.db_repo.repo_id,
                                               revision=changeset.raw_id))
 
                 # Status change comments - mostly from pull requests
                 comments.update((st.comment_id, st.comment)
                                 for st in ChangesetStatusModel()
                                 .get_statuses(c.db_repo.repo_id,
                                               changeset.raw_id, with_revisions=True)
                                 if st.comment_id is not None)
 
                 inlines = ChangesetCommentsModel() \
                     .get_inline_comments(c.db_repo.repo_id,
                                          revision=changeset.raw_id)
                 c.inline_comments.extend(inlines)
 
             cs2 = changeset.raw_id
             cs1 = changeset.parents[0].raw_id if changeset.parents else EmptyChangeset().raw_id
             context_lcl = get_line_ctx('', request.GET)
             ign_whitespace_lcl = get_ignore_ws('', request.GET)
 
-            _diff = c.db_repo_scm_instance.get_diff(cs1, cs2,
+            raw_diff = c.db_repo_scm_instance.get_diff(cs1, cs2,
                 ignore_whitespace=ign_whitespace_lcl, context=context_lcl)
             diff_limit = None if c.fulldiff else self.cut_off_limit
-            diff_processor = diffs.DiffProcessor(_diff,
-                                                 vcs=c.db_repo_scm_instance.alias,
-                                                 diff_limit=diff_limit)
             file_diff_data = []
             if method == 'show':
+                diff_processor = diffs.DiffProcessor(raw_diff,
+                                                     vcs=c.db_repo_scm_instance.alias,
+                                                     diff_limit=diff_limit)
                 _parsed = diff_processor.prepare()
                 c.limited_diff = False
                 if isinstance(_parsed, LimitedDiffContainer):
                     c.limited_diff = True
                 for f in _parsed:
                     st = f['stats']
                     c.lines_added += st['added']
                     c.lines_deleted += st['deleted']
                     filename = f['filename']
                     fid = h.FID(changeset.raw_id, filename)
                     url_fid = h.FID('', filename)
                     diff = diff_processor.as_html(enable_comments=enable_comments,
                                                   parsed_lines=[f])
                     file_diff_data.append((fid, url_fid, f['operation'], f['old_filename'], filename, diff, st))
             else:
                 # downloads/raw we only need RAW diff nothing else
-                diff = diff_processor.as_raw()
-                file_diff_data.append(('', None, None, None, diff, None))
+                file_diff_data.append(('', None, None, None, raw_diff, None))
             c.changes[changeset.raw_id] = (cs1, cs2, file_diff_data)
 
         # sort comments in creation order
         c.comments = [com for com_id, com in sorted(comments.items())]
 
         # count inline comments
         for __, lines in c.inline_comments:
             for comments in lines.values():
                 c.inline_cnt += len(comments)
 
         if len(c.cs_ranges) == 1:
             c.changeset = c.cs_ranges[0]
             c.parent_tmpl = ''.join(['# Parent %s\n' % x.raw_id
                                      for x in c.changeset.parents])
         if method == 'download':
             response.content_type = 'text/plain'
             response.content_disposition = 'attachment; filename=%s.diff' \
                 % revision[:12]
-            return diff
+            return raw_diff
         elif method == 'patch':
             response.content_type = 'text/plain'
-            c.diff = safe_unicode(diff)
+            c.diff = safe_unicode(raw_diff)
             return render('changeset/patch_changeset.html')
         elif method == 'raw':
             response.content_type = 'text/plain'
-            return diff
+            return raw_diff
         elif method == 'show':
             self.__load_data()
             if len(c.cs_ranges) == 1:
                 return render('changeset/changeset.html')
             else:
                 c.cs_ranges_org = None
                 c.cs_comments = {}
                 revs = [ctx.revision for ctx in reversed(c.cs_ranges)]
                 c.jsdata = graph_data(c.db_repo_scm_instance, revs)
                 return render('changeset/changeset_range.html')
 
     @LoginRequired()
     @HasRepoPermissionLevelDecorator('read')
     def index(self, revision, method='show'):
         return self._index(revision, method=method)
 
     @LoginRequired()
     @HasRepoPermissionLevelDecorator('read')
     def changeset_raw(self, revision):
         return self._index(revision, method='raw')
 
     @LoginRequired()
     @HasRepoPermissionLevelDecorator('read')
     def changeset_patch(self, revision):
         return self._index(revision, method='patch')
 
     @LoginRequired()
     @HasRepoPermissionLevelDecorator('read')
     def changeset_download(self, revision):
         return self._index(revision, method='download')
 
     @LoginRequired()
     @NotAnonymous()
     @HasRepoPermissionLevelDecorator('read')
     @jsonify
     def comment(self, repo_name, revision):
         assert request.environ.get('HTTP_X_PARTIAL_XHR')
 
         status = request.POST.get('changeset_status')
         text = request.POST.get('text', '').strip()
 
         c.comment = create_comment(
             text,
             status,
             revision=revision,
             f_path=request.POST.get('f_path'),
             line_no=request.POST.get('line'),
         )
 
         # get status if set !
         if status:
             # if latest status was from pull request and it's closed
             # disallow changing status ! RLY?
             try:
                 ChangesetStatusModel().set_status(
                     c.db_repo.repo_id,
                     status,
                     request.authuser.user_id,
                     c.comment,
                     revision=revision,
                     dont_allow_on_closed_pull_request=True,
                 )
             except StatusChangeOnClosedPullRequestError:
                 log.debug('cannot change status on %s with closed pull request', revision)
                 raise HTTPBadRequest()
 
         action_logger(request.authuser,
                       'user_commented_revision:%s' % revision,
                       c.db_repo, request.ip_addr)
 
         Session().commit()
 
         data = {
             'target_id': h.safeid(h.safe_unicode(request.POST.get('f_path'))),
         }
         if c.comment is not None:
             data.update(c.comment.get_dict())
             data.update({'rendered_text':
                          render('changeset/changeset_comment_block.html')})
 
         return data
 
     @LoginRequired()
     @NotAnonymous()
     @HasRepoPermissionLevelDecorator('read')
     @jsonify
     def delete_comment(self, repo_name, comment_id):
         co = ChangesetComment.get_or_404(comment_id)
         if co.repo.repo_name != repo_name:
             raise HTTPNotFound()
         owner = co.author_id == request.authuser.user_id
         repo_admin = h.HasRepoPermissionLevel('admin')(repo_name)
         if h.HasPermissionAny('hg.admin')() or repo_admin or owner:
             ChangesetCommentsModel().delete(comment=co)
             Session().commit()
             return True
         else:
             raise HTTPForbidden()
 
     @LoginRequired()
     @HasRepoPermissionLevelDecorator('read')
     @jsonify
     def changeset_info(self, repo_name, revision):
         if request.is_xhr:
             try:
                 return c.db_repo_scm_instance.get_changeset(revision)
             except ChangesetDoesNotExistError as e:
                 return EmptyChangeset(message=str(e))
         else:
             raise HTTPBadRequest()
 
     @LoginRequired()
     @HasRepoPermissionLevelDecorator('read')
     @jsonify
     def changeset_children(self, repo_name, revision):
         if request.is_xhr:
             changeset = c.db_repo_scm_instance.get_changeset(revision)
             result = {"results": []}
             if changeset.children:
                 result = {"results": changeset.children}
             return result
         else:
             raise HTTPBadRequest()
 
     @LoginRequired()
     @HasRepoPermissionLevelDecorator('read')
     @jsonify
     def changeset_parents(self, repo_name, revision):
         if request.is_xhr:
             changeset = c.db_repo_scm_instance.get_changeset(revision)
             result = {"results": []}
             if changeset.parents:
                 result = {"results": changeset.parents}
             return result
         else:
             raise HTTPBadRequest()
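
The changeset.py hunk above is a small refactoring: the unparsed diff is kept
under the name raw_diff, the DiffProcessor is now constructed only inside the
method == 'show' branch, and the raw/patch/download paths return the diff text
unchanged instead of round-tripping it through diff_processor.as_raw(). A
minimal sketch of that pattern, using stand-in names rather than the actual
Kallithea API:

    class HtmlDiffRenderer(object):
        """Stand-in for kallithea.lib.diffs.DiffProcessor (illustrative only)."""
        def __init__(self, raw_diff):
            self.raw_diff = raw_diff

        def as_html(self):
            # a real renderer would parse hunks; escaping is elided here
            return '<pre>%s</pre>' % self.raw_diff

    def serve_diff(raw_diff, method):
        if method == 'show':
            # only the HTML view pays the parsing cost
            return HtmlDiffRenderer(raw_diff).as_html()
        # 'raw', 'patch' and 'download' hand the diff through untouched
        return raw_diff

    assert serve_diff('diff --git a/f b/f', 'raw') == 'diff --git a/f b/f'

Besides skipping pointless parsing on the raw/patch/download paths, this removes
the ambiguity of the old code, where the name diff was reused for both the
per-file HTML fragments and the whole raw diff.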
--- a/kallithea/controllers/feed.py
+++ b/kallithea/controllers/feed.py
@@ -1,171 +1,170 @@
 # -*- coding: utf-8 -*-
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 kallithea.controllers.feed
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Feed controller for Kallithea
 
 This file was forked by the Kallithea project in July 2014.
 Original author and date, and relevant copyright and licensing information is below:
 :created_on: Apr 23, 2010
 :author: marcink
 :copyright: (c) 2013 RhodeCode GmbH, and others.
 :license: GPLv3, see LICENSE.md for more details.
 """
 
 
 import logging
 
 from tg import response, tmpl_context as c
 from tg.i18n import ugettext as _
 
 from beaker.cache import cache_region, region_invalidate
 from webhelpers.feedgenerator import Atom1Feed, Rss201rev2Feed
 
 from kallithea import CONFIG
 from kallithea.lib import helpers as h
 from kallithea.lib.auth import LoginRequired, HasRepoPermissionLevelDecorator
 from kallithea.lib.base import BaseRepoController
 from kallithea.lib.diffs import DiffProcessor, LimitedDiffContainer
 from kallithea.model.db import CacheInvalidation
 from kallithea.lib.utils2 import safe_int, str2bool, safe_unicode
 
 log = logging.getLogger(__name__)
 
 
 language = 'en-us'
 ttl = "5"
 
 
 class FeedController(BaseRepoController):
 
     @LoginRequired(api_access=True)
     @HasRepoPermissionLevelDecorator('read')
     def _before(self, *args, **kwargs):
         super(FeedController, self)._before(*args, **kwargs)
 
     def _get_title(self, cs):
         return h.shorter(cs.message, 160)
 
-    def __changes(self, cs):
+    def __get_desc(self, cs):
+        desc_msg = [(_('%s committed on %s')
+                     % (h.person(cs.author), h.fmt_date(cs.date))) + '<br/>']
+        # branches, tags, bookmarks
+        if cs.branch:
+            desc_msg.append('branch: %s<br/>' % cs.branch)
+        for book in cs.bookmarks:
+            desc_msg.append('bookmark: %s<br/>' % book)
+        for tag in cs.tags:
+            desc_msg.append('tag: %s<br/>' % tag)
+
         changes = []
         diff_limit = safe_int(CONFIG.get('rss_cut_off_limit', 32 * 1024))
-        diff_processor = DiffProcessor(cs.diff(),
+        raw_diff = cs.diff()
+        diff_processor = DiffProcessor(raw_diff,
                                        diff_limit=diff_limit)
         _parsed = diff_processor.prepare(inline_diff=False)
         limited_diff = False
         if isinstance(_parsed, LimitedDiffContainer):
             limited_diff = True
 
         for st in _parsed:
             st.update({'added': st['stats']['added'],
                        'removed': st['stats']['deleted']})
             changes.append('\n %(operation)s %(filename)s '
                            '(%(added)s lines added, %(removed)s lines removed)'
                            % st)
         if limited_diff:
             changes = changes + ['\n ' +
                                  _('Changeset was too big and was cut off...')]
-        return diff_processor, changes
 
-    def __get_desc(self, cs):
-        desc_msg = [(_('%s committed on %s')
-                     % (h.person(cs.author), h.fmt_date(cs.date))) + '<br/>']
-        # branches, tags, bookmarks
-        if cs.branch:
-            desc_msg.append('branch: %s<br/>' % cs.branch)
-        for book in cs.bookmarks:
-            desc_msg.append('bookmark: %s<br/>' % book)
-        for tag in cs.tags:
-            desc_msg.append('tag: %s<br/>' % tag)
-        diff_processor, changes = self.__changes(cs)
         # rev link
         _url = h.canonical_url('changeset_home', repo_name=c.db_repo.repo_name,
                                revision=cs.raw_id)
         desc_msg.append('changeset: <a href="%s">%s</a>' % (_url, cs.raw_id[:8]))
 
         desc_msg.append('<pre>')
         desc_msg.append(h.urlify_text(cs.message))
         desc_msg.append('\n')
         desc_msg.extend(changes)
         if str2bool(CONFIG.get('rss_include_diff', False)):
             desc_msg.append('\n\n')
-            desc_msg.append(diff_processor.as_raw())
+            desc_msg.append(raw_diff)
         desc_msg.append('</pre>')
         return map(safe_unicode, desc_msg)
 
     def atom(self, repo_name):
         """Produce an atom-1.0 feed via feedgenerator module"""
 
         @cache_region('long_term', '_get_feed_from_cache')
         def _get_feed_from_cache(key, kind):
             feed = Atom1Feed(
                 title=_('%s %s feed') % (c.site_name, repo_name),
                 link=h.canonical_url('summary_home', repo_name=repo_name),
                 description=_('Changes on %s repository') % repo_name,
                 language=language,
                 ttl=ttl
             )
 
             rss_items_per_page = safe_int(CONFIG.get('rss_items_per_page', 20))
             for cs in reversed(list(c.db_repo_scm_instance[-rss_items_per_page:])):
                 feed.add_item(title=self._get_title(cs),
                               link=h.canonical_url('changeset_home', repo_name=repo_name,
                                                    revision=cs.raw_id),
                               author_name=cs.author,
                               description=''.join(self.__get_desc(cs)),
                               pubdate=cs.date,
                               )
 
             response.content_type = feed.mime_type
             return feed.writeString('utf-8')
 
         kind = 'ATOM'
         valid = CacheInvalidation.test_and_set_valid(repo_name, kind)
         if not valid:
             region_invalidate(_get_feed_from_cache, None, '_get_feed_from_cache', repo_name, kind)
         return _get_feed_from_cache(repo_name, kind)
 
     def rss(self, repo_name):
         """Produce an rss2 feed via feedgenerator module"""
 
         @cache_region('long_term', '_get_feed_from_cache')
         def _get_feed_from_cache(key, kind):
             feed = Rss201rev2Feed(
                 title=_('%s %s feed') % (c.site_name, repo_name),
                 link=h.canonical_url('summary_home', repo_name=repo_name),
                 description=_('Changes on %s repository') % repo_name,
                 language=language,
                 ttl=ttl
             )
 
             rss_items_per_page = safe_int(CONFIG.get('rss_items_per_page', 20))
             for cs in reversed(list(c.db_repo_scm_instance[-rss_items_per_page:])):
                 feed.add_item(title=self._get_title(cs),
                               link=h.canonical_url('changeset_home', repo_name=repo_name,
                                                    revision=cs.raw_id),
                               author_name=cs.author,
                               description=''.join(self.__get_desc(cs)),
                               pubdate=cs.date,
                               )
 
             response.content_type = feed.mime_type
             return feed.writeString('utf-8')
 
         kind = 'RSS'
         valid = CacheInvalidation.test_and_set_valid(repo_name, kind)
         if not valid:
             region_invalidate(_get_feed_from_cache, None, '_get_feed_from_cache', repo_name, kind)
         return _get_feed_from_cache(repo_name, kind)
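
The feed.py hunk applies the same idea: __changes is folded into __get_desc, and
because the raw output of cs.diff() is kept in a local raw_diff, the optional
rss_include_diff path can append it verbatim instead of reserializing it via
diff_processor.as_raw(). A rough sketch of the resulting description assembly
(illustrative names, not the actual Kallithea helpers):

    def build_feed_description(message, change_lines, raw_diff, include_diff):
        parts = ['<pre>', message, '\n']
        parts.extend(change_lines)  # per-file summary lines
        if include_diff:
            # the unparsed diff is appended as-is, no re-rendering step
            parts.extend(['\n\n', raw_diff])
        parts.append('</pre>')
        return ''.join(parts)

    print(build_feed_description('fix parser',
                                 ['\n M setup.py (1 lines added, 0 lines removed)'],
                                 'diff --git a/setup.py b/setup.py',
                                 include_diff=False))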
@@ -1,785 +1,782 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | # This program is free software: you can redistribute it and/or modify |
|
2 | # This program is free software: you can redistribute it and/or modify | |
3 | # it under the terms of the GNU General Public License as published by |
|
3 | # it under the terms of the GNU General Public License as published by | |
4 | # the Free Software Foundation, either version 3 of the License, or |
|
4 | # the Free Software Foundation, either version 3 of the License, or | |
5 | # (at your option) any later version. |
|
5 | # (at your option) any later version. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU General Public License |
|
12 | # You should have received a copy of the GNU General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | """ |
|
14 | """ | |
15 | kallithea.controllers.files |
|
15 | kallithea.controllers.files | |
16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
|
16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
17 |
|
17 | |||
18 | Files controller for Kallithea |
|
18 | Files controller for Kallithea | |
19 |
|
19 | |||
20 | This file was forked by the Kallithea project in July 2014. |
|
20 | This file was forked by the Kallithea project in July 2014. | |
21 | Original author and date, and relevant copyright and licensing information is below: |
|
21 | Original author and date, and relevant copyright and licensing information is below: | |
22 | :created_on: Apr 21, 2010 |
|
22 | :created_on: Apr 21, 2010 | |
23 | :author: marcink |
|
23 | :author: marcink | |
24 | :copyright: (c) 2013 RhodeCode GmbH, and others. |
|
24 | :copyright: (c) 2013 RhodeCode GmbH, and others. | |
25 | :license: GPLv3, see LICENSE.md for more details. |
|
25 | :license: GPLv3, see LICENSE.md for more details. | |
26 | """ |
|
26 | """ | |
27 |
|
27 | |||
28 | import os |
|
28 | import os | |
29 | import posixpath |
|
29 | import posixpath | |
30 | import logging |
|
30 | import logging | |
31 | import traceback |
|
31 | import traceback | |
32 | import tempfile |
|
32 | import tempfile | |
33 | import shutil |
|
33 | import shutil | |
34 |
|
34 | |||
35 | from tg import request, response, tmpl_context as c |
|
35 | from tg import request, response, tmpl_context as c | |
36 | from tg.i18n import ugettext as _ |
|
36 | from tg.i18n import ugettext as _ | |
37 | from webob.exc import HTTPFound |
|
37 | from webob.exc import HTTPFound | |
38 |
|
38 | |||
39 | from kallithea.config.routing import url |
|
39 | from kallithea.config.routing import url | |
40 | from kallithea.lib.utils import action_logger |
|
40 | from kallithea.lib.utils import action_logger | |
41 | from kallithea.lib import diffs |
|
41 | from kallithea.lib import diffs | |
42 | from kallithea.lib import helpers as h |
|
42 | from kallithea.lib import helpers as h | |
43 |
|
43 | |||
44 | from kallithea.lib.compat import OrderedDict |
|
44 | from kallithea.lib.compat import OrderedDict | |
45 | from kallithea.lib.utils2 import convert_line_endings, detect_mode, safe_str, \ |
|
45 | from kallithea.lib.utils2 import convert_line_endings, detect_mode, safe_str, \ | |
46 | str2bool, safe_int |
|
46 | str2bool, safe_int | |
47 | from kallithea.lib.auth import LoginRequired, HasRepoPermissionLevelDecorator |
|
47 | from kallithea.lib.auth import LoginRequired, HasRepoPermissionLevelDecorator | |
48 | from kallithea.lib.base import BaseRepoController, render, jsonify |
|
48 | from kallithea.lib.base import BaseRepoController, render, jsonify | |
49 | from kallithea.lib.vcs.backends.base import EmptyChangeset |
|
49 | from kallithea.lib.vcs.backends.base import EmptyChangeset | |
50 | from kallithea.lib.vcs.conf import settings |
|
50 | from kallithea.lib.vcs.conf import settings | |
51 | from kallithea.lib.vcs.exceptions import RepositoryError, \ |
|
51 | from kallithea.lib.vcs.exceptions import RepositoryError, \ | |
52 | ChangesetDoesNotExistError, EmptyRepositoryError, \ |
|
52 | ChangesetDoesNotExistError, EmptyRepositoryError, \ | |
53 | ImproperArchiveTypeError, VCSError, NodeAlreadyExistsError, \ |
|
53 | ImproperArchiveTypeError, VCSError, NodeAlreadyExistsError, \ | |
54 | NodeDoesNotExistError, ChangesetError, NodeError |
|
54 | NodeDoesNotExistError, ChangesetError, NodeError | |
55 | from kallithea.lib.vcs.nodes import FileNode |
|
55 | from kallithea.lib.vcs.nodes import FileNode | |
56 |
|
56 | |||
57 | from kallithea.model.repo import RepoModel |
|
57 | from kallithea.model.repo import RepoModel | |
58 | from kallithea.model.scm import ScmModel |
|
58 | from kallithea.model.scm import ScmModel | |
59 | from kallithea.model.db import Repository |
|
59 | from kallithea.model.db import Repository | |
60 |
|
60 | |||
61 | from kallithea.controllers.changeset import anchor_url, _ignorews_url, \ |
|
61 | from kallithea.controllers.changeset import anchor_url, _ignorews_url, \ | |
62 | _context_url, get_line_ctx, get_ignore_ws |
|
62 | _context_url, get_line_ctx, get_ignore_ws | |
63 | from webob.exc import HTTPNotFound |
|
63 | from webob.exc import HTTPNotFound | |
64 | from kallithea.lib.exceptions import NonRelativePathError |
|
64 | from kallithea.lib.exceptions import NonRelativePathError | |
65 |
|
65 | |||
66 |
|
66 | |||
67 | log = logging.getLogger(__name__) |
|
67 | log = logging.getLogger(__name__) | |
68 |
|
68 | |||
69 |
|
69 | |||
70 | class FilesController(BaseRepoController): |
|
70 | class FilesController(BaseRepoController): | |
71 |
|
71 | |||
72 | def _before(self, *args, **kwargs): |
|
72 | def _before(self, *args, **kwargs): | |
73 | super(FilesController, self)._before(*args, **kwargs) |
|
73 | super(FilesController, self)._before(*args, **kwargs) | |
74 |
|
74 | |||
75 | def __get_cs(self, rev, silent_empty=False): |
|
75 | def __get_cs(self, rev, silent_empty=False): | |
76 | """ |
|
76 | """ | |
77 | Safe way to get changeset if error occur it redirects to tip with |
|
77 | Safe way to get changeset if error occur it redirects to tip with | |
78 | proper message |
|
78 | proper message | |
79 |
|
79 | |||
80 | :param rev: revision to fetch |
|
80 | :param rev: revision to fetch | |
81 | :silent_empty: return None if repository is empty |
|
81 | :silent_empty: return None if repository is empty | |
82 | """ |
|
82 | """ | |
83 |
|
83 | |||
84 | try: |
|
84 | try: | |
85 | return c.db_repo_scm_instance.get_changeset(rev) |
|
85 | return c.db_repo_scm_instance.get_changeset(rev) | |
86 | except EmptyRepositoryError as e: |
|
86 | except EmptyRepositoryError as e: | |
87 | if silent_empty: |
|
87 | if silent_empty: | |
88 | return None |
|
88 | return None | |
89 | url_ = url('files_add_home', |
|
89 | url_ = url('files_add_home', | |
90 | repo_name=c.repo_name, |
|
90 | repo_name=c.repo_name, | |
91 | revision=0, f_path='', anchor='edit') |
|
91 | revision=0, f_path='', anchor='edit') | |
92 | add_new = h.link_to(_('Click here to add new file'), url_, class_="alert-link") |
|
92 | add_new = h.link_to(_('Click here to add new file'), url_, class_="alert-link") | |
93 | h.flash(h.literal(_('There are no files yet. %s') % add_new), |
|
93 | h.flash(h.literal(_('There are no files yet. %s') % add_new), | |
94 | category='warning') |
|
94 | category='warning') | |
95 | raise HTTPNotFound() |
|
95 | raise HTTPNotFound() | |
96 | except (ChangesetDoesNotExistError, LookupError): |
|
96 | except (ChangesetDoesNotExistError, LookupError): | |
97 | msg = _('Such revision does not exist for this repository') |
|
97 | msg = _('Such revision does not exist for this repository') | |
98 | h.flash(msg, category='error') |
|
98 | h.flash(msg, category='error') | |
99 | raise HTTPNotFound() |
|
99 | raise HTTPNotFound() | |
100 | except RepositoryError as e: |
|
100 | except RepositoryError as e: | |
101 | h.flash(safe_str(e), category='error') |
|
101 | h.flash(safe_str(e), category='error') | |
102 | raise HTTPNotFound() |
|
102 | raise HTTPNotFound() | |
103 |
|
103 | |||
104 | def __get_filenode(self, cs, path): |
|
104 | def __get_filenode(self, cs, path): | |
105 | """ |
|
105 | """ | |
106 | Returns file_node or raises an HTTP error. |
|
106 | Returns file_node or raises an HTTP error. | |
107 |
|
107 | |||
108 | :param cs: given changeset |
|
108 | :param cs: given changeset | |
109 | :param path: path to lookup |
|
109 | :param path: path to lookup | |
110 | """ |
|
110 | """ | |
111 |
|
111 | |||
112 | try: |
|
112 | try: | |
113 | file_node = cs.get_node(path) |
|
113 | file_node = cs.get_node(path) | |
114 | if file_node.is_dir(): |
|
114 | if file_node.is_dir(): | |
115 | raise RepositoryError('given path is a directory') |
|
115 | raise RepositoryError('given path is a directory') | |
116 | except ChangesetDoesNotExistError: |
|
116 | except ChangesetDoesNotExistError: | |
117 | msg = _('Such revision does not exist for this repository') |
|
117 | msg = _('Such revision does not exist for this repository') | |
118 | h.flash(msg, category='error') |
|
118 | h.flash(msg, category='error') | |
119 | raise HTTPNotFound() |
|
119 | raise HTTPNotFound() | |
120 | except RepositoryError as e: |
|
120 | except RepositoryError as e: | |
121 | h.flash(safe_str(e), category='error') |
|
121 | h.flash(safe_str(e), category='error') | |
122 | raise HTTPNotFound() |
|
122 | raise HTTPNotFound() | |
123 |
|
123 | |||
124 | return file_node |
|
124 | return file_node | |
125 |
|
125 | |||
126 | @LoginRequired() |
|
126 | @LoginRequired() | |
127 | @HasRepoPermissionLevelDecorator('read') |
|
127 | @HasRepoPermissionLevelDecorator('read') | |
128 | def index(self, repo_name, revision, f_path, annotate=False): |
|
128 | def index(self, repo_name, revision, f_path, annotate=False): | |
129 | # redirect to the revision given in the form, if any |
|
129 | # redirect to the revision given in the form, if any | |
130 | post_revision = request.POST.get('at_rev', None) |
|
130 | post_revision = request.POST.get('at_rev', None) | |
131 | if post_revision: |
|
131 | if post_revision: | |
132 | cs = self.__get_cs(post_revision) # FIXME - unused! |
|
132 | cs = self.__get_cs(post_revision) # FIXME - unused! | |
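| # note: although its result is unused, __get_cs still validates |
|
| # note: although its result is unused, __get_cs still validates | |
| # post_revision and raises HTTPNotFound (with a flash) if it is bad |
|
| # post_revision and raises HTTPNotFound (with a flash) if it is bad | |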
133 |
|
133 | |||
134 | c.revision = revision |
|
134 | c.revision = revision | |
135 | c.changeset = self.__get_cs(revision) |
|
135 | c.changeset = self.__get_cs(revision) | |
136 | c.branch = request.GET.get('branch', None) |
|
136 | c.branch = request.GET.get('branch', None) | |
137 | c.f_path = f_path |
|
137 | c.f_path = f_path | |
138 | c.annotate = annotate |
|
138 | c.annotate = annotate | |
139 | cur_rev = c.changeset.revision |
|
139 | cur_rev = c.changeset.revision | |
140 | # used in files_source.html: |
|
140 | # used in files_source.html: | |
141 | c.cut_off_limit = self.cut_off_limit |
|
141 | c.cut_off_limit = self.cut_off_limit | |
142 | c.fulldiff = request.GET.get('fulldiff') |
|
142 | c.fulldiff = request.GET.get('fulldiff') | |
143 |
|
143 | |||
144 | # prev link |
|
144 | # prev link | |
145 | try: |
|
145 | try: | |
146 | prev_rev = c.db_repo_scm_instance.get_changeset(cur_rev).prev(c.branch) |
|
146 | prev_rev = c.db_repo_scm_instance.get_changeset(cur_rev).prev(c.branch) | |
147 | c.url_prev = url('files_home', repo_name=c.repo_name, |
|
147 | c.url_prev = url('files_home', repo_name=c.repo_name, | |
148 | revision=prev_rev.raw_id, f_path=f_path) |
|
148 | revision=prev_rev.raw_id, f_path=f_path) | |
149 | if c.branch: |
|
149 | if c.branch: | |
150 | c.url_prev += '?branch=%s' % c.branch |
|
150 | c.url_prev += '?branch=%s' % c.branch | |
151 | except (ChangesetDoesNotExistError, VCSError): |
|
151 | except (ChangesetDoesNotExistError, VCSError): | |
152 | c.url_prev = '#' |
|
152 | c.url_prev = '#' | |
153 |
|
153 | |||
154 | # next link |
|
154 | # next link | |
155 | try: |
|
155 | try: | |
156 | next_rev = c.db_repo_scm_instance.get_changeset(cur_rev).next(c.branch) |
|
156 | next_rev = c.db_repo_scm_instance.get_changeset(cur_rev).next(c.branch) | |
157 | c.url_next = url('files_home', repo_name=c.repo_name, |
|
157 | c.url_next = url('files_home', repo_name=c.repo_name, | |
158 | revision=next_rev.raw_id, f_path=f_path) |
|
158 | revision=next_rev.raw_id, f_path=f_path) | |
159 | if c.branch: |
|
159 | if c.branch: | |
160 | c.url_next += '?branch=%s' % c.branch |
|
160 | c.url_next += '?branch=%s' % c.branch | |
161 | except (ChangesetDoesNotExistError, VCSError): |
|
161 | except (ChangesetDoesNotExistError, VCSError): | |
162 | c.url_next = '#' |
|
162 | c.url_next = '#' | |
163 |
|
163 | |||
164 | # files or dirs |
|
164 | # files or dirs | |
165 | try: |
|
165 | try: | |
166 | c.file = c.changeset.get_node(f_path) |
|
166 | c.file = c.changeset.get_node(f_path) | |
167 |
|
167 | |||
168 | if c.file.is_file(): |
|
168 | if c.file.is_file(): | |
169 | c.load_full_history = False |
|
169 | c.load_full_history = False | |
170 | # determine if we're on branch head |
|
170 | # determine if we're on branch head | |
171 | _branches = c.db_repo_scm_instance.branches |
|
171 | _branches = c.db_repo_scm_instance.branches | |
172 | c.on_branch_head = revision in _branches.keys() + _branches.values() |
|
172 | c.on_branch_head = revision in _branches.keys() + _branches.values() | |
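| # branches maps branch name -> head hash, so this matches revision |
|
| # branches maps branch name -> head hash, so this matches revision | |
| # against both names and head hashes (Python 2 list concatenation) |
|
| # against both names and head hashes (Python 2 list concatenation) | |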
173 | _hist = [] |
|
173 | _hist = [] | |
174 | c.file_history = [] |
|
174 | c.file_history = [] | |
175 | if c.load_full_history: |
|
175 | if c.load_full_history: | |
176 | c.file_history, _hist = self._get_node_history(c.changeset, f_path) |
|
176 | c.file_history, _hist = self._get_node_history(c.changeset, f_path) | |
177 |
|
177 | |||
178 | c.authors = [] |
|
178 | c.authors = [] | |
179 | for a in set([x.author for x in _hist]): |
|
179 | for a in set([x.author for x in _hist]): | |
180 | c.authors.append((h.email(a), h.person(a))) |
|
180 | c.authors.append((h.email(a), h.person(a))) | |
181 | else: |
|
181 | else: | |
182 | c.authors = c.file_history = [] |
|
182 | c.authors = c.file_history = [] | |
183 | except RepositoryError as e: |
|
183 | except RepositoryError as e: | |
184 | h.flash(safe_str(e), category='error') |
|
184 | h.flash(safe_str(e), category='error') | |
185 | raise HTTPNotFound() |
|
185 | raise HTTPNotFound() | |
186 |
|
186 | |||
187 | if request.environ.get('HTTP_X_PARTIAL_XHR'): |
|
187 | if request.environ.get('HTTP_X_PARTIAL_XHR'): | |
188 | return render('files/files_ypjax.html') |
|
188 | return render('files/files_ypjax.html') | |
189 |
|
189 | |||
190 | # TODO: tags and bookmarks? |
|
190 | # TODO: tags and bookmarks? | |
191 | c.revision_options = [(c.changeset.raw_id, |
|
191 | c.revision_options = [(c.changeset.raw_id, | |
192 | _('%s at %s') % (c.changeset.branch, h.short_id(c.changeset.raw_id)))] + \ |
|
192 | _('%s at %s') % (c.changeset.branch, h.short_id(c.changeset.raw_id)))] + \ | |
193 | [(n, b) for b, n in c.db_repo_scm_instance.branches.items()] |
|
193 | [(n, b) for b, n in c.db_repo_scm_instance.branches.items()] | |
194 | if c.db_repo_scm_instance.closed_branches: |
|
194 | if c.db_repo_scm_instance.closed_branches: | |
195 | prefix = _('(closed)') + ' ' |
|
195 | prefix = _('(closed)') + ' ' | |
196 | c.revision_options += [('-', '-')] + \ |
|
196 | c.revision_options += [('-', '-')] + \ | |
197 | [(n, prefix + b) for b, n in c.db_repo_scm_instance.closed_branches.items()] |
|
197 | [(n, prefix + b) for b, n in c.db_repo_scm_instance.closed_branches.items()] | |
198 |
|
198 | |||
199 | return render('files/files.html') |
|
199 | return render('files/files.html') | |
200 |
|
200 | |||
201 | @LoginRequired() |
|
201 | @LoginRequired() | |
202 | @HasRepoPermissionLevelDecorator('read') |
|
202 | @HasRepoPermissionLevelDecorator('read') | |
203 | @jsonify |
|
203 | @jsonify | |
204 | def history(self, repo_name, revision, f_path): |
|
204 | def history(self, repo_name, revision, f_path): | |
205 | changeset = self.__get_cs(revision) |
|
205 | changeset = self.__get_cs(revision) | |
206 | _file = changeset.get_node(f_path) |
|
206 | _file = changeset.get_node(f_path) | |
207 | if _file.is_file(): |
|
207 | if _file.is_file(): | |
208 | file_history, _hist = self._get_node_history(changeset, f_path) |
|
208 | file_history, _hist = self._get_node_history(changeset, f_path) | |
209 |
|
209 | |||
210 | res = [] |
|
210 | res = [] | |
211 | for obj in file_history: |
|
211 | for obj in file_history: | |
212 | res.append({ |
|
212 | res.append({ | |
213 | 'text': obj[1], |
|
213 | 'text': obj[1], | |
214 | 'children': [{'id': o[0], 'text': o[1]} for o in obj[0]] |
|
214 | 'children': [{'id': o[0], 'text': o[1]} for o in obj[0]] | |
215 | }) |
|
215 | }) | |
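| # response shape for the history dropdown (a select2-style widget, |
|
| # response shape for the history dropdown (a select2-style widget, | |
| # presumably): grouped 'results' plus a 'more' paging flag |
|
| # presumably): grouped 'results' plus a 'more' paging flag | |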
216 |
|
216 | |||
217 | data = { |
|
217 | data = { | |
218 | 'more': False, |
|
218 | 'more': False, | |
219 | 'results': res |
|
219 | 'results': res | |
220 | } |
|
220 | } | |
221 | return data |
|
221 | return data | |
222 |
|
222 | |||
223 | @LoginRequired() |
|
223 | @LoginRequired() | |
224 | @HasRepoPermissionLevelDecorator('read') |
|
224 | @HasRepoPermissionLevelDecorator('read') | |
225 | def authors(self, repo_name, revision, f_path): |
|
225 | def authors(self, repo_name, revision, f_path): | |
226 | changeset = self.__get_cs(revision) |
|
226 | changeset = self.__get_cs(revision) | |
227 | _file = changeset.get_node(f_path) |
|
227 | _file = changeset.get_node(f_path) | |
228 | if _file.is_file(): |
|
228 | if _file.is_file(): | |
229 | file_history, _hist = self._get_node_history(changeset, f_path) |
|
229 | file_history, _hist = self._get_node_history(changeset, f_path) | |
230 | c.authors = [] |
|
230 | c.authors = [] | |
231 | for a in set([x.author for x in _hist]): |
|
231 | for a in set([x.author for x in _hist]): | |
232 | c.authors.append((h.email(a), h.person(a))) |
|
232 | c.authors.append((h.email(a), h.person(a))) | |
233 | return render('files/files_history_box.html') |
|
233 | return render('files/files_history_box.html') | |
234 |
|
234 | |||
235 | @LoginRequired() |
|
235 | @LoginRequired() | |
236 | @HasRepoPermissionLevelDecorator('read') |
|
236 | @HasRepoPermissionLevelDecorator('read') | |
237 | def rawfile(self, repo_name, revision, f_path): |
|
237 | def rawfile(self, repo_name, revision, f_path): | |
238 | cs = self.__get_cs(revision) |
|
238 | cs = self.__get_cs(revision) | |
239 | file_node = self.__get_filenode(cs, f_path) |
|
239 | file_node = self.__get_filenode(cs, f_path) | |
240 |
|
240 | |||
241 | response.content_disposition = 'attachment; filename=%s' % \ |
|
241 | response.content_disposition = 'attachment; filename=%s' % \ | |
242 | safe_str(f_path.split(Repository.url_sep())[-1]) |
|
242 | safe_str(f_path.split(Repository.url_sep())[-1]) | |
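| # Repository.url_sep() is the URL path separator ('/'), so only the |
|
| # Repository.url_sep() is the URL path separator ('/'), so only the | |
| # basename is suggested as the download filename |
|
| # basename is suggested as the download filename | |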
243 |
|
243 | |||
244 | response.content_type = file_node.mimetype |
|
244 | response.content_type = file_node.mimetype | |
245 | return file_node.content |
|
245 | return file_node.content | |
246 |
|
246 | |||
247 | @LoginRequired() |
|
247 | @LoginRequired() | |
248 | @HasRepoPermissionLevelDecorator('read') |
|
248 | @HasRepoPermissionLevelDecorator('read') | |
249 | def raw(self, repo_name, revision, f_path): |
|
249 | def raw(self, repo_name, revision, f_path): | |
250 | cs = self.__get_cs(revision) |
|
250 | cs = self.__get_cs(revision) | |
251 | file_node = self.__get_filenode(cs, f_path) |
|
251 | file_node = self.__get_filenode(cs, f_path) | |
252 |
|
252 | |||
253 | raw_mimetype_mapping = { |
|
253 | raw_mimetype_mapping = { | |
254 | # map original mimetype to a mimetype used for "show as raw" |
|
254 | # map original mimetype to a mimetype used for "show as raw" | |
255 | # you can also provide a content-disposition to override the |
|
255 | # you can also provide a content-disposition to override the | |
256 | # default "attachment" disposition. |
|
256 | # default "attachment" disposition. | |
257 | # orig_type: (new_type, new_dispo) |
|
257 | # orig_type: (new_type, new_dispo) | |
258 |
|
258 | |||
259 | # show images inline: |
|
259 | # show images inline: | |
260 | 'image/x-icon': ('image/x-icon', 'inline'), |
|
260 | 'image/x-icon': ('image/x-icon', 'inline'), | |
261 | 'image/png': ('image/png', 'inline'), |
|
261 | 'image/png': ('image/png', 'inline'), | |
262 | 'image/gif': ('image/gif', 'inline'), |
|
262 | 'image/gif': ('image/gif', 'inline'), | |
263 | 'image/jpeg': ('image/jpeg', 'inline'), |
|
263 | 'image/jpeg': ('image/jpeg', 'inline'), | |
264 | 'image/svg+xml': ('image/svg+xml', 'inline'), |
|
264 | 'image/svg+xml': ('image/svg+xml', 'inline'), | |
265 | } |
|
265 | } | |
266 |
|
266 | |||
267 | mimetype = file_node.mimetype |
|
267 | mimetype = file_node.mimetype | |
268 | try: |
|
268 | try: | |
269 | mimetype, dispo = raw_mimetype_mapping[mimetype] |
|
269 | mimetype, dispo = raw_mimetype_mapping[mimetype] | |
270 | except KeyError: |
|
270 | except KeyError: | |
271 | # we don't know anything special about this, handle it safely |
|
271 | # we don't know anything special about this, handle it safely | |
272 | if file_node.is_binary: |
|
272 | if file_node.is_binary: | |
273 | # do same as download raw for binary files |
|
273 | # do same as download raw for binary files | |
274 | mimetype, dispo = 'application/octet-stream', 'attachment' |
|
274 | mimetype, dispo = 'application/octet-stream', 'attachment' | |
275 | else: |
|
275 | else: | |
276 | # do not just use the original mimetype, but force text/plain, |
|
276 | # do not just use the original mimetype, but force text/plain, | |
277 | # otherwise it would serve text/html and that might be unsafe. |
|
277 | # otherwise it would serve text/html and that might be unsafe. | |
278 | # Note: underlying vcs library fakes text/plain mimetype if the |
|
278 | # Note: underlying vcs library fakes text/plain mimetype if the | |
279 | # mimetype cannot be determined and it thinks it is not |
|
279 | # mimetype cannot be determined and it thinks it is not | |
280 | # binary. This might lead to erroneous text display in some |
|
280 | # binary. This might lead to erroneous text display in some | |
281 | # cases, but helps in other cases, like with text files |
|
281 | # cases, but helps in other cases, like with text files | |
282 | # without extension. |
|
282 | # without extension. | |
283 | mimetype, dispo = 'text/plain', 'inline' |
|
283 | mimetype, dispo = 'text/plain', 'inline' | |
284 |
|
284 | |||
285 | if dispo == 'attachment': |
|
285 | if dispo == 'attachment': | |
286 | dispo = 'attachment; filename=%s' % \ |
|
286 | dispo = 'attachment; filename=%s' % \ | |
287 | safe_str(f_path.split(os.sep)[-1]) |
|
287 | safe_str(f_path.split(os.sep)[-1]) | |
288 |
|
288 | |||
289 | response.content_disposition = dispo |
|
289 | response.content_disposition = dispo | |
290 | response.content_type = mimetype |
|
290 | response.content_type = mimetype | |
291 | return file_node.content |
|
291 | return file_node.content | |
292 |
|
292 | |||
293 | @LoginRequired() |
|
293 | @LoginRequired() | |
294 | @HasRepoPermissionLevelDecorator('write') |
|
294 | @HasRepoPermissionLevelDecorator('write') | |
295 | def delete(self, repo_name, revision, f_path): |
|
295 | def delete(self, repo_name, revision, f_path): | |
296 | repo = c.db_repo |
|
296 | repo = c.db_repo | |
297 | if repo.enable_locking and repo.locked[0]: |
|
297 | if repo.enable_locking and repo.locked[0]: | |
298 | h.flash(_('This repository has been locked by %s on %s') |
|
298 | h.flash(_('This repository has been locked by %s on %s') | |
299 | % (h.person_by_id(repo.locked[0]), |
|
299 | % (h.person_by_id(repo.locked[0]), | |
300 | h.fmt_date(h.time_to_datetime(repo.locked[1]))), |
|
300 | h.fmt_date(h.time_to_datetime(repo.locked[1]))), | |
301 | 'warning') |
|
301 | 'warning') | |
302 | raise HTTPFound(location=h.url('files_home', |
|
302 | raise HTTPFound(location=h.url('files_home', | |
303 | repo_name=repo_name, revision='tip')) |
|
303 | repo_name=repo_name, revision='tip')) | |
304 |
|
304 | |||
305 | # check if revision is a branch identifier - basically we cannot |
|
305 | # check if revision is a branch identifier - basically we cannot | |
306 | # create multiple heads via file editing |
|
306 | # create multiple heads via file editing | |
307 | _branches = repo.scm_instance.branches |
|
307 | _branches = repo.scm_instance.branches | |
308 | # check if revision is a branch name or branch hash |
|
308 | # check if revision is a branch name or branch hash | |
309 | if revision not in _branches.keys() + _branches.values(): |
|
309 | if revision not in _branches.keys() + _branches.values(): | |
310 | h.flash(_('You can only delete files with revision ' |
|
310 | h.flash(_('You can only delete files with revision ' | |
311 | 'being a valid branch'), category='warning') |
|
311 | 'being a valid branch'), category='warning') | |
312 | raise HTTPFound(location=h.url('files_home', |
|
312 | raise HTTPFound(location=h.url('files_home', | |
313 | repo_name=repo_name, revision='tip', |
|
313 | repo_name=repo_name, revision='tip', | |
314 | f_path=f_path)) |
|
314 | f_path=f_path)) | |
315 |
|
315 | |||
316 | r_post = request.POST |
|
316 | r_post = request.POST | |
317 |
|
317 | |||
318 | c.cs = self.__get_cs(revision) |
|
318 | c.cs = self.__get_cs(revision) | |
319 | c.file = self.__get_filenode(c.cs, f_path) |
|
319 | c.file = self.__get_filenode(c.cs, f_path) | |
320 |
|
320 | |||
321 | c.default_message = _('Deleted file %s via Kallithea') % (f_path) |
|
321 | c.default_message = _('Deleted file %s via Kallithea') % (f_path) | |
322 | c.f_path = f_path |
|
322 | c.f_path = f_path | |
323 | node_path = f_path |
|
323 | node_path = f_path | |
324 | author = request.authuser.full_contact |
|
324 | author = request.authuser.full_contact | |
325 |
|
325 | |||
326 | if r_post: |
|
326 | if r_post: | |
327 | message = r_post.get('message') or c.default_message |
|
327 | message = r_post.get('message') or c.default_message | |
328 |
|
328 | |||
329 | try: |
|
329 | try: | |
330 | nodes = { |
|
330 | nodes = { | |
331 | node_path: { |
|
331 | node_path: { | |
332 | 'content': '' |
|
332 | 'content': '' | |
333 | } |
|
333 | } | |
334 | } |
|
334 | } | |
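| # for deletions only the node path matters; the empty 'content' |
|
| # for deletions only the node path matters; the empty 'content' | |
| # value appears to be ignored by delete_nodes |
|
| # value appears to be ignored by delete_nodes | |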
335 | self.scm_model.delete_nodes( |
|
335 | self.scm_model.delete_nodes( | |
336 | user=request.authuser.user_id, repo=c.db_repo, |
|
336 | user=request.authuser.user_id, repo=c.db_repo, | |
337 | message=message, |
|
337 | message=message, | |
338 | nodes=nodes, |
|
338 | nodes=nodes, | |
339 | parent_cs=c.cs, |
|
339 | parent_cs=c.cs, | |
340 | author=author, |
|
340 | author=author, | |
341 | ) |
|
341 | ) | |
342 |
|
342 | |||
343 | h.flash(_('Successfully deleted file %s') % f_path, |
|
343 | h.flash(_('Successfully deleted file %s') % f_path, | |
344 | category='success') |
|
344 | category='success') | |
345 | except Exception: |
|
345 | except Exception: | |
346 | log.error(traceback.format_exc()) |
|
346 | log.error(traceback.format_exc()) | |
347 | h.flash(_('Error occurred during commit'), category='error') |
|
347 | h.flash(_('Error occurred during commit'), category='error') | |
348 | raise HTTPFound(location=url('changeset_home', |
|
348 | raise HTTPFound(location=url('changeset_home', | |
349 | repo_name=c.repo_name, revision='tip')) |
|
349 | repo_name=c.repo_name, revision='tip')) | |
350 |
|
350 | |||
351 | return render('files/files_delete.html') |
|
351 | return render('files/files_delete.html') | |
352 |
|
352 | |||
353 | @LoginRequired() |
|
353 | @LoginRequired() | |
354 | @HasRepoPermissionLevelDecorator('write') |
|
354 | @HasRepoPermissionLevelDecorator('write') | |
355 | def edit(self, repo_name, revision, f_path): |
|
355 | def edit(self, repo_name, revision, f_path): | |
356 | repo = c.db_repo |
|
356 | repo = c.db_repo | |
357 | if repo.enable_locking and repo.locked[0]: |
|
357 | if repo.enable_locking and repo.locked[0]: | |
358 | h.flash(_('This repository has been locked by %s on %s') |
|
358 | h.flash(_('This repository has been locked by %s on %s') | |
359 | % (h.person_by_id(repo.locked[0]), |
|
359 | % (h.person_by_id(repo.locked[0]), | |
360 | h.fmt_date(h.time_to_datetime(repo.locked[1]))), |
|
360 | h.fmt_date(h.time_to_datetime(repo.locked[1]))), | |
361 | 'warning') |
|
361 | 'warning') | |
362 | raise HTTPFound(location=h.url('files_home', |
|
362 | raise HTTPFound(location=h.url('files_home', | |
363 | repo_name=repo_name, revision='tip')) |
|
363 | repo_name=repo_name, revision='tip')) | |
364 |
|
364 | |||
365 | # check if revision is a branch identifier - basically we cannot |
|
365 | # check if revision is a branch identifier - basically we cannot | |
366 | # create multiple heads via file editing |
|
366 | # create multiple heads via file editing | |
367 | _branches = repo.scm_instance.branches |
|
367 | _branches = repo.scm_instance.branches | |
368 | # check if revision is a branch name or branch hash |
|
368 | # check if revision is a branch name or branch hash | |
369 | if revision not in _branches.keys() + _branches.values(): |
|
369 | if revision not in _branches.keys() + _branches.values(): | |
370 | h.flash(_('You can only edit files with revision ' |
|
370 | h.flash(_('You can only edit files with revision ' | |
371 | 'being a valid branch'), category='warning') |
|
371 | 'being a valid branch'), category='warning') | |
372 | raise HTTPFound(location=h.url('files_home', |
|
372 | raise HTTPFound(location=h.url('files_home', | |
373 | repo_name=repo_name, revision='tip', |
|
373 | repo_name=repo_name, revision='tip', | |
374 | f_path=f_path)) |
|
374 | f_path=f_path)) | |
375 |
|
375 | |||
376 | r_post = request.POST |
|
376 | r_post = request.POST | |
377 |
|
377 | |||
378 | c.cs = self.__get_cs(revision) |
|
378 | c.cs = self.__get_cs(revision) | |
379 | c.file = self.__get_filenode(c.cs, f_path) |
|
379 | c.file = self.__get_filenode(c.cs, f_path) | |
380 |
|
380 | |||
381 | if c.file.is_binary: |
|
381 | if c.file.is_binary: | |
382 | raise HTTPFound(location=url('files_home', repo_name=c.repo_name, |
|
382 | raise HTTPFound(location=url('files_home', repo_name=c.repo_name, | |
383 | revision=c.cs.raw_id, f_path=f_path)) |
|
383 | revision=c.cs.raw_id, f_path=f_path)) | |
384 | c.default_message = _('Edited file %s via Kallithea') % (f_path) |
|
384 | c.default_message = _('Edited file %s via Kallithea') % (f_path) | |
385 | c.f_path = f_path |
|
385 | c.f_path = f_path | |
386 |
|
386 | |||
387 | if r_post: |
|
387 | if r_post: | |
388 |
|
388 | |||
389 | old_content = c.file.content |
|
389 | old_content = c.file.content | |
390 | sl = old_content.splitlines(True) |
|
390 | sl = old_content.splitlines(True) | |
391 | first_line = sl[0] if sl else '' |
|
391 | first_line = sl[0] if sl else '' | |
392 | # modes: 0 - Unix, 1 - Mac, 2 - DOS |
|
392 | # modes: 0 - Unix, 1 - Mac, 2 - DOS | |
393 | mode = detect_mode(first_line, 0) |
|
393 | mode = detect_mode(first_line, 0) | |
394 | content = convert_line_endings(r_post.get('content', ''), mode) |
|
394 | content = convert_line_endings(r_post.get('content', ''), mode) | |
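| # preserve the file's existing EOL style: detect it from the first |
|
| # preserve the file's existing EOL style: detect it from the first | |
| # line (defaulting to Unix) and convert the posted content to match |
|
| # line (defaulting to Unix) and convert the posted content to match | |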
395 |
|
395 | |||
396 | message = r_post.get('message') or c.default_message |
|
396 | message = r_post.get('message') or c.default_message | |
397 | author = request.authuser.full_contact |
|
397 | author = request.authuser.full_contact | |
398 |
|
398 | |||
399 | if content == old_content: |
|
399 | if content == old_content: | |
400 | h.flash(_('No changes'), category='warning') |
|
400 | h.flash(_('No changes'), category='warning') | |
401 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, |
|
401 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, | |
402 | revision='tip')) |
|
402 | revision='tip')) | |
403 | try: |
|
403 | try: | |
404 | self.scm_model.commit_change(repo=c.db_repo_scm_instance, |
|
404 | self.scm_model.commit_change(repo=c.db_repo_scm_instance, | |
405 | repo_name=repo_name, cs=c.cs, |
|
405 | repo_name=repo_name, cs=c.cs, | |
406 | user=request.authuser.user_id, |
|
406 | user=request.authuser.user_id, | |
407 | author=author, message=message, |
|
407 | author=author, message=message, | |
408 | content=content, f_path=f_path) |
|
408 | content=content, f_path=f_path) | |
409 | h.flash(_('Successfully committed to %s') % f_path, |
|
409 | h.flash(_('Successfully committed to %s') % f_path, | |
410 | category='success') |
|
410 | category='success') | |
411 | except Exception: |
|
411 | except Exception: | |
412 | log.error(traceback.format_exc()) |
|
412 | log.error(traceback.format_exc()) | |
413 | h.flash(_('Error occurred during commit'), category='error') |
|
413 | h.flash(_('Error occurred during commit'), category='error') | |
414 | raise HTTPFound(location=url('changeset_home', |
|
414 | raise HTTPFound(location=url('changeset_home', | |
415 | repo_name=c.repo_name, revision='tip')) |
|
415 | repo_name=c.repo_name, revision='tip')) | |
416 |
|
416 | |||
417 | return render('files/files_edit.html') |
|
417 | return render('files/files_edit.html') | |
418 |
|
418 | |||
419 | @LoginRequired() |
|
419 | @LoginRequired() | |
420 | @HasRepoPermissionLevelDecorator('write') |
|
420 | @HasRepoPermissionLevelDecorator('write') | |
421 | def add(self, repo_name, revision, f_path): |
|
421 | def add(self, repo_name, revision, f_path): | |
422 |
|
422 | |||
423 | repo = c.db_repo |
|
423 | repo = c.db_repo | |
424 | if repo.enable_locking and repo.locked[0]: |
|
424 | if repo.enable_locking and repo.locked[0]: | |
425 | h.flash(_('This repository has been locked by %s on %s') |
|
425 | h.flash(_('This repository has been locked by %s on %s') | |
426 | % (h.person_by_id(repo.locked[0]), |
|
426 | % (h.person_by_id(repo.locked[0]), | |
427 | h.fmt_date(h.time_to_datetime(repo.locked[1]))), |
|
427 | h.fmt_date(h.time_to_datetime(repo.locked[1]))), | |
428 | 'warning') |
|
428 | 'warning') | |
429 | raise HTTPFound(location=h.url('files_home', |
|
429 | raise HTTPFound(location=h.url('files_home', | |
430 | repo_name=repo_name, revision='tip')) |
|
430 | repo_name=repo_name, revision='tip')) | |
431 |
|
431 | |||
432 | r_post = request.POST |
|
432 | r_post = request.POST | |
433 | c.cs = self.__get_cs(revision, silent_empty=True) |
|
433 | c.cs = self.__get_cs(revision, silent_empty=True) | |
434 | if c.cs is None: |
|
434 | if c.cs is None: | |
435 | c.cs = EmptyChangeset(alias=c.db_repo_scm_instance.alias) |
|
435 | c.cs = EmptyChangeset(alias=c.db_repo_scm_instance.alias) | |
436 | c.default_message = (_('Added file via Kallithea')) |
|
436 | c.default_message = (_('Added file via Kallithea')) | |
437 | c.f_path = f_path |
|
437 | c.f_path = f_path | |
438 |
|
438 | |||
439 | if r_post: |
|
439 | if r_post: | |
440 | unix_mode = 0 |
|
440 | unix_mode = 0 | |
441 | content = convert_line_endings(r_post.get('content', ''), unix_mode) |
|
441 | content = convert_line_endings(r_post.get('content', ''), unix_mode) | |
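| # newly added files are normalized to Unix line endings (mode 0) |
|
| # newly added files are normalized to Unix line endings (mode 0) | |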
442 |
|
442 | |||
443 | message = r_post.get('message') or c.default_message |
|
443 | message = r_post.get('message') or c.default_message | |
444 | filename = r_post.get('filename') |
|
444 | filename = r_post.get('filename') | |
445 | location = r_post.get('location', '') |
|
445 | location = r_post.get('location', '') | |
446 | file_obj = r_post.get('upload_file', None) |
|
446 | file_obj = r_post.get('upload_file', None) | |
447 |
|
447 | |||
448 | if file_obj is not None and hasattr(file_obj, 'filename'): |
|
448 | if file_obj is not None and hasattr(file_obj, 'filename'): | |
449 | filename = file_obj.filename |
|
449 | filename = file_obj.filename | |
450 | content = file_obj.file |
|
450 | content = file_obj.file | |
451 |
|
451 | |||
452 | if hasattr(content, 'file'): |
|
452 | if hasattr(content, 'file'): | |
453 | # non-POSIX systems store the real file under the file attr |
|
453 | # non-POSIX systems store the real file under the file attr | |
454 | content = content.file |
|
454 | content = content.file | |
455 |
|
455 | |||
456 | if not content: |
|
456 | if not content: | |
457 | h.flash(_('No content'), category='warning') |
|
457 | h.flash(_('No content'), category='warning') | |
458 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, |
|
458 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, | |
459 | revision='tip')) |
|
459 | revision='tip')) | |
460 | if not filename: |
|
460 | if not filename: | |
461 | h.flash(_('No filename'), category='warning') |
|
461 | h.flash(_('No filename'), category='warning') | |
462 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, |
|
462 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, | |
463 | revision='tip')) |
|
463 | revision='tip')) | |
464 | # strip any directory components from the filename, keep just the basename |
|
464 | # strip any directory components from the filename, keep just the basename | |
465 | filename = os.path.basename(filename) |
|
465 | filename = os.path.basename(filename) | |
466 | node_path = posixpath.join(location, filename) |
|
466 | node_path = posixpath.join(location, filename) | |
467 | author = request.authuser.full_contact |
|
467 | author = request.authuser.full_contact | |
468 |
|
468 | |||
469 | try: |
|
469 | try: | |
470 | nodes = { |
|
470 | nodes = { | |
471 | node_path: { |
|
471 | node_path: { | |
472 | 'content': content |
|
472 | 'content': content | |
473 | } |
|
473 | } | |
474 | } |
|
474 | } | |
475 | self.scm_model.create_nodes( |
|
475 | self.scm_model.create_nodes( | |
476 | user=request.authuser.user_id, repo=c.db_repo, |
|
476 | user=request.authuser.user_id, repo=c.db_repo, | |
477 | message=message, |
|
477 | message=message, | |
478 | nodes=nodes, |
|
478 | nodes=nodes, | |
479 | parent_cs=c.cs, |
|
479 | parent_cs=c.cs, | |
480 | author=author, |
|
480 | author=author, | |
481 | ) |
|
481 | ) | |
482 |
|
482 | |||
483 | h.flash(_('Successfully committed to %s') % node_path, |
|
483 | h.flash(_('Successfully committed to %s') % node_path, | |
484 | category='success') |
|
484 | category='success') | |
485 | except NonRelativePathError as e: |
|
485 | except NonRelativePathError as e: | |
486 | h.flash(_('Location must be relative path and must not ' |
|
486 | h.flash(_('Location must be relative path and must not ' | |
487 | 'contain .. in path'), category='warning') |
|
487 | 'contain .. in path'), category='warning') | |
488 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, |
|
488 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, | |
489 | revision='tip')) |
|
489 | revision='tip')) | |
490 | except (NodeError, NodeAlreadyExistsError) as e: |
|
490 | except (NodeError, NodeAlreadyExistsError) as e: | |
491 | h.flash(safe_str(e), category='error') |
|
491 | h.flash(safe_str(e), category='error') | |
492 | except Exception: |
|
492 | except Exception: | |
493 | log.error(traceback.format_exc()) |
|
493 | log.error(traceback.format_exc()) | |
494 | h.flash(_('Error occurred during commit'), category='error') |
|
494 | h.flash(_('Error occurred during commit'), category='error') | |
495 | raise HTTPFound(location=url('changeset_home', |
|
495 | raise HTTPFound(location=url('changeset_home', | |
496 | repo_name=c.repo_name, revision='tip')) |
|
496 | repo_name=c.repo_name, revision='tip')) | |
497 |
|
497 | |||
498 | return render('files/files_add.html') |
|
498 | return render('files/files_add.html') | |
499 |
|
499 | |||
500 | @LoginRequired() |
|
500 | @LoginRequired() | |
501 | @HasRepoPermissionLevelDecorator('read') |
|
501 | @HasRepoPermissionLevelDecorator('read') | |
502 | def archivefile(self, repo_name, fname): |
|
502 | def archivefile(self, repo_name, fname): | |
503 | fileformat = None |
|
503 | fileformat = None | |
504 | revision = None |
|
504 | revision = None | |
505 | ext = None |
|
505 | ext = None | |
506 | subrepos = request.GET.get('subrepos') == 'true' |
|
506 | subrepos = request.GET.get('subrepos') == 'true' | |
507 |
|
507 | |||
508 | for a_type, ext_data in settings.ARCHIVE_SPECS.items(): |
|
508 | for a_type, ext_data in settings.ARCHIVE_SPECS.items(): | |
509 | archive_spec = fname.split(ext_data[1]) |
|
509 | archive_spec = fname.split(ext_data[1]) | |
510 | if len(archive_spec) == 2 and archive_spec[1] == '': |
|
510 | if len(archive_spec) == 2 and archive_spec[1] == '': | |
511 | fileformat = a_type or ext_data[1] |
|
511 | fileformat = a_type or ext_data[1] | |
512 | revision = archive_spec[0] |
|
512 | revision = archive_spec[0] | |
513 | ext = ext_data[1] |
|
513 | ext = ext_data[1] | |
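| # a matching fname ends with this extension, so split() yields exactly |
|
| # a matching fname ends with this extension, so split() yields exactly | |
| # ['<revision>', ''] - recovering both the revision and the format |
|
| # ['<revision>', ''] - recovering both the revision and the format | |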
514 |
|
514 | |||
515 | try: |
|
515 | try: | |
516 | dbrepo = RepoModel().get_by_repo_name(repo_name) |
|
516 | dbrepo = RepoModel().get_by_repo_name(repo_name) | |
517 | if not dbrepo.enable_downloads: |
|
517 | if not dbrepo.enable_downloads: | |
518 | return _('Downloads disabled') # TODO: do something else? |
|
518 | return _('Downloads disabled') # TODO: do something else? | |
519 |
|
519 | |||
520 | if c.db_repo_scm_instance.alias == 'hg': |
|
520 | if c.db_repo_scm_instance.alias == 'hg': | |
521 | # patch and reset hooks section of UI config to not run any |
|
521 | # patch and reset hooks section of UI config to not run any | |
522 | # hooks on fetching archives with subrepos |
|
522 | # hooks on fetching archives with subrepos | |
523 | for k, v in c.db_repo_scm_instance._repo.ui.configitems('hooks'): |
|
523 | for k, v in c.db_repo_scm_instance._repo.ui.configitems('hooks'): | |
524 | c.db_repo_scm_instance._repo.ui.setconfig('hooks', k, None) |
|
524 | c.db_repo_scm_instance._repo.ui.setconfig('hooks', k, None) | |
525 |
|
525 | |||
526 | cs = c.db_repo_scm_instance.get_changeset(revision) |
|
526 | cs = c.db_repo_scm_instance.get_changeset(revision) | |
527 | content_type = settings.ARCHIVE_SPECS[fileformat][0] |
|
527 | content_type = settings.ARCHIVE_SPECS[fileformat][0] | |
528 | except ChangesetDoesNotExistError: |
|
528 | except ChangesetDoesNotExistError: | |
529 | return _('Unknown revision %s') % revision |
|
529 | return _('Unknown revision %s') % revision | |
530 | except EmptyRepositoryError: |
|
530 | except EmptyRepositoryError: | |
531 | return _('Empty repository') |
|
531 | return _('Empty repository') | |
532 | except (ImproperArchiveTypeError, KeyError): |
|
532 | except (ImproperArchiveTypeError, KeyError): | |
533 | return _('Unknown archive type') |
|
533 | return _('Unknown archive type') | |
534 |
|
534 | |||
535 | from kallithea import CONFIG |
|
535 | from kallithea import CONFIG | |
536 | rev_name = cs.raw_id[:12] |
|
536 | rev_name = cs.raw_id[:12] | |
537 | archive_name = '%s-%s%s' % (safe_str(repo_name.replace('/', '_')), |
|
537 | archive_name = '%s-%s%s' % (safe_str(repo_name.replace('/', '_')), | |
538 | safe_str(rev_name), ext) |
|
538 | safe_str(rev_name), ext) | |
539 |
|
539 | |||
540 | archive_path = None |
|
540 | archive_path = None | |
541 | cached_archive_path = None |
|
541 | cached_archive_path = None | |
542 | archive_cache_dir = CONFIG.get('archive_cache_dir') |
|
542 | archive_cache_dir = CONFIG.get('archive_cache_dir') | |
543 | if archive_cache_dir and not subrepos: # TODO: subrepo caching? |
|
543 | if archive_cache_dir and not subrepos: # TODO: subrepo caching? | |
544 | if not os.path.isdir(archive_cache_dir): |
|
544 | if not os.path.isdir(archive_cache_dir): | |
545 | os.makedirs(archive_cache_dir) |
|
545 | os.makedirs(archive_cache_dir) | |
546 | cached_archive_path = os.path.join(archive_cache_dir, archive_name) |
|
546 | cached_archive_path = os.path.join(archive_cache_dir, archive_name) | |
547 | if os.path.isfile(cached_archive_path): |
|
547 | if os.path.isfile(cached_archive_path): | |
548 | log.debug('Found cached archive in %s', cached_archive_path) |
|
548 | log.debug('Found cached archive in %s', cached_archive_path) | |
549 | archive_path = cached_archive_path |
|
549 | archive_path = cached_archive_path | |
550 | else: |
|
550 | else: | |
551 | log.debug('Archive %s is not yet cached', archive_name) |
|
551 | log.debug('Archive %s is not yet cached', archive_name) | |
552 |
|
552 | |||
553 | if archive_path is None: |
|
553 | if archive_path is None: | |
554 | # generate new archive |
|
554 | # generate new archive | |
555 | fd, archive_path = tempfile.mkstemp() |
|
555 | fd, archive_path = tempfile.mkstemp() | |
556 | log.debug('Creating new temp archive in %s', archive_path) |
|
556 | log.debug('Creating new temp archive in %s', archive_path) | |
557 | with os.fdopen(fd, 'wb') as stream: |
|
557 | with os.fdopen(fd, 'wb') as stream: | |
558 | cs.fill_archive(stream=stream, kind=fileformat, subrepos=subrepos) |
|
558 | cs.fill_archive(stream=stream, kind=fileformat, subrepos=subrepos) | |
559 | # stream (and thus fd) has been closed by cs.fill_archive |
|
559 | # stream (and thus fd) has been closed by cs.fill_archive | |
560 | if cached_archive_path is not None: |
|
560 | if cached_archive_path is not None: | |
561 | # we generated the archive - move it to cache |
|
561 | # we generated the archive - move it to cache | |
562 | log.debug('Storing new archive in %s', cached_archive_path) |
|
562 | log.debug('Storing new archive in %s', cached_archive_path) | |
563 | shutil.move(archive_path, cached_archive_path) |
|
563 | shutil.move(archive_path, cached_archive_path) | |
564 | archive_path = cached_archive_path |
|
564 | archive_path = cached_archive_path | |
565 |
|
565 | |||
566 | def get_chunked_archive(archive_path): |
|
566 | def get_chunked_archive(archive_path): | |
567 | stream = open(archive_path, 'rb') |
|
567 | stream = open(archive_path, 'rb') | |
568 | while True: |
|
568 | while True: | |
569 | data = stream.read(16 * 1024) |
|
569 | data = stream.read(16 * 1024) | |
570 | if not data: |
|
570 | if not data: | |
571 | break |
|
571 | break | |
572 | yield data |
|
572 | yield data | |
573 | stream.close() |
|
573 | stream.close() | |
574 | if archive_path != cached_archive_path: |
|
574 | if archive_path != cached_archive_path: | |
575 | log.debug('Destroying temp archive %s', archive_path) |
|
575 | log.debug('Destroying temp archive %s', archive_path) | |
576 | os.remove(archive_path) |
|
576 | os.remove(archive_path) | |
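| # note: this cleanup only runs if the generator is consumed to the |
|
| # note: this cleanup only runs if the generator is consumed to the | |
| # end by the WSGI server streaming the response |
|
| # end by the WSGI server streaming the response | |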
577 |
|
577 | |||
578 | action_logger(user=request.authuser, |
|
578 | action_logger(user=request.authuser, | |
579 | action='user_downloaded_archive:%s' % (archive_name), |
|
579 | action='user_downloaded_archive:%s' % (archive_name), | |
580 | repo=repo_name, ipaddr=request.ip_addr, commit=True) |
|
580 | repo=repo_name, ipaddr=request.ip_addr, commit=True) | |
581 |
|
581 | |||
582 | response.content_disposition = str('attachment; filename=%s' % (archive_name)) |
|
582 | response.content_disposition = str('attachment; filename=%s' % (archive_name)) | |
583 | response.content_type = str(content_type) |
|
583 | response.content_type = str(content_type) | |
584 | return get_chunked_archive(archive_path) |
|
584 | return get_chunked_archive(archive_path) | |
585 |
|
585 | |||
586 | @LoginRequired() |
|
586 | @LoginRequired() | |
587 | @HasRepoPermissionLevelDecorator('read') |
|
587 | @HasRepoPermissionLevelDecorator('read') | |
588 | def diff(self, repo_name, f_path): |
|
588 | def diff(self, repo_name, f_path): | |
589 | ignore_whitespace = request.GET.get('ignorews') == '1' |
|
589 | ignore_whitespace = request.GET.get('ignorews') == '1' | |
590 | line_context = safe_int(request.GET.get('context'), 3) |
|
590 | line_context = safe_int(request.GET.get('context'), 3) | |
591 | diff2 = request.GET.get('diff2', '') |
|
591 | diff2 = request.GET.get('diff2', '') | |
592 | diff1 = request.GET.get('diff1', '') or diff2 |
|
592 | diff1 = request.GET.get('diff1', '') or diff2 | |
593 | c.action = request.GET.get('diff') |
|
593 | c.action = request.GET.get('diff') | |
594 | c.no_changes = diff1 == diff2 |
|
594 | c.no_changes = diff1 == diff2 | |
595 | c.f_path = f_path |
|
595 | c.f_path = f_path | |
596 | c.big_diff = False |
|
596 | c.big_diff = False | |
597 | fulldiff = request.GET.get('fulldiff') |
|
597 | fulldiff = request.GET.get('fulldiff') | |
598 | c.anchor_url = anchor_url |
|
598 | c.anchor_url = anchor_url | |
599 | c.ignorews_url = _ignorews_url |
|
599 | c.ignorews_url = _ignorews_url | |
600 | c.context_url = _context_url |
|
600 | c.context_url = _context_url | |
601 | c.changes = OrderedDict() |
|
601 | c.changes = OrderedDict() | |
602 | c.changes[diff2] = [] |
|
602 | c.changes[diff2] = [] | |
603 |
|
603 | |||
604 | # special case: if we only want to show a revision, it is implemented here |
|
604 | # special case: if we only want to show a revision, it is implemented here | |
605 | # to reduce JS and callbacks |
|
605 | # to reduce JS and callbacks | |
606 |
|
606 | |||
607 | if request.GET.get('show_rev'): |
|
607 | if request.GET.get('show_rev'): | |
608 | if str2bool(request.GET.get('annotate', 'False')): |
|
608 | if str2bool(request.GET.get('annotate', 'False')): | |
609 | _url = url('files_annotate_home', repo_name=c.repo_name, |
|
609 | _url = url('files_annotate_home', repo_name=c.repo_name, | |
610 | revision=diff1, f_path=c.f_path) |
|
610 | revision=diff1, f_path=c.f_path) | |
611 | else: |
|
611 | else: | |
612 | _url = url('files_home', repo_name=c.repo_name, |
|
612 | _url = url('files_home', repo_name=c.repo_name, | |
613 | revision=diff1, f_path=c.f_path) |
|
613 | revision=diff1, f_path=c.f_path) | |
614 |
|
614 | |||
615 | raise HTTPFound(location=_url) |
|
615 | raise HTTPFound(location=_url) | |
616 | try: |
|
616 | try: | |
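| # '0' * 12 and '0' * 40 are the short and full zero hashes used as |
|
| # '0' * 12 and '0' * 40 are the short and full zero hashes used as | |
| # null-revision markers; those fall through to an EmptyChangeset |
|
| # null-revision markers; those fall through to an EmptyChangeset | |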
617 | if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]: |
|
617 | if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]: | |
618 | c.changeset_1 = c.db_repo_scm_instance.get_changeset(diff1) |
|
618 | c.changeset_1 = c.db_repo_scm_instance.get_changeset(diff1) | |
619 | try: |
|
619 | try: | |
620 | node1 = c.changeset_1.get_node(f_path) |
|
620 | node1 = c.changeset_1.get_node(f_path) | |
621 | if node1.is_dir(): |
|
621 | if node1.is_dir(): | |
622 | raise NodeError('%s path is a %s not a file' |
|
622 | raise NodeError('%s path is a %s not a file' | |
623 | % (node1, type(node1))) |
|
623 | % (node1, type(node1))) | |
624 | except NodeDoesNotExistError: |
|
624 | except NodeDoesNotExistError: | |
625 | c.changeset_1 = EmptyChangeset(cs=diff1, |
|
625 | c.changeset_1 = EmptyChangeset(cs=diff1, | |
626 | revision=c.changeset_1.revision, |
|
626 | revision=c.changeset_1.revision, | |
627 | repo=c.db_repo_scm_instance) |
|
627 | repo=c.db_repo_scm_instance) | |
628 | node1 = FileNode(f_path, '', changeset=c.changeset_1) |
|
628 | node1 = FileNode(f_path, '', changeset=c.changeset_1) | |
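| # the file is absent in that changeset, so fall back to diffing |
|
| # the file is absent in that changeset, so fall back to diffing | |
| # against an empty file node |
|
| # against an empty file node | |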
629 | else: |
|
629 | else: | |
630 | c.changeset_1 = EmptyChangeset(repo=c.db_repo_scm_instance) |
|
630 | c.changeset_1 = EmptyChangeset(repo=c.db_repo_scm_instance) | |
631 | node1 = FileNode(f_path, '', changeset=c.changeset_1) |
|
631 | node1 = FileNode(f_path, '', changeset=c.changeset_1) | |
632 |
|
632 | |||
633 | if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]: |
|
633 | if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]: | |
634 | c.changeset_2 = c.db_repo_scm_instance.get_changeset(diff2) |
|
634 | c.changeset_2 = c.db_repo_scm_instance.get_changeset(diff2) | |
635 | try: |
|
635 | try: | |
636 | node2 = c.changeset_2.get_node(f_path) |
|
636 | node2 = c.changeset_2.get_node(f_path) | |
637 | if node2.is_dir(): |
|
637 | if node2.is_dir(): | |
638 | raise NodeError('%s path is a %s not a file' |
|
638 | raise NodeError('%s path is a %s not a file' | |
639 | % (node2, type(node2))) |
|
639 | % (node2, type(node2))) | |
640 | except NodeDoesNotExistError: |
|
640 | except NodeDoesNotExistError: | |
641 | c.changeset_2 = EmptyChangeset(cs=diff2, |
|
641 | c.changeset_2 = EmptyChangeset(cs=diff2, | |
642 | revision=c.changeset_2.revision, |
|
642 | revision=c.changeset_2.revision, | |
643 | repo=c.db_repo_scm_instance) |
|
643 | repo=c.db_repo_scm_instance) | |
644 | node2 = FileNode(f_path, '', changeset=c.changeset_2) |
|
644 | node2 = FileNode(f_path, '', changeset=c.changeset_2) | |
645 | else: |
|
645 | else: | |
646 | c.changeset_2 = EmptyChangeset(repo=c.db_repo_scm_instance) |
|
646 | c.changeset_2 = EmptyChangeset(repo=c.db_repo_scm_instance) | |
647 | node2 = FileNode(f_path, '', changeset=c.changeset_2) |
|
647 | node2 = FileNode(f_path, '', changeset=c.changeset_2) | |
648 | except (RepositoryError, NodeError): |
|
648 | except (RepositoryError, NodeError): | |
649 | log.error(traceback.format_exc()) |
|
649 | log.error(traceback.format_exc()) | |
650 | raise HTTPFound(location=url('files_home', repo_name=c.repo_name, |
|
650 | raise HTTPFound(location=url('files_home', repo_name=c.repo_name, | |
651 | f_path=f_path)) |
|
651 | f_path=f_path)) | |
652 |
|
652 | |||
653 | if c.action == 'download': |
|
653 | if c.action == 'download': | |
654 | _diff = diffs.get_gitdiff(node1, node2, |
|
654 | raw_diff = diffs.get_gitdiff(node1, node2, | |
655 | ignore_whitespace=ignore_whitespace, |
|
655 | ignore_whitespace=ignore_whitespace, | |
656 | context=line_context) |
|
656 | context=line_context) | |
657 | diff = diffs.DiffProcessor(_diff) |
|
|||
658 |
|
||||
659 | diff_name = '%s_vs_%s.diff' % (diff1, diff2) |
|
657 | diff_name = '%s_vs_%s.diff' % (diff1, diff2) | |
660 | response.content_type = 'text/plain' |
|
658 | response.content_type = 'text/plain' | |
661 | response.content_disposition = ( |
|
659 | response.content_disposition = ( | |
662 | 'attachment; filename=%s' % diff_name |
|
660 | 'attachment; filename=%s' % diff_name | |
663 | ) |
|
661 | ) | |
664 | return diff |
|
662 | return raw_diff | |
665 |
|
663 | |||
666 | elif c.action == 'raw': |
|
664 | elif c.action == 'raw': | |
667 | _diff = diffs.get_gitdiff(node1, node2, |
|
665 | raw_diff = diffs.get_gitdiff(node1, node2, | |
668 | ignore_whitespace=ignore_whitespace, |
|
666 | ignore_whitespace=ignore_whitespace, | |
669 | context=line_context) |
|
667 | context=line_context) | |
670 | diff = diffs.DiffProcessor(_diff) |
|
|||
671 | response.content_type = 'text/plain' |
|
668 | response.content_type = 'text/plain' | |
672 | return diff |
|
669 | return raw_diff | |
673 |
|
670 | |||
674 | else: |
|
671 | else: | |
675 | fid = h.FID(diff2, node2.path) |
|
672 | fid = h.FID(diff2, node2.path) | |
676 | line_context_lcl = get_line_ctx(fid, request.GET) |
|
673 | line_context_lcl = get_line_ctx(fid, request.GET) | |
677 | ign_whitespace_lcl = get_ignore_ws(fid, request.GET) |
|
674 | ign_whitespace_lcl = get_ignore_ws(fid, request.GET) | |
678 |
|
675 | |||
679 | diff_limit = None if fulldiff else self.cut_off_limit |
|
676 | diff_limit = None if fulldiff else self.cut_off_limit | |
680 | c.a_rev, c.cs_rev, a_path, diff, st, op = diffs.wrapped_diff(filenode_old=node1, |
|
677 | c.a_rev, c.cs_rev, a_path, diff, st, op = diffs.wrapped_diff(filenode_old=node1, | |
681 | filenode_new=node2, |
|
678 | filenode_new=node2, | |
682 | diff_limit=diff_limit, |
|
679 | diff_limit=diff_limit, | |
683 | ignore_whitespace=ign_whitespace_lcl, |
|
680 | ignore_whitespace=ign_whitespace_lcl, | |
684 | line_context=line_context_lcl, |
|
681 | line_context=line_context_lcl, | |
685 | enable_comments=False) |
|
682 | enable_comments=False) | |
686 | c.file_diff_data = [(fid, fid, op, a_path, node2.path, diff, st)] |
|
683 | c.file_diff_data = [(fid, fid, op, a_path, node2.path, diff, st)] | |
687 |
|
684 | |||
688 | return render('files/file_diff.html') |
|
685 | return render('files/file_diff.html') | |
689 |
|
686 | |||
690 | @LoginRequired() |
|
687 | @LoginRequired() | |
691 | @HasRepoPermissionLevelDecorator('read') |
|
688 | @HasRepoPermissionLevelDecorator('read') | |
692 | def diff_2way(self, repo_name, f_path): |
|
689 | def diff_2way(self, repo_name, f_path): | |
693 | diff1 = request.GET.get('diff1', '') |
|
690 | diff1 = request.GET.get('diff1', '') | |
694 | diff2 = request.GET.get('diff2', '') |
|
691 | diff2 = request.GET.get('diff2', '') | |
695 | try: |
|
692 | try: | |
696 | if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]: |
|
693 | if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]: | |
697 | c.changeset_1 = c.db_repo_scm_instance.get_changeset(diff1) |
|
694 | c.changeset_1 = c.db_repo_scm_instance.get_changeset(diff1) | |
698 | try: |
|
695 | try: | |
699 | node1 = c.changeset_1.get_node(f_path) |
|
696 | node1 = c.changeset_1.get_node(f_path) | |
700 | if node1.is_dir(): |
|
697 | if node1.is_dir(): | |
701 | raise NodeError('%s path is a %s not a file' |
|
698 | raise NodeError('%s path is a %s not a file' | |
702 | % (node1, type(node1))) |
|
699 | % (node1, type(node1))) | |
703 | except NodeDoesNotExistError: |
|
700 | except NodeDoesNotExistError: | |
704 | c.changeset_1 = EmptyChangeset(cs=diff1, |
|
701 | c.changeset_1 = EmptyChangeset(cs=diff1, | |
705 | revision=c.changeset_1.revision, |
|
702 | revision=c.changeset_1.revision, | |
706 | repo=c.db_repo_scm_instance) |
|
703 | repo=c.db_repo_scm_instance) | |
707 | node1 = FileNode(f_path, '', changeset=c.changeset_1) |
|
704 | node1 = FileNode(f_path, '', changeset=c.changeset_1) | |
708 | else: |
|
705 | else: | |
709 | c.changeset_1 = EmptyChangeset(repo=c.db_repo_scm_instance) |
|
706 | c.changeset_1 = EmptyChangeset(repo=c.db_repo_scm_instance) | |
710 | node1 = FileNode(f_path, '', changeset=c.changeset_1) |
|
707 | node1 = FileNode(f_path, '', changeset=c.changeset_1) | |
711 |
|
708 | |||
712 | if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]: |
|
709 | if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]: | |
713 | c.changeset_2 = c.db_repo_scm_instance.get_changeset(diff2) |
|
710 | c.changeset_2 = c.db_repo_scm_instance.get_changeset(diff2) | |
714 | try: |
|
711 | try: | |
715 | node2 = c.changeset_2.get_node(f_path) |
|
712 | node2 = c.changeset_2.get_node(f_path) | |
716 | if node2.is_dir(): |
|
713 | if node2.is_dir(): | |
717 | raise NodeError('%s path is a %s not a file' |
|
714 | raise NodeError('%s path is a %s not a file' | |
718 | % (node2, type(node2))) |
|
715 | % (node2, type(node2))) | |
719 | except NodeDoesNotExistError: |
|
716 | except NodeDoesNotExistError: | |
720 | c.changeset_2 = EmptyChangeset(cs=diff2, |
|
717 | c.changeset_2 = EmptyChangeset(cs=diff2, | |
721 | revision=c.changeset_2.revision, |
|
718 | revision=c.changeset_2.revision, | |
722 | repo=c.db_repo_scm_instance) |
|
719 | repo=c.db_repo_scm_instance) | |
723 | node2 = FileNode(f_path, '', changeset=c.changeset_2) |
|
720 | node2 = FileNode(f_path, '', changeset=c.changeset_2) | |
724 | else: |
|
721 | else: | |
725 | c.changeset_2 = EmptyChangeset(repo=c.db_repo_scm_instance) |
|
722 | c.changeset_2 = EmptyChangeset(repo=c.db_repo_scm_instance) | |
726 | node2 = FileNode(f_path, '', changeset=c.changeset_2) |
|
723 | node2 = FileNode(f_path, '', changeset=c.changeset_2) | |
727 | except ChangesetDoesNotExistError as e: |
|
724 | except ChangesetDoesNotExistError as e: | |
728 | msg = _('Such revision does not exist for this repository') |
|
725 | msg = _('Such revision does not exist for this repository') | |
729 | h.flash(msg, category='error') |
|
726 | h.flash(msg, category='error') | |
730 | raise HTTPNotFound() |
|
727 | raise HTTPNotFound() | |
731 | c.node1 = node1 |
|
728 | c.node1 = node1 | |
732 | c.node2 = node2 |
|
729 | c.node2 = node2 | |
733 | c.cs1 = c.changeset_1 |
|
730 | c.cs1 = c.changeset_1 | |
734 | c.cs2 = c.changeset_2 |
|
731 | c.cs2 = c.changeset_2 | |
735 |
|
732 | |||
736 | return render('files/diff_2way.html') |
|
733 | return render('files/diff_2way.html') | |
737 |
|
734 | |||
738 | def _get_node_history(self, cs, f_path, changesets=None): |
|
735 | def _get_node_history(self, cs, f_path, changesets=None): | |
739 | """ |
|
736 | """ | |
740 | Get the changeset history for the given node |
|
737 | Get the changeset history for the given node | |
741 |
|
738 | |||
742 | :param cs: changeset to calculate history from |
|
739 | :param cs: changeset to calculate history from | |
743 | :param f_path: path of the node to calculate history for |
|
740 | :param f_path: path of the node to calculate history for | |
744 | :param changesets: if passed, don't calculate history but use |
|
741 | :param changesets: if passed, don't calculate history but use | |
745 | the changesets given in this list |
|
742 | the changesets given in this list | |
746 | """ |
|
743 | """ | |
747 | # calculate history based on tip |
|
744 | # calculate history based on tip | |
748 | tip_cs = c.db_repo_scm_instance.get_changeset() |
|
745 | tip_cs = c.db_repo_scm_instance.get_changeset() | |
749 | if changesets is None: |
|
746 | if changesets is None: | |
750 | try: |
|
747 | try: | |
751 | changesets = tip_cs.get_file_history(f_path) |
|
748 | changesets = tip_cs.get_file_history(f_path) | |
752 | except (NodeDoesNotExistError, ChangesetError): |
|
749 | except (NodeDoesNotExistError, ChangesetError): | |
753 | # this node is not present at tip! |
|
750 | # this node is not present at tip! | |
754 | changesets = cs.get_file_history(f_path) |
|
751 | changesets = cs.get_file_history(f_path) | |
755 | hist_l = [] |
|
752 | hist_l = [] | |
756 |
|
753 | |||
757 | changesets_group = ([], _("Changesets")) |
|
754 | changesets_group = ([], _("Changesets")) | |
758 | branches_group = ([], _("Branches")) |
|
755 | branches_group = ([], _("Branches")) | |
759 | tags_group = ([], _("Tags")) |
|
756 | tags_group = ([], _("Tags")) | |
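| # each group is an (entries, label) pair, where entries are |
|
| # each group is an (entries, label) pair, where entries are | |
| # (raw_id, description) tuples rendered as a grouped select list |
|
| # (raw_id, description) tuples rendered as a grouped select list | |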
760 | for chs in changesets: |
|
757 | for chs in changesets: | |
761 | #_branch = '(%s)' % chs.branch if (cs.repository.alias == 'hg') else '' |
|
758 | #_branch = '(%s)' % chs.branch if (cs.repository.alias == 'hg') else '' | |
762 | _branch = chs.branch |
|
759 | _branch = chs.branch | |
763 | n_desc = '%s (%s)' % (h.show_id(chs), _branch) |
|
760 | n_desc = '%s (%s)' % (h.show_id(chs), _branch) | |
764 | changesets_group[0].append((chs.raw_id, n_desc,)) |
|
761 | changesets_group[0].append((chs.raw_id, n_desc,)) | |
765 | hist_l.append(changesets_group) |
|
762 | hist_l.append(changesets_group) | |
766 |
|
763 | |||
767 | for name, chs in c.db_repo_scm_instance.branches.items(): |
|
764 | for name, chs in c.db_repo_scm_instance.branches.items(): | |
768 | branches_group[0].append((chs, name),) |
|
765 | branches_group[0].append((chs, name),) | |
769 | hist_l.append(branches_group) |
|
766 | hist_l.append(branches_group) | |
770 |
|
767 | |||
771 | for name, chs in c.db_repo_scm_instance.tags.items(): |
|
768 | for name, chs in c.db_repo_scm_instance.tags.items(): | |
772 | tags_group[0].append((chs, name),) |
|
769 | tags_group[0].append((chs, name),) | |
773 | hist_l.append(tags_group) |
|
770 | hist_l.append(tags_group) | |
774 |
|
771 | |||
775 | return hist_l, changesets |
|
772 | return hist_l, changesets | |
776 |
|
773 | |||
777 | @LoginRequired() |
|
774 | @LoginRequired() | |
778 | @HasRepoPermissionLevelDecorator('read') |
|
775 | @HasRepoPermissionLevelDecorator('read') | |
779 | @jsonify |
|
776 | @jsonify | |
780 | def nodelist(self, repo_name, revision, f_path): |
|
777 | def nodelist(self, repo_name, revision, f_path): | |
781 | if request.environ.get('HTTP_X_PARTIAL_XHR'): |
|
778 | if request.environ.get('HTTP_X_PARTIAL_XHR'): | |
782 | cs = self.__get_cs(revision) |
|
779 | cs = self.__get_cs(revision) | |
783 | _d, _f = ScmModel().get_nodes(repo_name, cs.raw_id, f_path, |
|
780 | _d, _f = ScmModel().get_nodes(repo_name, cs.raw_id, f_path, | |
784 | flat=False) |
|
781 | flat=False) | |
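| # get_nodes returns directory and file nodes separately; directories |
|
| # get_nodes returns directory and file nodes separately; directories | |
| # are listed first in the combined result |
|
| # are listed first in the combined result | |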
785 | return {'nodes': _d + _f} |
|
782 | return {'nodes': _d + _f} |
@@ -1,713 +1,707 b'' | |||||
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""
kallithea.lib.diffs
~~~~~~~~~~~~~~~~~~~

Set of diffing helpers, previously part of vcs


This file was forked by the Kallithea project in July 2014.
Original author and date, and relevant copyright and licensing information is below:
:created_on: Dec 4, 2011
:author: marcink
:copyright: (c) 2013 RhodeCode GmbH, and others.
:license: GPLv3, see LICENSE.md for more details.
"""
import re
import difflib
import logging

from itertools import imap

from tg.i18n import ugettext as _

from kallithea.lib.vcs.exceptions import VCSError
from kallithea.lib.vcs.nodes import FileNode, SubModuleNode
from kallithea.lib.vcs.backends.base import EmptyChangeset
from kallithea.lib.helpers import escape
from kallithea.lib.utils2 import safe_unicode

log = logging.getLogger(__name__)


def wrap_to_table(html):
    """Given a string with html, return it wrapped in a table, similar to what
    DiffProcessor returns."""
    return '''\
<table class="code-difftable">
  <tr class="line no-comment">
    <td class="lineno new"></td>
    <td class="code no-comment"><pre>%s</pre></td>
  </tr>
</table>''' % html


def wrapped_diff(filenode_old, filenode_new, diff_limit=None,
                 ignore_whitespace=True, line_context=3,
                 enable_comments=False):
    """
    Returns a file diff wrapped into a table.
    Checks for diff_limit and presents a message if the diff is too big.
    """
    if filenode_old is None:
        filenode_old = FileNode(filenode_new.path, '', EmptyChangeset())

    op = None
    a_path = filenode_old.path  # default, might be overridden by actual rename in diff
    if filenode_old.is_binary or filenode_new.is_binary:
        diff = wrap_to_table(_('Binary file'))
        stats = (0, 0)

    elif diff_limit != -1 and (
            diff_limit is None or
            (filenode_old.size < diff_limit and filenode_new.size < diff_limit)):

        f_gitdiff = get_gitdiff(filenode_old, filenode_new,
                                ignore_whitespace=ignore_whitespace,
                                context=line_context)
        diff_processor = DiffProcessor(f_gitdiff)
        _parsed = diff_processor.prepare()
        if _parsed:  # there should be exactly one element, for the specified file
            f = _parsed[0]
            op = f['operation']
            a_path = f['old_filename']

        diff = diff_processor.as_html(enable_comments=enable_comments)
        stats = diff_processor.stat()

    else:
        diff = wrap_to_table(_('Changeset was too big and was cut off, use '
                               'diff menu to display this diff'))
        stats = (0, 0)

    if not diff:
        submodules = filter(lambda o: isinstance(o, SubModuleNode),
                            [filenode_new, filenode_old])
        if submodules:
            diff = wrap_to_table(escape('Submodule %r' % submodules[0]))
        else:
            diff = wrap_to_table(_('No changes detected'))

    cs1 = filenode_old.changeset.raw_id
    cs2 = filenode_new.changeset.raw_id

    return cs1, cs2, a_path, diff, stats, op


def get_gitdiff(filenode_old, filenode_new, ignore_whitespace=True, context=3):
    """
    Returns git style diff between given ``filenode_old`` and ``filenode_new``.
    """
    # make sure we pass in default context
    context = context or 3
    submodules = filter(lambda o: isinstance(o, SubModuleNode),
                        [filenode_new, filenode_old])
    if submodules:
        return ''

    for filenode in (filenode_old, filenode_new):
        if not isinstance(filenode, FileNode):
            raise VCSError("Given object should be FileNode object, not %s"
                           % filenode.__class__)

    repo = filenode_new.changeset.repository
    old_raw_id = getattr(filenode_old.changeset, 'raw_id', repo.EMPTY_CHANGESET)
    new_raw_id = getattr(filenode_new.changeset, 'raw_id', repo.EMPTY_CHANGESET)

    vcs_gitdiff = repo.get_diff(old_raw_id, new_raw_id, filenode_new.path,
                                ignore_whitespace, context)
    return vcs_gitdiff
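
A minimal usage sketch for get_gitdiff, assuming a repository on disk; the path, changeset ids, and file name are placeholders, and get_repo/get_changeset/get_node are the vcs API calls the FileNode arguments come from:

    from kallithea.lib.vcs import get_repo

    repo = get_repo('/srv/repos/example')            # placeholder path
    old_node = repo.get_changeset('a1b2c3d4e5f6').get_node('setup.py')
    new_node = repo.get_changeset('f6e5d4c3b2a1').get_node('setup.py')
    raw_diff = get_gitdiff(old_node, new_node,
                           ignore_whitespace=False, context=5)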

NEW_FILENODE = 1
DEL_FILENODE = 2
MOD_FILENODE = 3
RENAMED_FILENODE = 4
COPIED_FILENODE = 5
CHMOD_FILENODE = 6
BIN_FILENODE = 7


class DiffLimitExceeded(Exception):
    pass


class LimitedDiffContainer(object):

    def __init__(self, diff_limit, cur_diff_size, diff):
        self.diff = diff
        self.diff_limit = diff_limit
        self.cur_diff_size = cur_diff_size

    def __iter__(self):
        for l in self.diff:
            yield l


class DiffProcessor(object):
    """
    Give it a unified or git diff and it returns a list of the files that were
    mentioned in the diff together with a dict of meta information that
    can be used to render it in a HTML template.
    """
    _chunk_re = re.compile(r'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)')
    _newline_marker = re.compile(r'^\\ No newline at end of file')
    _git_header_re = re.compile(r"""
        # has already been split on this:
        # ^diff[ ]--git
        [ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
        (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
           ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
        (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n
           ^rename[ ]from[ ](?P<rename_from>.+)\n
           ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))?
        (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
        (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
        (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
            \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
        (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))?
        (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))?
        (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))?
    """, re.VERBOSE | re.MULTILINE)
    _hg_header_re = re.compile(r"""
        # has already been split on this:
        # ^diff[ ]--git
        [ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
        (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
           ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
        (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%(?:\n|$))?
        (?:^rename[ ]from[ ](?P<rename_from>.+)\n
           ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))?
        (?:^copy[ ]from[ ](?P<copy_from>.+)\n
           ^copy[ ]to[ ](?P<copy_to>.+)(?:\n|$))?
        (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
        (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
        (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
            \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
        (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))?
        (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))?
        (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))?
    """, re.VERBOSE | re.MULTILINE)

    # Used for inline highlighter word split, must match the substitutions in _escaper
    _token_re = re.compile(r'()(&amp;|&lt;|&gt;|<u>\t</u>|<u class="cr"></u>| <i></i>|\W+?)')

    _escape_re = re.compile(r'(&)|(<)|(>)|(\t)|(\r)|(?<=.)( \n| $)')

    def __init__(self, diff, vcs='hg', diff_limit=None):
        """
        :param diff: a text in diff format
        :param vcs: type of version control, hg or git
        :param diff_limit: defines the size of diff that is considered "big",
            beyond which the cut-off is triggered; set to None to show the
            full diff
        """
        if not isinstance(diff, basestring):
            raise Exception('Diff must be a basestring, got %s instead' % type(diff))

        self._diff = diff
        self.adds = 0
        self.removes = 0
        # calculate diff size
        self.diff_size = len(diff)
        self.diff_limit = diff_limit
        self.cur_diff_size = 0
        self.parsed = False
        self.parsed_diff = []
        self.vcs = vcs

    def _escaper(self, string):
        """
        Do HTML escaping/markup and check the diff limit
        """
        self.cur_diff_size += len(string)

        # escaper gets iterated on each .next() call, checking that the
        # parsed lines don't make the diff exceed the diff limit
        if self.diff_limit is not None and self.cur_diff_size > self.diff_limit:
            raise DiffLimitExceeded('Diff Limit Exceeded')

        def substitute(m):
            groups = m.groups()
            if groups[0]:
                return '&amp;'
            if groups[1]:
                return '&lt;'
            if groups[2]:
                return '&gt;'
            if groups[3]:
                return '<u>\t</u>'
            if groups[4]:
                return '<u class="cr"></u>'
            if groups[5]:
                return ' <i></i>'
            assert False

        return self._escape_re.sub(substitute, safe_unicode(string))

    def _highlight_inline_diff(self, old, new):
        """
        Highlight simple add/remove in two lines given as info dicts. They are
        modified in place and given markup with <del>/<ins>.
        """
        assert old['action'] == 'del'
        assert new['action'] == 'add'

        oldwords = self._token_re.split(old['line'])
        newwords = self._token_re.split(new['line'])
        sequence = difflib.SequenceMatcher(None, oldwords, newwords)

        oldfragments, newfragments = [], []
        for tag, i1, i2, j1, j2 in sequence.get_opcodes():
            oldfrag = ''.join(oldwords[i1:i2])
            newfrag = ''.join(newwords[j1:j2])
            if tag != 'equal':
                if oldfrag:
                    oldfrag = '<del>%s</del>' % oldfrag
                if newfrag:
                    newfrag = '<ins>%s</ins>' % newfrag
            oldfragments.append(oldfrag)
            newfragments.append(newfrag)

        old['line'] = "".join(oldfragments)
        new['line'] = "".join(newfragments)
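
A concrete illustration of the inline highlighting; the two dicts below are hand-made but shaped like the line dicts _parse_lines produces:

    old = {'action': 'del', 'line': 'foo bar'}
    new = {'action': 'add', 'line': 'foo baz'}
    DiffProcessor('')._highlight_inline_diff(old, new)
    # old['line'] == 'foo <del>bar</del>'
    # new['line'] == 'foo <ins>baz</ins>'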

    def _get_header(self, diff_chunk):
        """
        Parses a Git diff for a single file (header and chunks) and returns a tuple with:

        1. A dict with meta info:

            a_path, b_path, similarity_index, rename_from, rename_to,
            old_mode, new_mode, new_file_mode, deleted_file_mode,
            a_blob_id, b_blob_id, b_mode, a_file, b_file

        2. An iterator yielding lines with simple HTML markup.
        """
        match = None
        if self.vcs == 'git':
            match = self._git_header_re.match(diff_chunk)
        elif self.vcs == 'hg':
            match = self._hg_header_re.match(diff_chunk)
        if match is None:
            raise Exception('diff not recognized as valid %s diff' % self.vcs)
        meta_info = match.groupdict()
        rest = diff_chunk[match.end():]
        if rest and not rest.startswith('@') and not rest.startswith('literal ') and not rest.startswith('delta '):
            raise Exception('cannot parse %s diff header: %r followed by %r' % (self.vcs, diff_chunk[:match.end()], rest[:1000]))
        difflines = imap(self._escaper, re.findall(r'.*\n|.+$', rest))  # don't split on \r as str.splitlines does
        return meta_info, difflines

    def _clean_line(self, line, command):
        """Given a diff line, strip the leading character if it is a plus/minus/context line."""
        if command in ['+', '-', ' ']:
            line = line[1:]
        return line

    def _parse_gitdiff(self, inline_diff=True):
        """Parse self._diff and return a list of dicts with meta info and chunks for each file.
        If the diff is truncated, wrap it in LimitedDiffContainer.
        Optionally, do an extra pass and add extra markup for one-liner changes.
        """
        _files = []  # list of dicts with meta info and chunks
        diff_container = lambda arg: arg

        # split the diff in chunks of separate --git a/file b/file chunks
        for raw_diff in ('\n' + self._diff).split('\ndiff --git')[1:]:
            head, diff = self._get_header(raw_diff)

            op = None
            stats = {
                'added': 0,
                'deleted': 0,
                'binary': False,
                'ops': {},
            }

            if head['deleted_file_mode']:
                op = 'D'
                stats['binary'] = True
                stats['ops'][DEL_FILENODE] = 'deleted file'

            elif head['new_file_mode']:
                op = 'A'
                stats['binary'] = True
                stats['ops'][NEW_FILENODE] = 'new file %s' % head['new_file_mode']
            else:  # modify operation, can be cp, rename, chmod
                # CHMOD
                if head['new_mode'] and head['old_mode']:
                    op = 'M'
                    stats['binary'] = True
                    stats['ops'][CHMOD_FILENODE] = ('modified file chmod %s => %s'
                                                    % (head['old_mode'], head['new_mode']))
                # RENAME
                if (head['rename_from'] and head['rename_to']
                        and head['rename_from'] != head['rename_to']):
                    op = 'R'
                    stats['binary'] = True
                    stats['ops'][RENAMED_FILENODE] = ('file renamed from %s to %s'
                                                      % (head['rename_from'], head['rename_to']))
                # COPY
                if head.get('copy_from') and head.get('copy_to'):
                    op = 'M'
                    stats['binary'] = True
                    stats['ops'][COPIED_FILENODE] = ('file copied from %s to %s'
                                                     % (head['copy_from'], head['copy_to']))
                # FALL BACK: detect missed old style add or remove
                if op is None:
                    if not head['a_file'] and head['b_file']:
                        op = 'A'
                        stats['binary'] = True
                        stats['ops'][NEW_FILENODE] = 'new file'

                    elif head['a_file'] and not head['b_file']:
                        op = 'D'
                        stats['binary'] = True
                        stats['ops'][DEL_FILENODE] = 'deleted file'

                # it's neither ADD nor DELETE
                if op is None:
                    op = 'M'
                    stats['binary'] = True
                    stats['ops'][MOD_FILENODE] = 'modified file'

            # a real non-binary diff
            if head['a_file'] or head['b_file']:
                try:
                    chunks, added, deleted = self._parse_lines(diff)
                    stats['binary'] = False
                    stats['added'] = added
                    stats['deleted'] = deleted
                    # explicit mark that it's a modified file
                    if op == 'M':
                        stats['ops'][MOD_FILENODE] = 'modified file'

                except DiffLimitExceeded:
                    diff_container = lambda _diff: \
                        LimitedDiffContainer(self.diff_limit,
                                             self.cur_diff_size, _diff)
                    break
            else:  # Git binary patch (or empty diff)
                # Git binary patch
                if head['bin_patch']:
                    stats['ops'][BIN_FILENODE] = 'binary diff not shown'
                chunks = []

            if op == 'D' and chunks:
                # a way of seeing deleted content could perhaps be nice - but
                # not with the current UI
                chunks = []

            chunks.insert(0, [{
                'old_lineno': '',
                'new_lineno': '',
                'action': 'context',
                'line': msg,
            } for _op, msg in stats['ops'].iteritems()
                if _op not in [MOD_FILENODE]])

            _files.append({
                'old_filename': head['a_path'],
                'filename': head['b_path'],
                'old_revision': head['a_blob_id'],
                'new_revision': head['b_blob_id'],
                'chunks': chunks,
                'operation': op,
                'stats': stats,
            })

        if not inline_diff:
            return diff_container(_files)

        # highlight inline changes when one del is followed by one add
        for diff_data in _files:
            for chunk in diff_data['chunks']:
                lineiter = iter(chunk)
                try:
                    peekline = lineiter.next()
                    while True:
                        # find a first del line
                        while peekline['action'] != 'del':
                            peekline = lineiter.next()
                        delline = peekline
                        peekline = lineiter.next()
                        # if not followed by add, eat all following del lines
                        if peekline['action'] != 'add':
                            while peekline['action'] == 'del':
                                peekline = lineiter.next()
                            continue
                        # found an add - make sure it is the only one
                        addline = peekline
                        try:
                            peekline = lineiter.next()
                        except StopIteration:
                            # add was last line - ok
                            self._highlight_inline_diff(delline, addline)
                            raise
                        if peekline['action'] != 'add':
                            # there was only one add line - ok
                            self._highlight_inline_diff(delline, addline)
                except StopIteration:
                    pass

        return diff_container(_files)

    def _parse_lines(self, diff):
        """
        Given an iterator of diff body lines, parse them and return a dict per
        line and added/removed totals.
        """
        added = deleted = 0
        old_line = old_end = new_line = new_end = None

        try:
            chunks = []
            line = diff.next()

            while True:
                lines = []
                chunks.append(lines)

                match = self._chunk_re.match(line)

                if not match:
                    raise Exception('error parsing diff @@ line %r' % line)

                gr = match.groups()
                (old_line, old_end,
                 new_line, new_end) = [int(x or 1) for x in gr[:-1]]
                old_line -= 1
                new_line -= 1

                context = len(gr) == 5
                old_end += old_line
                new_end += new_line

                if context:
                    # skip context only if it's first line
                    if int(gr[0]) > 1:
                        lines.append({
                            'old_lineno': '...',
                            'new_lineno': '...',
                            'action': 'context',
                            'line': line,
                        })

                line = diff.next()

                while old_line < old_end or new_line < new_end:
                    if not line:
                        raise Exception('error parsing diff - empty line at -%s+%s' % (old_line, new_line))

                    affects_old = affects_new = False

                    command = line[0]
                    if command == '+':
                        affects_new = True
                        action = 'add'
                        added += 1
                    elif command == '-':
                        affects_old = True
                        action = 'del'
                        deleted += 1
                    elif command == ' ':
                        affects_old = affects_new = True
                        action = 'unmod'
                    else:
                        raise Exception('error parsing diff - unknown command in line %r at -%s+%s' % (line, old_line, new_line))

                    if not self._newline_marker.match(line):
                        old_line += affects_old
                        new_line += affects_new
                        lines.append({
                            'old_lineno': affects_old and old_line or '',
                            'new_lineno': affects_new and new_line or '',
                            'action': action,
                            'line': self._clean_line(line, command)
                        })

                    line = diff.next()

                    if self._newline_marker.match(line):
                        # we need to append to lines, since this is not
                        # counted in the line specs of diff
                        lines.append({
                            'old_lineno': '...',
                            'new_lineno': '...',
                            'action': 'context',
                            'line': self._clean_line(line, command)
                        })
                        line = diff.next()
                if old_line > old_end:
                    raise Exception('error parsing diff - more than %s "-" lines at -%s+%s' % (old_end, old_line, new_line))
                if new_line > new_end:
                    raise Exception('error parsing diff - more than %s "+" lines at -%s+%s' % (new_end, old_line, new_line))
        except StopIteration:
            pass
        if old_line != old_end or new_line != new_end:
            raise Exception('diff processing broken when old %s<>%s or new %s<>%s line %r' % (old_line, old_end, new_line, new_end, line))

        return chunks, added, deleted

    def _safe_id(self, idstring):
        """Make a string safe for including in an id attribute.

        The HTML spec says that id attributes 'must begin with
        a letter ([A-Za-z]) and may be followed by any number
        of letters, digits ([0-9]), hyphens ("-"), underscores
        ("_"), colons (":"), and periods (".")'. These regexps
        are slightly over-zealous, in that they remove colons
        and periods unnecessarily.

        Whitespace is transformed into underscores, and then
        anything which is not a hyphen or a character that
        matches \w (alphanumerics and underscore) is removed.

        """
        # Transform all whitespace to underscore
        idstring = re.sub(r'\s', "_", idstring)
        # Remove everything that is not a hyphen or a member of \w
        idstring = re.sub(r'(?!-)\W', "", idstring).lower()
        return idstring
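
For example, applying the two substitutions above:

    DiffProcessor('')._safe_id('My File.txt')   # -> 'my_filetxt'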

    def prepare(self, inline_diff=True):
        """
        Prepare the passed udiff for HTML rendering. It'll return a list
        of dicts with diff information.
        """
        parsed = self._parse_gitdiff(inline_diff=inline_diff)
        self.parsed = True
        self.parsed_diff = parsed
        return parsed

-    def as_raw(self):
-        """
-        Returns raw string diff, exactly as it was passed in the first place.
-        """
-        return self._diff
-
    def as_html(self, table_class='code-difftable', line_class='line',
                old_lineno_class='lineno old', new_lineno_class='lineno new',
                no_lineno_class='lineno',
                code_class='code', enable_comments=False, parsed_lines=None):
        """
        Return the given diff as an html table with customized css classes
        """
        def _link_to_if(condition, label, url):
            """
            Generates a link if the condition is met, or just the label if not.
            """

            if condition:
                return '''<a href="%(url)s">%(label)s</a>''' % {
                    'url': url,
                    'label': label
                }
            else:
                return label
        if not self.parsed:
            self.prepare()

        diff_lines = self.parsed_diff
        if parsed_lines:
            diff_lines = parsed_lines

        _html_empty = True
        _html = []
        _html.append('''<table class="%(table_class)s">\n''' % {
            'table_class': table_class
        })

        for diff in diff_lines:
            for line in diff['chunks']:
                _html_empty = False
                for change in line:
                    _html.append('''<tr class="%(lc)s %(action)s">\n''' % {
                        'lc': line_class,
                        'action': change['action']
                    })
                    anchor_old_id = ''
                    anchor_new_id = ''
                    anchor_old = "%(filename)s_o%(oldline_no)s" % {
                        'filename': self._safe_id(diff['filename']),
                        'oldline_no': change['old_lineno']
                    }
                    anchor_new = "%(filename)s_n%(oldline_no)s" % {
                        'filename': self._safe_id(diff['filename']),
                        'oldline_no': change['new_lineno']
                    }
                    cond_old = (change['old_lineno'] != '...' and
                                change['old_lineno'])
                    cond_new = (change['new_lineno'] != '...' and
                                change['new_lineno'])
                    no_lineno = (change['old_lineno'] == '...' and
                                 change['new_lineno'] == '...')
                    if cond_old:
                        anchor_old_id = 'id="%s"' % anchor_old
                    if cond_new:
                        anchor_new_id = 'id="%s"' % anchor_new
                    ###########################################################
                    # OLD LINE NUMBER
                    ###########################################################
                    _html.append('''\t<td %(a_id)s class="%(olc)s" %(colspan)s>''' % {
                        'a_id': anchor_old_id,
                        'olc': no_lineno_class if no_lineno else old_lineno_class,
                        'colspan': 'colspan="2"' if no_lineno else ''
                    })

                    _html.append('''%(link)s''' % {
                        'link': _link_to_if(not no_lineno, change['old_lineno'],
                                            '#%s' % anchor_old)
                    })
                    _html.append('''</td>\n''')
                    ###########################################################
                    # NEW LINE NUMBER
                    ###########################################################

                    if not no_lineno:
                        _html.append('''\t<td %(a_id)s class="%(nlc)s">''' % {
                            'a_id': anchor_new_id,
                            'nlc': new_lineno_class
                        })

                        _html.append('''%(link)s''' % {
                            'link': _link_to_if(True, change['new_lineno'],
                                                '#%s' % anchor_new)
                        })
                        _html.append('''</td>\n''')
                    ###########################################################
                    # CODE
                    ###########################################################
                    comments = '' if enable_comments else 'no-comment'
                    _html.append('''\t<td class="%(cc)s %(inc)s">''' % {
                        'cc': code_class,
                        'inc': comments
                    })
                    _html.append('''\n\t\t<div class="add-bubble"><div>&nbsp;</div></div><pre>%(code)s</pre>\n''' % {
                        'code': change['line']
                    })

                    _html.append('''\t</td>''')
                    _html.append('''\n</tr>\n''')
        _html.append('''</table>''')
        if _html_empty:
            return None
        return ''.join(_html)

    def stat(self):
        """
        Returns a tuple of added and removed lines for this instance
        """
        return self.adds, self.removes
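
Putting the class together, a minimal self-contained sketch of the typical prepare/as_html flow; the sample diff text is invented for illustration:

    from kallithea.lib.diffs import DiffProcessor

    sample = (
        'diff --git a/foo.txt b/foo.txt\n'
        'index 0000000..1111111 100644\n'
        '--- a/foo.txt\n'
        '+++ b/foo.txt\n'
        '@@ -1,2 +1,2 @@\n'
        ' unchanged line\n'
        '-old line\n'
        '+new line\n'
    )

    dp = DiffProcessor(sample, vcs='git')
    files = dp.prepare()          # list with one per-file dict
    print files[0]['operation']   # 'M' (modified file)
    print files[0]['stats']['added'], files[0]['stats']['deleted']  # 1 1
    html = dp.as_html()           # '<table class="code-difftable">...'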