--- a/kallithea/controllers/admin/gists.py
+++ b/kallithea/controllers/admin/gists.py
@@ -1,264 +1,264 @@
 # -*- coding: utf-8 -*-
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 """
 kallithea.controllers.admin.gists
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 gist controller for Kallithea

 This file was forked by the Kallithea project in July 2014.
 Original author and date, and relevant copyright and licensing information is below:
 :created_on: May 9, 2013
 :author: marcink
 :copyright: (c) 2013 RhodeCode GmbH, and others.
 :license: GPLv3, see LICENSE.md for more details.
 """

 import logging
 import traceback

 import formencode.htmlfill
 from sqlalchemy.sql.expression import or_
 from tg import request, response
 from tg import tmpl_context as c
 from tg.i18n import ugettext as _
 from webob.exc import HTTPForbidden, HTTPFound, HTTPNotFound

 from kallithea.config.routing import url
 from kallithea.lib import helpers as h
 from kallithea.lib.auth import LoginRequired
 from kallithea.lib.base import BaseController, jsonify, render
 from kallithea.lib.page import Page
 from kallithea.lib.utils2 import safe_int, safe_unicode, time_to_datetime
 from kallithea.lib.vcs.exceptions import NodeNotChangedError, VCSError
 from kallithea.model.db import Gist
 from kallithea.model.forms import GistForm
 from kallithea.model.gist import GistModel
 from kallithea.model.meta import Session


 log = logging.getLogger(__name__)


 class GistsController(BaseController):
     """REST Controller styled on the Atom Publishing Protocol"""

     def __load_defaults(self, extra_values=None):
         c.lifetime_values = [
             (str(-1), _('Forever')),
             (str(5), _('5 minutes')),
             (str(60), _('1 hour')),
             (str(60 * 24), _('1 day')),
             (str(60 * 24 * 30), _('1 month')),
         ]
         if extra_values:
             c.lifetime_values.append(extra_values)
         c.lifetime_options = [(c.lifetime_values, _("Lifetime"))]

     @LoginRequired(allow_default_user=True)
     def index(self):
         not_default_user = not request.authuser.is_default_user
         c.show_private = request.GET.get('private') and not_default_user
         c.show_public = request.GET.get('public') and not_default_user
         url_params = {}
         if c.show_public:
             url_params['public'] = 1
         elif c.show_private:
             url_params['private'] = 1

         gists = Gist().query() \
             .filter_by(is_expired=False) \
             .order_by(Gist.created_on.desc())

         # MY private
         if c.show_private and not c.show_public:
             gists = gists.filter(Gist.gist_type == Gist.GIST_PRIVATE) \
                 .filter(Gist.owner_id == request.authuser.user_id)
         # MY public
         elif c.show_public and not c.show_private:
             gists = gists.filter(Gist.gist_type == Gist.GIST_PUBLIC) \
                 .filter(Gist.owner_id == request.authuser.user_id)

         # MY public+private
         elif c.show_private and c.show_public:
             gists = gists.filter(or_(Gist.gist_type == Gist.GIST_PUBLIC,
                                      Gist.gist_type == Gist.GIST_PRIVATE)) \
                 .filter(Gist.owner_id == request.authuser.user_id)

         # default show ALL public gists
         if not c.show_public and not c.show_private:
             gists = gists.filter(Gist.gist_type == Gist.GIST_PUBLIC)

         c.gists = gists
         p = safe_int(request.GET.get('page'), 1)
         c.gists_pager = Page(c.gists, page=p, items_per_page=10,
                              **url_params)
         return render('admin/gists/index.html')

     @LoginRequired()
     def create(self):
         self.__load_defaults()
         gist_form = GistForm([x[0] for x in c.lifetime_values])()
         try:
             form_result = gist_form.to_python(dict(request.POST))
             # TODO: multiple files support, from the form
             filename = form_result['filename'] or Gist.DEFAULT_FILENAME
             nodes = {
                 filename: {
                     'content': form_result['content'],
                     'lexer': form_result['mimetype']  # None is autodetect
                 }
             }
             _public = form_result['public']
             gist_type = Gist.GIST_PUBLIC if _public else Gist.GIST_PRIVATE
             gist = GistModel().create(
                 description=form_result['description'],
                 owner=request.authuser.user_id,
                 ip_addr=request.ip_addr,
                 gist_mapping=nodes,
                 gist_type=gist_type,
                 lifetime=form_result['lifetime']
             )
             Session().commit()
             new_gist_id = gist.gist_access_id
         except formencode.Invalid as errors:
             defaults = errors.value

             return formencode.htmlfill.render(
                 render('admin/gists/new.html'),
                 defaults=defaults,
                 errors=errors.error_dict or {},
                 prefix_error=False,
                 encoding="UTF-8",
                 force_defaults=False)

         except Exception as e:
             log.error(traceback.format_exc())
             h.flash(_('Error occurred during gist creation'), category='error')
             raise HTTPFound(location=url('new_gist'))
         raise HTTPFound(location=url('gist', gist_id=new_gist_id))

     @LoginRequired()
     def new(self, format='html'):
         self.__load_defaults()
         return render('admin/gists/new.html')

     @LoginRequired()
     def delete(self, gist_id):
         gist = GistModel().get_gist(gist_id)
         owner = gist.owner_id == request.authuser.user_id
         if h.HasPermissionAny('hg.admin')() or owner:
             GistModel().delete(gist)
             Session().commit()
             h.flash(_('Deleted gist %s') % gist.gist_access_id, category='success')
         else:
             raise HTTPForbidden()

         raise HTTPFound(location=url('gists'))

     @LoginRequired(allow_default_user=True)
     def show(self, gist_id, revision='tip', format='html', f_path=None):
         c.gist = Gist.get_or_404(gist_id)

         if c.gist.is_expired:
             log.error('Gist expired at %s',
                       time_to_datetime(c.gist.gist_expires))
             raise HTTPNotFound()
         try:
             c.file_changeset, c.files = GistModel().get_gist_files(gist_id,
                                                                    revision=revision)
         except VCSError:
             log.error(traceback.format_exc())
             raise HTTPNotFound()
         if format == 'raw':
-            content = '\n\n'.join([f.content for f in c.files if (f_path is None or safe_unicode(f.path) == f_path)])
+            content = '\n\n'.join([safe_unicode(f.content) for f in c.files if (f_path is None or safe_unicode(f.path) == f_path)])
             response.content_type = 'text/plain'
             return content
         return render('admin/gists/show.html')

     @LoginRequired()
     def edit(self, gist_id, format='html'):
         c.gist = Gist.get_or_404(gist_id)

         if c.gist.is_expired:
             log.error('Gist expired at %s',
                       time_to_datetime(c.gist.gist_expires))
             raise HTTPNotFound()
         try:
             c.file_changeset, c.files = GistModel().get_gist_files(gist_id)
         except VCSError:
             log.error(traceback.format_exc())
             raise HTTPNotFound()

         self.__load_defaults(extra_values=('0', _('Unmodified')))
         rendered = render('admin/gists/edit.html')

         if request.POST:
             rpost = request.POST
             nodes = {}
             for org_filename, filename, mimetype, content in zip(
                     rpost.getall('org_files'),
                     rpost.getall('files'),
                     rpost.getall('mimetypes'),
                     rpost.getall('contents')):

                 nodes[org_filename] = {
                     'org_filename': org_filename,
                     'filename': filename,
                     'content': content,
                     'lexer': mimetype,
                 }
             try:
                 GistModel().update(
                     gist=c.gist,
                     description=rpost['description'],
                     owner=c.gist.owner,  # FIXME: request.authuser.user_id ?
                     ip_addr=request.ip_addr,
                     gist_mapping=nodes,
                     gist_type=c.gist.gist_type,
                     lifetime=rpost['lifetime']
                 )

                 Session().commit()
                 h.flash(_('Successfully updated gist content'), category='success')
             except NodeNotChangedError:
                 # raised if nothing was changed in repo itself. We anyway then
                 # store only DB stuff for gist
                 Session().commit()
                 h.flash(_('Successfully updated gist data'), category='success')
             except Exception:
                 log.error(traceback.format_exc())
                 h.flash(_('Error occurred during update of gist %s') % gist_id,
                         category='error')

             raise HTTPFound(location=url('gist', gist_id=gist_id))

         return rendered

     @LoginRequired()
     @jsonify
     def check_revision(self, gist_id):
         c.gist = Gist.get_or_404(gist_id)
         last_rev = c.gist.scm_instance.get_changeset()
         success = True
         revision = request.POST.get('revision')

         # TODO: maybe move this to model ?
         if revision != last_rev.raw_id:
             log.error('Last revision %s is different than submitted %s',
                       revision, last_rev)
             # our gist has newer version than we
             success = False

         return {'success': success}
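The only functional change in this hunk wraps each gist file's content in safe_unicode() before joining them for the raw view. A minimal standalone sketch of the failure mode this appears to guard against, assuming the VCS layer hands file contents back as bytes (the sample data below is hypothetical, not from the changeset):

# Sketch only: joining bytes items with a str separator raises TypeError on Python 3,
# which is why each chunk needs decoding before '\n\n'.join(...).
raw_contents = [b'first file\n', b'second file\n']  # hypothetical gist file contents

try:
    '\n\n'.join(raw_contents)  # str separator, bytes items
except TypeError as exc:
    print('join without decoding fails:', exc)

# Decoding each chunk first (roughly what safe_unicode() is assumed to do, with fallbacks) works:
print('\n\n'.join(chunk.decode('utf-8', 'replace') for chunk in raw_contents))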
--- a/kallithea/controllers/compare.py
+++ b/kallithea/controllers/compare.py
@@ -1,289 +1,289 @@
 # -*- coding: utf-8 -*-
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
 # (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 """
 kallithea.controllers.compare
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

 compare controller showing differences between two
 repos, branches, bookmarks or tips

 This file was forked by the Kallithea project in July 2014.
 Original author and date, and relevant copyright and licensing information is below:
 :created_on: May 6, 2012
 :author: marcink
 :copyright: (c) 2013 RhodeCode GmbH, and others.
 :license: GPLv3, see LICENSE.md for more details.
 """


 import logging
 import re

 from tg import request
 from tg import tmpl_context as c
 from tg.i18n import ugettext as _
 from webob.exc import HTTPBadRequest, HTTPFound, HTTPNotFound

 from kallithea.config.routing import url
 from kallithea.controllers.changeset import _context_url, _ignorews_url
 from kallithea.lib import diffs
 from kallithea.lib import helpers as h
 from kallithea.lib.auth import HasRepoPermissionLevelDecorator, LoginRequired
 from kallithea.lib.base import BaseRepoController, render
 from kallithea.lib.graphmod import graph_data
 from kallithea.lib.utils2 import safe_int, safe_str
 from kallithea.lib.vcs.utils.hgcompat import unionrepo
 from kallithea.model.db import Repository


 log = logging.getLogger(__name__)


 class CompareController(BaseRepoController):

     def _before(self, *args, **kwargs):
         super(CompareController, self)._before(*args, **kwargs)

         # The base repository has already been retrieved.
         c.a_repo = c.db_repo

         # Retrieve the "changeset" repository (default: same as base).
         other_repo = request.GET.get('other_repo', None)
         if other_repo is None:
             c.cs_repo = c.a_repo
         else:
             c.cs_repo = Repository.get_by_repo_name(other_repo)
             if c.cs_repo is None:
                 msg = _('Could not find other repository %s') % other_repo
                 h.flash(msg, category='error')
                 raise HTTPFound(location=url('compare_home', repo_name=c.a_repo.repo_name))

         # Verify that it's even possible to compare these two repositories.
         if c.a_repo.scm_instance.alias != c.cs_repo.scm_instance.alias:
             msg = _('Cannot compare repositories of different types')
             h.flash(msg, category='error')
             raise HTTPFound(location=url('compare_home', repo_name=c.a_repo.repo_name))

     @staticmethod
     def _get_changesets(alias, org_repo, org_rev, other_repo, other_rev):
         """
         Returns lists of changesets that can be merged from org_repo@org_rev
         to other_repo@other_rev
         ... and the other way
         ... and the ancestors that would be used for merge

         :param org_repo: repo object, that is most likely the original repo we forked from
         :param org_rev: the revision we want our compare to be made
         :param other_repo: repo object, most likely the fork of org_repo. It has
             all changesets that we need to obtain
         :param other_rev: revision we want out compare to be made on other_repo
         """
         ancestors = None
         if org_rev == other_rev:
             org_changesets = []
             other_changesets = []

         elif alias == 'hg':
             # case two independent repos
             if org_repo != other_repo:
                 hgrepo = unionrepo.makeunionrepository(other_repo.baseui,
                                                        other_repo.path,
                                                        org_repo.path)
                 # all ancestors of other_rev will be in other_repo and
                 # rev numbers from hgrepo can be used in other_repo - org_rev ancestors cannot

             # no remote compare do it on the same repository
             else:
                 hgrepo = other_repo._repo

             ancestors = [hgrepo[ancestor].hex() for ancestor in
                          hgrepo.revs("id(%s) & ::id(%s)", other_rev, org_rev)]
             if ancestors:
                 log.debug("shortcut found: %s is already an ancestor of %s", other_rev, org_rev)
             else:
                 log.debug("no shortcut found: %s is not an ancestor of %s", other_rev, org_rev)
                 ancestors = [hgrepo[ancestor].hex() for ancestor in
                              hgrepo.revs("heads(::id(%s) & ::id(%s))", org_rev, other_rev)]  # FIXME: expensive!

             other_revs = hgrepo.revs("ancestors(id(%s)) and not ancestors(id(%s)) and not id(%s)",
                                      other_rev, org_rev, org_rev)
             other_changesets = [other_repo.get_changeset(rev) for rev in other_revs]
             org_revs = hgrepo.revs("ancestors(id(%s)) and not ancestors(id(%s)) and not id(%s)",
                                    org_rev, other_rev, other_rev)
             org_changesets = [org_repo.get_changeset(hgrepo[rev].hex()) for rev in org_revs]

         elif alias == 'git':
             if org_repo != other_repo:
                 from dulwich.repo import Repo
                 from dulwich.client import SubprocessGitClient

                 gitrepo = Repo(org_repo.path)
                 SubprocessGitClient(thin_packs=False).fetch(safe_str(other_repo.path), gitrepo)

                 gitrepo_remote = Repo(other_repo.path)
                 SubprocessGitClient(thin_packs=False).fetch(safe_str(org_repo.path), gitrepo_remote)

                 revs = [
                     x.commit.id
                     for x in gitrepo_remote.get_walker(include=[other_rev],
                                                        exclude=[org_rev])
                 ]
                 other_changesets = [other_repo.get_changeset(rev) for rev in reversed(revs)]
                 if other_changesets:
                     ancestors = [other_changesets[0].parents[0].raw_id]
                 else:
                     # no changesets from other repo, ancestor is the other_rev
                     ancestors = [other_rev]

                 gitrepo.close()
                 gitrepo_remote.close()

             else:
                 so = org_repo.run_git_command(
                     ['log', '--reverse', '--pretty=format:%H',
                      '-s', '%s..%s' % (org_rev, other_rev)]
                 )
                 other_changesets = [org_repo.get_changeset(cs)
                                     for cs in re.findall(r'[0-9a-fA-F]{40}', so)]
                 so = org_repo.run_git_command(
                     ['merge-base', org_rev, other_rev]
                 )
                 ancestors = [re.findall(r'[0-9a-fA-F]{40}', so)[0]]
             org_changesets = []

         else:
             raise Exception('Bad alias only git and hg is allowed')

         return other_changesets, org_changesets, ancestors

     @LoginRequired(allow_default_user=True)
     @HasRepoPermissionLevelDecorator('read')
     def index(self, repo_name):
         c.compare_home = True
         c.a_ref_name = c.cs_ref_name = None
         return render('compare/compare_diff.html')

     @LoginRequired(allow_default_user=True)
     @HasRepoPermissionLevelDecorator('read')
     def compare(self, repo_name, org_ref_type, org_ref_name, other_ref_type, other_ref_name):
         org_ref_name = org_ref_name.strip()
         other_ref_name = other_ref_name.strip()

         # If merge is True:
         #   Show what org would get if merged with other:
         #   List changesets that are ancestors of other but not of org.
         #   New changesets in org is thus ignored.
         #   Diff will be from common ancestor, and merges of org to other will thus be ignored.
         # If merge is False:
         #   Make a raw diff from org to other, no matter if related or not.
         #   Changesets in one and not in the other will be ignored
         merge = bool(request.GET.get('merge'))
         # fulldiff disables cut_off_limit
         fulldiff = request.GET.get('fulldiff')
         # partial uses compare_cs.html template directly
         partial = request.environ.get('HTTP_X_PARTIAL_XHR')
         # is_ajax_preview puts hidden input field with changeset revisions
         c.is_ajax_preview = partial and request.GET.get('is_ajax_preview')
         # swap url for compare_diff page - never partial and never is_ajax_preview
         c.swap_url = h.url('compare_url',
                            repo_name=c.cs_repo.repo_name,
                            org_ref_type=other_ref_type, org_ref_name=other_ref_name,
                            other_repo=c.a_repo.repo_name,
                            other_ref_type=org_ref_type, other_ref_name=org_ref_name,
                            merge=merge or '')

         # set callbacks for generating markup for icons
         c.ignorews_url = _ignorews_url
         c.context_url = _context_url
         ignore_whitespace = request.GET.get('ignorews') == '1'
         line_context = safe_int(request.GET.get('context'), 3)

         c.a_rev = self._get_ref_rev(c.a_repo, org_ref_type, org_ref_name,
                                     returnempty=True)
         c.cs_rev = self._get_ref_rev(c.cs_repo, other_ref_type, other_ref_name)

         c.compare_home = False
         c.a_ref_name = org_ref_name
         c.a_ref_type = org_ref_type
         c.cs_ref_name = other_ref_name
         c.cs_ref_type = other_ref_type

         c.cs_ranges, c.cs_ranges_org, c.ancestors = self._get_changesets(
             c.a_repo.scm_instance.alias, c.a_repo.scm_instance, c.a_rev,
             c.cs_repo.scm_instance, c.cs_rev)
         raw_ids = [x.raw_id for x in c.cs_ranges]
         c.cs_comments = c.cs_repo.get_comments(raw_ids)
         c.cs_statuses = c.cs_repo.statuses(raw_ids)

         revs = [ctx.revision for ctx in reversed(c.cs_ranges)]
         c.jsdata = graph_data(c.cs_repo.scm_instance, revs)

         if partial:
             return render('compare/compare_cs.html')

         org_repo = c.a_repo
         other_repo = c.cs_repo

         if merge:
             rev1 = msg = None
             if not c.cs_ranges:
                 msg = _('Cannot show empty diff')
             elif not c.ancestors:
                 msg = _('No ancestor found for merge diff')
             elif len(c.ancestors) == 1:
                 rev1 = c.ancestors[0]
             else:
                 msg = _('Multiple merge ancestors found for merge compare')
             if rev1 is None:
                 h.flash(msg, category='error')
                 log.error(msg)
                 raise HTTPNotFound

             # case we want a simple diff without incoming changesets,
             # previewing what will be merged.
             # Make the diff on the other repo (which is known to have other_rev)
             log.debug('Using ancestor %s as rev1 instead of %s',
                       rev1, c.a_rev)
             org_repo = other_repo
         else:  # comparing tips, not necessarily linearly related
             if org_repo != other_repo:
                 # TODO: we could do this by using hg unionrepo
                 log.error('cannot compare across repos %s and %s', org_repo, other_repo)
                 h.flash(_('Cannot compare repositories without using common ancestor'), category='error')
                 raise HTTPBadRequest
             rev1 = c.a_rev

         diff_limit = None if fulldiff else self.cut_off_limit

         log.debug('running diff between %s and %s in %s',
                   rev1, c.cs_rev, org_repo.scm_instance.path)
         raw_diff = diffs.get_diff(org_repo.scm_instance, rev1=rev1, rev2=c.cs_rev,
                                   ignore_whitespace=ignore_whitespace,
                                   context=line_context)

-        diff_processor = diffs.DiffProcessor(raw_diff
+        diff_processor = diffs.DiffProcessor(raw_diff, diff_limit=diff_limit)
         c.limited_diff = diff_processor.limited_diff
         c.file_diff_data = []
         c.lines_added = 0
         c.lines_deleted = 0
         for f in diff_processor.parsed:
             st = f['stats']
             c.lines_added += st['added']
             c.lines_deleted += st['deleted']
             filename = f['filename']
             fid = h.FID('', filename)
             html_diff = diffs.as_html(enable_comments=False, parsed_lines=[f])
             c.file_diff_data.append((fid, None, f['operation'], f['old_filename'], filename, html_diff, st))

         return render('compare/compare_diff.html')
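In the same-repository Git branch of _get_changesets, both the incoming changeset list and the merge base are recovered by scanning command output for 40-character hex ids. A small standalone sketch of just that parsing step (the sample output strings below are made up for illustration):

import re

# Hypothetical output of `git log --reverse --pretty=format:%H <org>..<other>`:
# one full SHA-1 per line, oldest first.
sample_log_output = (
    '3f786850e387550fdab836ed7e6dc881de23001b\n'
    '89e6c98d92887913cadf06b2adb97f26cde4849b\n'
)
incoming = re.findall(r'[0-9a-fA-F]{40}', sample_log_output)
print(incoming)    # two full SHA-1 strings, in log order

# Hypothetical output of `git merge-base <org> <other>`: a single SHA-1.
sample_merge_base_output = 'aec070645fe53ee3b3763059376134f058cc3372\n'
ancestors = [re.findall(r'[0-9a-fA-F]{40}', sample_merge_base_output)[0]]
print(ancestors)   # the common ancestor used as the diff base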
@@ -1,134 +1,134 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | # This program is free software: you can redistribute it and/or modify |
|
2 | # This program is free software: you can redistribute it and/or modify | |
3 | # it under the terms of the GNU General Public License as published by |
|
3 | # it under the terms of the GNU General Public License as published by | |
4 | # the Free Software Foundation, either version 3 of the License, or |
|
4 | # the Free Software Foundation, either version 3 of the License, or | |
5 | # (at your option) any later version. |
|
5 | # (at your option) any later version. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU General Public License |
|
12 | # You should have received a copy of the GNU General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | """ |
|
14 | """ | |
15 | kallithea.controllers.feed |
|
15 | kallithea.controllers.feed | |
16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~ |
|
16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
17 |
|
17 | |||
18 | Feed controller for Kallithea |
|
18 | Feed controller for Kallithea | |
19 |
|
19 | |||
20 | This file was forked by the Kallithea project in July 2014. |
|
20 | This file was forked by the Kallithea project in July 2014. | |
21 | Original author and date, and relevant copyright and licensing information is below: |
|
21 | Original author and date, and relevant copyright and licensing information is below: | |
22 | :created_on: Apr 23, 2010 |
|
22 | :created_on: Apr 23, 2010 | |
23 | :author: marcink |
|
23 | :author: marcink | |
24 | :copyright: (c) 2013 RhodeCode GmbH, and others. |
|
24 | :copyright: (c) 2013 RhodeCode GmbH, and others. | |
25 | :license: GPLv3, see LICENSE.md for more details. |
|
25 | :license: GPLv3, see LICENSE.md for more details. | |
26 | """ |
|
26 | """ | |
27 |
|
27 | |||
28 |
|
28 | |||
29 | import logging |
|
29 | import logging | |
30 |
|
30 | |||
31 | from beaker.cache import cache_region |
|
31 | from beaker.cache import cache_region | |
32 | from tg import response |
|
32 | from tg import response | |
33 | from tg import tmpl_context as c |
|
33 | from tg import tmpl_context as c | |
34 | from tg.i18n import ugettext as _ |
|
34 | from tg.i18n import ugettext as _ | |
35 |
|
35 | |||
36 | from kallithea import CONFIG |
|
36 | from kallithea import CONFIG | |
37 | from kallithea.lib import feeds |
|
37 | from kallithea.lib import feeds | |
38 | from kallithea.lib import helpers as h |
|
38 | from kallithea.lib import helpers as h | |
39 | from kallithea.lib.auth import HasRepoPermissionLevelDecorator, LoginRequired |
|
39 | from kallithea.lib.auth import HasRepoPermissionLevelDecorator, LoginRequired | |
40 | from kallithea.lib.base import BaseRepoController |
|
40 | from kallithea.lib.base import BaseRepoController | |
41 | from kallithea.lib.diffs import DiffProcessor |
|
41 | from kallithea.lib.diffs import DiffProcessor | |
42 | from kallithea.lib.utils2 import safe_int, safe_unicode, str2bool |
|
42 | from kallithea.lib.utils2 import safe_int, safe_unicode, str2bool | |
43 |
|
43 | |||
44 |
|
44 | |||
45 | log = logging.getLogger(__name__) |
|
45 | log = logging.getLogger(__name__) | |
46 |
|
46 | |||
47 |
|
47 | |||
48 | class FeedController(BaseRepoController): |
|
48 | class FeedController(BaseRepoController): | |
49 |
|
49 | |||
50 | @LoginRequired(allow_default_user=True) |
|
50 | @LoginRequired(allow_default_user=True) | |
51 | @HasRepoPermissionLevelDecorator('read') |
|
51 | @HasRepoPermissionLevelDecorator('read') | |
52 | def _before(self, *args, **kwargs): |
|
52 | def _before(self, *args, **kwargs): | |
53 | super(FeedController, self)._before(*args, **kwargs) |
|
53 | super(FeedController, self)._before(*args, **kwargs) | |
54 |
|
54 | |||
55 | def _get_title(self, cs): |
|
55 | def _get_title(self, cs): | |
56 | return h.shorter(cs.message, 160) |
|
56 | return h.shorter(cs.message, 160) | |
57 |
|
57 | |||
58 | def __get_desc(self, cs): |
|
58 | def __get_desc(self, cs): | |
59 | desc_msg = [(_('%s committed on %s') |
|
59 | desc_msg = [(_('%s committed on %s') | |
60 | % (h.person(cs.author), h.fmt_date(cs.date))) + '<br/>'] |
|
60 | % (h.person(cs.author), h.fmt_date(cs.date))) + '<br/>'] | |
61 | # branches, tags, bookmarks |
|
61 | # branches, tags, bookmarks | |
62 | for branch in cs.branches: |
|
62 | for branch in cs.branches: | |
63 | desc_msg.append('branch: %s<br/>' % branch) |
|
63 | desc_msg.append('branch: %s<br/>' % branch) | |
64 | for book in cs.bookmarks: |
|
64 | for book in cs.bookmarks: | |
65 | desc_msg.append('bookmark: %s<br/>' % book) |
|
65 | desc_msg.append('bookmark: %s<br/>' % book) | |
66 | for tag in cs.tags: |
|
66 | for tag in cs.tags: | |
67 | desc_msg.append('tag: %s<br/>' % tag) |
|
67 | desc_msg.append('tag: %s<br/>' % tag) | |
68 |
|
68 | |||
69 | changes = [] |
|
69 | changes = [] | |
70 | diff_limit = safe_int(CONFIG.get('rss_cut_off_limit', 32 * 1024)) |
|
70 | diff_limit = safe_int(CONFIG.get('rss_cut_off_limit', 32 * 1024)) | |
71 | raw_diff = cs.diff() |
|
71 | raw_diff = cs.diff() | |
72 | diff_processor = DiffProcessor(raw_diff, |
|
72 | diff_processor = DiffProcessor(raw_diff, | |
73 | diff_limit=diff_limit, |
|
73 | diff_limit=diff_limit, | |
74 | inline_diff=False) |
|
74 | inline_diff=False) | |
75 |
|
75 | |||
76 | for st in diff_processor.parsed: |
|
76 | for st in diff_processor.parsed: | |
77 | st.update({'added': st['stats']['added'], |
|
77 | st.update({'added': st['stats']['added'], | |
78 | 'removed': st['stats']['deleted']}) |
|
78 | 'removed': st['stats']['deleted']}) | |
79 | changes.append('\n %(operation)s %(filename)s ' |
|
79 | changes.append('\n %(operation)s %(filename)s ' | |
80 | '(%(added)s lines added, %(removed)s lines removed)' |
|
80 | '(%(added)s lines added, %(removed)s lines removed)' | |
81 | % st) |
|
81 | % st) | |
82 | if diff_processor.limited_diff: |
|
82 | if diff_processor.limited_diff: | |
83 | changes = changes + ['\n ' + |
|
83 | changes = changes + ['\n ' + | |
84 | _('Changeset was too big and was cut off...')] |
|
84 | _('Changeset was too big and was cut off...')] | |
85 |
|
85 | |||
86 | # rev link |
|
86 | # rev link | |
87 | _url = h.canonical_url('changeset_home', repo_name=c.db_repo.repo_name, |
|
87 | _url = h.canonical_url('changeset_home', repo_name=c.db_repo.repo_name, | |
88 | revision=cs.raw_id) |
|
88 | revision=cs.raw_id) | |
89 | desc_msg.append('changeset: <a href="%s">%s</a>' % (_url, cs.raw_id[:8])) |
|
89 | desc_msg.append('changeset: <a href="%s">%s</a>' % (_url, cs.raw_id[:8])) | |
90 |
|
90 | |||
91 | desc_msg.append('<pre>') |
|
91 | desc_msg.append('<pre>') | |
92 | desc_msg.append(h.urlify_text(cs.message)) |
|
92 | desc_msg.append(h.urlify_text(cs.message)) | |
93 | desc_msg.append('\n') |
|
93 | desc_msg.append('\n') | |
94 | desc_msg.extend(changes) |
|
94 | desc_msg.extend(changes) | |
95 | if str2bool(CONFIG.get('rss_include_diff', False)): |
|
95 | if str2bool(CONFIG.get('rss_include_diff', False)): | |
96 | desc_msg.append('\n\n') |
|
96 | desc_msg.append('\n\n') | |
97 | desc_msg.append(raw_diff) |
|
97 | desc_msg.append(safe_unicode(raw_diff)) | |
98 | desc_msg.append('</pre>') |
|
98 | desc_msg.append('</pre>') | |
99 | return [safe_unicode(chunk) for chunk in desc_msg] |
|
99 | return [safe_unicode(chunk) for chunk in desc_msg] | |
100 |
|
100 | |||
101 | def _feed(self, repo_name, feeder): |
|
101 | def _feed(self, repo_name, feeder): | |
102 | """Produce a simple feed""" |
|
102 | """Produce a simple feed""" | |
103 |
|
103 | |||
104 | @cache_region('long_term', '_get_feed_from_cache') |
|
104 | @cache_region('long_term', '_get_feed_from_cache') | |
105 | def _get_feed_from_cache(*_cache_keys): # parameters are not really used - only as caching key |
|
105 | def _get_feed_from_cache(*_cache_keys): # parameters are not really used - only as caching key | |
106 | header = dict( |
|
106 | header = dict( | |
107 | title=_('%s %s feed') % (c.site_name, repo_name), |
|
107 | title=_('%s %s feed') % (c.site_name, repo_name), | |
108 | link=h.canonical_url('summary_home', repo_name=repo_name), |
|
108 | link=h.canonical_url('summary_home', repo_name=repo_name), | |
109 | description=_('Changes on %s repository') % repo_name, |
|
109 | description=_('Changes on %s repository') % repo_name, | |
110 | ) |
|
110 | ) | |
111 |
|
111 | |||
112 | rss_items_per_page = safe_int(CONFIG.get('rss_items_per_page', 20)) |
|
112 | rss_items_per_page = safe_int(CONFIG.get('rss_items_per_page', 20)) | |
113 | entries=[] |
|
113 | entries=[] | |
114 | for cs in reversed(list(c.db_repo_scm_instance[-rss_items_per_page:])): |
|
114 | for cs in reversed(list(c.db_repo_scm_instance[-rss_items_per_page:])): | |
115 | entries.append(dict( |
|
115 | entries.append(dict( | |
116 | title=self._get_title(cs), |
|
116 | title=self._get_title(cs), | |
117 | link=h.canonical_url('changeset_home', repo_name=repo_name, revision=cs.raw_id), |
|
117 | link=h.canonical_url('changeset_home', repo_name=repo_name, revision=cs.raw_id), | |
118 | author_email=cs.author_email, |
|
118 | author_email=cs.author_email, | |
119 | author_name=cs.author_name, |
|
119 | author_name=cs.author_name, | |
120 | description=''.join(self.__get_desc(cs)), |
|
120 | description=''.join(self.__get_desc(cs)), | |
121 | pubdate=cs.date, |
|
121 | pubdate=cs.date, | |
122 | )) |
|
122 | )) | |
123 | return feeder.render(header, entries) |
|
123 | return feeder.render(header, entries) | |
124 |
|
124 | |||
125 | response.content_type = feeder.content_type |
|
125 | response.content_type = feeder.content_type | |
126 | return _get_feed_from_cache(repo_name, feeder.__name__) |
|
126 | return _get_feed_from_cache(repo_name, feeder.__name__) | |
127 |
|
127 | |||
128 | def atom(self, repo_name): |
|
128 | def atom(self, repo_name): | |
129 | """Produce a simple atom-1.0 feed""" |
|
129 | """Produce a simple atom-1.0 feed""" | |
130 | return self._feed(repo_name, feeds.AtomFeed) |
|
130 | return self._feed(repo_name, feeds.AtomFeed) | |
131 |
|
131 | |||
132 | def rss(self, repo_name): |
|
132 | def rss(self, repo_name): | |
133 | """Produce a simple rss2 feed""" |
|
133 | """Produce a simple rss2 feed""" | |
134 | return self._feed(repo_name, feeds.RssFeed) |
|
134 | return self._feed(repo_name, feeds.RssFeed) |
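The _get_feed_from_cache closure above relies on a small caching trick: its positional arguments are never read inside the function, they only become part of the cache key, so each (repo_name, feed type) pair gets its own cached rendering in the 'long_term' region. A minimal standalone sketch of that pattern using Beaker directly (cache_opts, render_feed and feed_kind are illustrative names, not Kallithea code; Kallithea configures its cache regions from the .ini file instead):

from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options

cache_opts = {
    'cache.regions': 'long_term',
    'cache.long_term.type': 'memory',
    'cache.long_term.expire': 600,
}
cache = CacheManager(**parse_cache_config_options(cache_opts))

@cache.region('long_term', 'feed')
def render_feed(repo_name, feed_kind):
    # the arguments only serve as the cache key; the body runs on a cache miss
    return 'rendered %s feed for %s' % (feed_kind, repo_name)

render_feed('myrepo', 'rss')   # computed and stored
render_feed('myrepo', 'rss')   # served from the cache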
@@ -1,757 +1,756 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | # This program is free software: you can redistribute it and/or modify |
|
2 | # This program is free software: you can redistribute it and/or modify | |
3 | # it under the terms of the GNU General Public License as published by |
|
3 | # it under the terms of the GNU General Public License as published by | |
4 | # the Free Software Foundation, either version 3 of the License, or |
|
4 | # the Free Software Foundation, either version 3 of the License, or | |
5 | # (at your option) any later version. |
|
5 | # (at your option) any later version. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU General Public License |
|
12 | # You should have received a copy of the GNU General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | """ |
|
14 | """ | |
15 | kallithea.controllers.files |
|
15 | kallithea.controllers.files | |
16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
|
16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
17 |
|
17 | |||
18 | Files controller for Kallithea |
|
18 | Files controller for Kallithea | |
19 |
|
19 | |||
20 | This file was forked by the Kallithea project in July 2014. |
|
20 | This file was forked by the Kallithea project in July 2014. | |
21 | Original author and date, and relevant copyright and licensing information is below: |
|
21 | Original author and date, and relevant copyright and licensing information is below: | |
22 | :created_on: Apr 21, 2010 |
|
22 | :created_on: Apr 21, 2010 | |
23 | :author: marcink |
|
23 | :author: marcink | |
24 | :copyright: (c) 2013 RhodeCode GmbH, and others. |
|
24 | :copyright: (c) 2013 RhodeCode GmbH, and others. | |
25 | :license: GPLv3, see LICENSE.md for more details. |
|
25 | :license: GPLv3, see LICENSE.md for more details. | |
26 | """ |
|
26 | """ | |
27 |
|
27 | |||
28 | import logging |
|
28 | import logging | |
29 | import os |
|
29 | import os | |
30 | import posixpath |
|
30 | import posixpath | |
31 | import shutil |
|
31 | import shutil | |
32 | import tempfile |
|
32 | import tempfile | |
33 | import traceback |
|
33 | import traceback | |
34 | from collections import OrderedDict |
|
34 | from collections import OrderedDict | |
35 |
|
35 | |||
36 | from tg import request, response |
|
36 | from tg import request, response | |
37 | from tg import tmpl_context as c |
|
37 | from tg import tmpl_context as c | |
38 | from tg.i18n import ugettext as _ |
|
38 | from tg.i18n import ugettext as _ | |
39 | from webob.exc import HTTPFound, HTTPNotFound |
|
39 | from webob.exc import HTTPFound, HTTPNotFound | |
40 |
|
40 | |||
41 | from kallithea.config.routing import url |
|
41 | from kallithea.config.routing import url | |
42 | from kallithea.controllers.changeset import _context_url, _ignorews_url, anchor_url, get_ignore_ws, get_line_ctx |
|
42 | from kallithea.controllers.changeset import _context_url, _ignorews_url, anchor_url, get_ignore_ws, get_line_ctx | |
43 | from kallithea.lib import diffs |
|
43 | from kallithea.lib import diffs | |
44 | from kallithea.lib import helpers as h |
|
44 | from kallithea.lib import helpers as h | |
45 | from kallithea.lib.auth import HasRepoPermissionLevelDecorator, LoginRequired |
|
45 | from kallithea.lib.auth import HasRepoPermissionLevelDecorator, LoginRequired | |
46 | from kallithea.lib.base import BaseRepoController, jsonify, render |
|
46 | from kallithea.lib.base import BaseRepoController, jsonify, render | |
47 | from kallithea.lib.exceptions import NonRelativePathError |
|
47 | from kallithea.lib.exceptions import NonRelativePathError | |
48 | from kallithea.lib.utils import action_logger |
|
48 | from kallithea.lib.utils import action_logger | |
49 | from kallithea.lib.utils2 import convert_line_endings, detect_mode, safe_int, safe_str, str2bool |
|
49 | from kallithea.lib.utils2 import convert_line_endings, detect_mode, safe_int, safe_str, safe_unicode, str2bool | |
50 | from kallithea.lib.vcs.backends.base import EmptyChangeset |
|
50 | from kallithea.lib.vcs.backends.base import EmptyChangeset | |
51 | from kallithea.lib.vcs.conf import settings |
|
51 | from kallithea.lib.vcs.conf import settings | |
52 | from kallithea.lib.vcs.exceptions import ( |
|
52 | from kallithea.lib.vcs.exceptions import ( | |
53 | ChangesetDoesNotExistError, ChangesetError, EmptyRepositoryError, ImproperArchiveTypeError, NodeAlreadyExistsError, NodeDoesNotExistError, NodeError, RepositoryError, VCSError) |
|
53 | ChangesetDoesNotExistError, ChangesetError, EmptyRepositoryError, ImproperArchiveTypeError, NodeAlreadyExistsError, NodeDoesNotExistError, NodeError, RepositoryError, VCSError) | |
54 | from kallithea.lib.vcs.nodes import FileNode |
|
54 | from kallithea.lib.vcs.nodes import FileNode | |
55 | from kallithea.model.db import Repository |
|
55 | from kallithea.model.db import Repository | |
56 | from kallithea.model.repo import RepoModel |
|
56 | from kallithea.model.repo import RepoModel | |
57 | from kallithea.model.scm import ScmModel |
|
57 | from kallithea.model.scm import ScmModel | |
58 |
|
58 | |||
59 |
|
59 | |||
60 | log = logging.getLogger(__name__) |
|
60 | log = logging.getLogger(__name__) | |
61 |
|
61 | |||
62 |
|
62 | |||
63 | class FilesController(BaseRepoController): |
|
63 | class FilesController(BaseRepoController): | |
64 |
|
64 | |||
65 | def _before(self, *args, **kwargs): |
|
65 | def _before(self, *args, **kwargs): | |
66 | super(FilesController, self)._before(*args, **kwargs) |
|
66 | super(FilesController, self)._before(*args, **kwargs) | |
67 |
|
67 | |||
68 | def __get_cs(self, rev, silent_empty=False): |
|
68 | def __get_cs(self, rev, silent_empty=False): | |
69 | """ |
|
69 | """ | |
70 | Safe way to get a changeset; if an error occurs it redirects to tip with a |
|
70 | Safe way to get a changeset; if an error occurs it redirects to tip with a | |
71 | proper message |
|
71 | proper message | |
72 |
|
72 | |||
73 | :param rev: revision to fetch |
|
73 | :param rev: revision to fetch | |
74 | :param silent_empty: return None if the repository is empty |
|
74 | :param silent_empty: return None if the repository is empty | |
75 | """ |
|
75 | """ | |
76 |
|
76 | |||
77 | try: |
|
77 | try: | |
78 | return c.db_repo_scm_instance.get_changeset(rev) |
|
78 | return c.db_repo_scm_instance.get_changeset(rev) | |
79 | except EmptyRepositoryError as e: |
|
79 | except EmptyRepositoryError as e: | |
80 | if silent_empty: |
|
80 | if silent_empty: | |
81 | return None |
|
81 | return None | |
82 | url_ = url('files_add_home', |
|
82 | url_ = url('files_add_home', | |
83 | repo_name=c.repo_name, |
|
83 | repo_name=c.repo_name, | |
84 | revision=0, f_path='', anchor='edit') |
|
84 | revision=0, f_path='', anchor='edit') | |
85 | add_new = h.link_to(_('Click here to add new file'), url_, class_="alert-link") |
|
85 | add_new = h.link_to(_('Click here to add new file'), url_, class_="alert-link") | |
86 | h.flash(_('There are no files yet.') + ' ' + add_new, category='warning') |
|
86 | h.flash(_('There are no files yet.') + ' ' + add_new, category='warning') | |
87 | raise HTTPNotFound() |
|
87 | raise HTTPNotFound() | |
88 | except (ChangesetDoesNotExistError, LookupError): |
|
88 | except (ChangesetDoesNotExistError, LookupError): | |
89 | msg = _('Such revision does not exist for this repository') |
|
89 | msg = _('Such revision does not exist for this repository') | |
90 | h.flash(msg, category='error') |
|
90 | h.flash(msg, category='error') | |
91 | raise HTTPNotFound() |
|
91 | raise HTTPNotFound() | |
92 | except RepositoryError as e: |
|
92 | except RepositoryError as e: | |
93 | h.flash(unicode(e), category='error') |
|
93 | h.flash(unicode(e), category='error') | |
94 | raise HTTPNotFound() |
|
94 | raise HTTPNotFound() | |
95 |
|
95 | |||
96 | def __get_filenode(self, cs, path): |
|
96 | def __get_filenode(self, cs, path): | |
97 | """ |
|
97 | """ | |
98 | Returns file_node or raises an HTTP error. |
|
98 | Returns file_node or raises an HTTP error. | |
99 |
|
99 | |||
100 | :param cs: given changeset |
|
100 | :param cs: given changeset | |
101 | :param path: path to lookup |
|
101 | :param path: path to lookup | |
102 | """ |
|
102 | """ | |
103 |
|
103 | |||
104 | try: |
|
104 | try: | |
105 | file_node = cs.get_node(path) |
|
105 | file_node = cs.get_node(path) | |
106 | if file_node.is_dir(): |
|
106 | if file_node.is_dir(): | |
107 | raise RepositoryError('given path is a directory') |
|
107 | raise RepositoryError('given path is a directory') | |
108 | except ChangesetDoesNotExistError: |
|
108 | except ChangesetDoesNotExistError: | |
109 | msg = _('Such revision does not exist for this repository') |
|
109 | msg = _('Such revision does not exist for this repository') | |
110 | h.flash(msg, category='error') |
|
110 | h.flash(msg, category='error') | |
111 | raise HTTPNotFound() |
|
111 | raise HTTPNotFound() | |
112 | except RepositoryError as e: |
|
112 | except RepositoryError as e: | |
113 | h.flash(unicode(e), category='error') |
|
113 | h.flash(unicode(e), category='error') | |
114 | raise HTTPNotFound() |
|
114 | raise HTTPNotFound() | |
115 |
|
115 | |||
116 | return file_node |
|
116 | return file_node | |
117 |
|
117 | |||
118 | @LoginRequired(allow_default_user=True) |
|
118 | @LoginRequired(allow_default_user=True) | |
119 | @HasRepoPermissionLevelDecorator('read') |
|
119 | @HasRepoPermissionLevelDecorator('read') | |
120 | def index(self, repo_name, revision, f_path, annotate=False): |
|
120 | def index(self, repo_name, revision, f_path, annotate=False): | |
121 | # redirect to given revision from form if given |
|
121 | # redirect to given revision from form if given | |
122 | post_revision = request.POST.get('at_rev', None) |
|
122 | post_revision = request.POST.get('at_rev', None) | |
123 | if post_revision: |
|
123 | if post_revision: | |
124 | cs = self.__get_cs(post_revision) # FIXME - unused! |
|
124 | cs = self.__get_cs(post_revision) # FIXME - unused! | |
125 |
|
125 | |||
126 | c.revision = revision |
|
126 | c.revision = revision | |
127 | c.changeset = self.__get_cs(revision) |
|
127 | c.changeset = self.__get_cs(revision) | |
128 | c.branch = request.GET.get('branch', None) |
|
128 | c.branch = request.GET.get('branch', None) | |
129 | c.f_path = f_path |
|
129 | c.f_path = f_path | |
130 | c.annotate = annotate |
|
130 | c.annotate = annotate | |
131 | cur_rev = c.changeset.revision |
|
131 | cur_rev = c.changeset.revision | |
132 | # used in files_source.html: |
|
132 | # used in files_source.html: | |
133 | c.cut_off_limit = self.cut_off_limit |
|
133 | c.cut_off_limit = self.cut_off_limit | |
134 | c.fulldiff = request.GET.get('fulldiff') |
|
134 | c.fulldiff = request.GET.get('fulldiff') | |
135 |
|
135 | |||
136 | # prev link |
|
136 | # prev link | |
137 | try: |
|
137 | try: | |
138 | prev_rev = c.db_repo_scm_instance.get_changeset(cur_rev).prev(c.branch) |
|
138 | prev_rev = c.db_repo_scm_instance.get_changeset(cur_rev).prev(c.branch) | |
139 | c.url_prev = url('files_home', repo_name=c.repo_name, |
|
139 | c.url_prev = url('files_home', repo_name=c.repo_name, | |
140 | revision=prev_rev.raw_id, f_path=f_path) |
|
140 | revision=prev_rev.raw_id, f_path=f_path) | |
141 | if c.branch: |
|
141 | if c.branch: | |
142 | c.url_prev += '?branch=%s' % c.branch |
|
142 | c.url_prev += '?branch=%s' % c.branch | |
143 | except (ChangesetDoesNotExistError, VCSError): |
|
143 | except (ChangesetDoesNotExistError, VCSError): | |
144 | c.url_prev = '#' |
|
144 | c.url_prev = '#' | |
145 |
|
145 | |||
146 | # next link |
|
146 | # next link | |
147 | try: |
|
147 | try: | |
148 | next_rev = c.db_repo_scm_instance.get_changeset(cur_rev).next(c.branch) |
|
148 | next_rev = c.db_repo_scm_instance.get_changeset(cur_rev).next(c.branch) | |
149 | c.url_next = url('files_home', repo_name=c.repo_name, |
|
149 | c.url_next = url('files_home', repo_name=c.repo_name, | |
150 | revision=next_rev.raw_id, f_path=f_path) |
|
150 | revision=next_rev.raw_id, f_path=f_path) | |
151 | if c.branch: |
|
151 | if c.branch: | |
152 | c.url_next += '?branch=%s' % c.branch |
|
152 | c.url_next += '?branch=%s' % c.branch | |
153 | except (ChangesetDoesNotExistError, VCSError): |
|
153 | except (ChangesetDoesNotExistError, VCSError): | |
154 | c.url_next = '#' |
|
154 | c.url_next = '#' | |
155 |
|
155 | |||
156 | # files or dirs |
|
156 | # files or dirs | |
157 | try: |
|
157 | try: | |
158 | c.file = c.changeset.get_node(f_path) |
|
158 | c.file = c.changeset.get_node(f_path) | |
159 |
|
159 | |||
160 | if c.file.is_submodule(): |
|
160 | if c.file.is_submodule(): | |
161 | raise HTTPFound(location=c.file.url) |
|
161 | raise HTTPFound(location=c.file.url) | |
162 | elif c.file.is_file(): |
|
162 | elif c.file.is_file(): | |
163 | c.load_full_history = False |
|
163 | c.load_full_history = False | |
164 | # determine if we're on branch head |
|
164 | # determine if we're on branch head | |
165 | _branches = c.db_repo_scm_instance.branches |
|
165 | _branches = c.db_repo_scm_instance.branches | |
166 | c.on_branch_head = revision in _branches or revision in _branches.values() |
|
166 | c.on_branch_head = revision in _branches or revision in _branches.values() | |
167 | _hist = [] |
|
167 | _hist = [] | |
168 | c.file_history = [] |
|
168 | c.file_history = [] | |
169 | if c.load_full_history: |
|
169 | if c.load_full_history: | |
170 | c.file_history, _hist = self._get_node_history(c.changeset, f_path) |
|
170 | c.file_history, _hist = self._get_node_history(c.changeset, f_path) | |
171 |
|
171 | |||
172 | c.authors = [] |
|
172 | c.authors = [] | |
173 | for a in set([x.author for x in _hist]): |
|
173 | for a in set([x.author for x in _hist]): | |
174 | c.authors.append((h.email(a), h.person(a))) |
|
174 | c.authors.append((h.email(a), h.person(a))) | |
175 | else: |
|
175 | else: | |
176 | c.authors = c.file_history = [] |
|
176 | c.authors = c.file_history = [] | |
177 | except RepositoryError as e: |
|
177 | except RepositoryError as e: | |
178 | h.flash(unicode(e), category='error') |
|
178 | h.flash(unicode(e), category='error') | |
179 | raise HTTPNotFound() |
|
179 | raise HTTPNotFound() | |
180 |
|
180 | |||
181 | if request.environ.get('HTTP_X_PARTIAL_XHR'): |
|
181 | if request.environ.get('HTTP_X_PARTIAL_XHR'): | |
182 | return render('files/files_ypjax.html') |
|
182 | return render('files/files_ypjax.html') | |
183 |
|
183 | |||
184 | # TODO: tags and bookmarks? |
|
184 | # TODO: tags and bookmarks? | |
185 | c.revision_options = [(c.changeset.raw_id, |
|
185 | c.revision_options = [(c.changeset.raw_id, | |
186 | _('%s at %s') % (b, h.short_id(c.changeset.raw_id))) for b in c.changeset.branches] + \ |
|
186 | _('%s at %s') % (b, h.short_id(c.changeset.raw_id))) for b in c.changeset.branches] + \ | |
187 | [(n, b) for b, n in c.db_repo_scm_instance.branches.items()] |
|
187 | [(n, b) for b, n in c.db_repo_scm_instance.branches.items()] | |
188 | if c.db_repo_scm_instance.closed_branches: |
|
188 | if c.db_repo_scm_instance.closed_branches: | |
189 | prefix = _('(closed)') + ' ' |
|
189 | prefix = _('(closed)') + ' ' | |
190 | c.revision_options += [('-', '-')] + \ |
|
190 | c.revision_options += [('-', '-')] + \ | |
191 | [(n, prefix + b) for b, n in c.db_repo_scm_instance.closed_branches.items()] |
|
191 | [(n, prefix + b) for b, n in c.db_repo_scm_instance.closed_branches.items()] | |
192 |
|
192 | |||
193 | return render('files/files.html') |
|
193 | return render('files/files.html') | |
194 |
|
194 | |||
195 | @LoginRequired(allow_default_user=True) |
|
195 | @LoginRequired(allow_default_user=True) | |
196 | @HasRepoPermissionLevelDecorator('read') |
|
196 | @HasRepoPermissionLevelDecorator('read') | |
197 | @jsonify |
|
197 | @jsonify | |
198 | def history(self, repo_name, revision, f_path): |
|
198 | def history(self, repo_name, revision, f_path): | |
199 | changeset = self.__get_cs(revision) |
|
199 | changeset = self.__get_cs(revision) | |
200 | _file = changeset.get_node(f_path) |
|
200 | _file = changeset.get_node(f_path) | |
201 | if _file.is_file(): |
|
201 | if _file.is_file(): | |
202 | file_history, _hist = self._get_node_history(changeset, f_path) |
|
202 | file_history, _hist = self._get_node_history(changeset, f_path) | |
203 |
|
203 | |||
204 | res = [] |
|
204 | res = [] | |
205 | for obj in file_history: |
|
205 | for obj in file_history: | |
206 | res.append({ |
|
206 | res.append({ | |
207 | 'text': obj[1], |
|
207 | 'text': obj[1], | |
208 | 'children': [{'id': o[0], 'text': o[1]} for o in obj[0]] |
|
208 | 'children': [{'id': o[0], 'text': o[1]} for o in obj[0]] | |
209 | }) |
|
209 | }) | |
210 |
|
210 | |||
211 | data = { |
|
211 | data = { | |
212 | 'more': False, |
|
212 | 'more': False, | |
213 | 'results': res |
|
213 | 'results': res | |
214 | } |
|
214 | } | |
215 | return data |
|
215 | return data | |
216 |
|
216 | |||
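For reference, the dict returned by history() above is serialized by @jsonify and feeds the grouped revision dropdown: each entry in 'results' is a group with a 'text' label and 'children' carrying an 'id' (a changeset hash) and a display 'text'. The concrete values in this sketch are made up for illustration; only the shape is taken from the code:

example_history_payload = {
    'more': False,
    'results': [
        {
            'text': 'Branches',
            'children': [
                {'id': '91b2a1463875', 'text': 'default'},
            ],
        },
        {
            'text': 'Changesets',
            'children': [
                {'id': 'c5dd4a816a63', 'text': 'r122:c5dd4a816a63'},
            ],
        },
    ],
}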
217 | @LoginRequired(allow_default_user=True) |
|
217 | @LoginRequired(allow_default_user=True) | |
218 | @HasRepoPermissionLevelDecorator('read') |
|
218 | @HasRepoPermissionLevelDecorator('read') | |
219 | def authors(self, repo_name, revision, f_path): |
|
219 | def authors(self, repo_name, revision, f_path): | |
220 | changeset = self.__get_cs(revision) |
|
220 | changeset = self.__get_cs(revision) | |
221 | _file = changeset.get_node(f_path) |
|
221 | _file = changeset.get_node(f_path) | |
222 | if _file.is_file(): |
|
222 | if _file.is_file(): | |
223 | file_history, _hist = self._get_node_history(changeset, f_path) |
|
223 | file_history, _hist = self._get_node_history(changeset, f_path) | |
224 | c.authors = [] |
|
224 | c.authors = [] | |
225 | for a in set([x.author for x in _hist]): |
|
225 | for a in set([x.author for x in _hist]): | |
226 | c.authors.append((h.email(a), h.person(a))) |
|
226 | c.authors.append((h.email(a), h.person(a))) | |
227 | return render('files/files_history_box.html') |
|
227 | return render('files/files_history_box.html') | |
228 |
|
228 | |||
229 | @LoginRequired(allow_default_user=True) |
|
229 | @LoginRequired(allow_default_user=True) | |
230 | @HasRepoPermissionLevelDecorator('read') |
|
230 | @HasRepoPermissionLevelDecorator('read') | |
231 | def rawfile(self, repo_name, revision, f_path): |
|
231 | def rawfile(self, repo_name, revision, f_path): | |
232 | cs = self.__get_cs(revision) |
|
232 | cs = self.__get_cs(revision) | |
233 | file_node = self.__get_filenode(cs, f_path) |
|
233 | file_node = self.__get_filenode(cs, f_path) | |
234 |
|
234 | |||
235 | response.content_disposition = 'attachment; filename=%s' % \ |
|
235 | response.content_disposition = 'attachment; filename=%s' % \ | |
236 | safe_str(f_path.split(Repository.url_sep())[-1]) |
|
236 | safe_str(f_path.split(Repository.url_sep())[-1]) | |
237 |
|
237 | |||
238 | response.content_type = file_node.mimetype |
|
238 | response.content_type = file_node.mimetype | |
239 | return file_node.content |
|
239 | return file_node.content | |
240 |
|
240 | |||
241 | @LoginRequired(allow_default_user=True) |
|
241 | @LoginRequired(allow_default_user=True) | |
242 | @HasRepoPermissionLevelDecorator('read') |
|
242 | @HasRepoPermissionLevelDecorator('read') | |
243 | def raw(self, repo_name, revision, f_path): |
|
243 | def raw(self, repo_name, revision, f_path): | |
244 | cs = self.__get_cs(revision) |
|
244 | cs = self.__get_cs(revision) | |
245 | file_node = self.__get_filenode(cs, f_path) |
|
245 | file_node = self.__get_filenode(cs, f_path) | |
246 |
|
246 | |||
247 | raw_mimetype_mapping = { |
|
247 | raw_mimetype_mapping = { | |
248 | # map original mimetype to a mimetype used for "show as raw" |
|
248 | # map original mimetype to a mimetype used for "show as raw" | |
249 | # you can also provide a content-disposition to override the |
|
249 | # you can also provide a content-disposition to override the | |
250 | # default "attachment" disposition. |
|
250 | # default "attachment" disposition. | |
251 | # orig_type: (new_type, new_dispo) |
|
251 | # orig_type: (new_type, new_dispo) | |
252 |
|
252 | |||
253 | # show images inline: |
|
253 | # show images inline: | |
254 | 'image/x-icon': ('image/x-icon', 'inline'), |
|
254 | 'image/x-icon': ('image/x-icon', 'inline'), | |
255 | 'image/png': ('image/png', 'inline'), |
|
255 | 'image/png': ('image/png', 'inline'), | |
256 | 'image/gif': ('image/gif', 'inline'), |
|
256 | 'image/gif': ('image/gif', 'inline'), | |
257 | 'image/jpeg': ('image/jpeg', 'inline'), |
|
257 | 'image/jpeg': ('image/jpeg', 'inline'), | |
258 | 'image/svg+xml': ('image/svg+xml', 'inline'), |
|
258 | 'image/svg+xml': ('image/svg+xml', 'inline'), | |
259 | } |
|
259 | } | |
260 |
|
260 | |||
261 | mimetype = file_node.mimetype |
|
261 | mimetype = file_node.mimetype | |
262 | try: |
|
262 | try: | |
263 | mimetype, dispo = raw_mimetype_mapping[mimetype] |
|
263 | mimetype, dispo = raw_mimetype_mapping[mimetype] | |
264 | except KeyError: |
|
264 | except KeyError: | |
265 | # we don't know anything special about this, handle it safely |
|
265 | # we don't know anything special about this, handle it safely | |
266 | if file_node.is_binary: |
|
266 | if file_node.is_binary: | |
267 | # do same as download raw for binary files |
|
267 | # do same as download raw for binary files | |
268 | mimetype, dispo = 'application/octet-stream', 'attachment' |
|
268 | mimetype, dispo = 'application/octet-stream', 'attachment' | |
269 | else: |
|
269 | else: | |
270 | # do not just use the original mimetype, but force text/plain, |
|
270 | # do not just use the original mimetype, but force text/plain, | |
271 | # otherwise it would serve text/html and that might be unsafe. |
|
271 | # otherwise it would serve text/html and that might be unsafe. | |
272 | # Note: underlying vcs library fakes text/plain mimetype if the |
|
272 | # Note: underlying vcs library fakes text/plain mimetype if the | |
273 | # mimetype can not be determined and it thinks it is not |
|
273 | # mimetype can not be determined and it thinks it is not | |
274 | # binary. This might lead to erroneous text display in some |
|
274 | # binary. This might lead to erroneous text display in some | |
275 | # cases, but helps in other cases, like with text files |
|
275 | # cases, but helps in other cases, like with text files | |
276 | # without extension. |
|
276 | # without extension. | |
277 | mimetype, dispo = 'text/plain', 'inline' |
|
277 | mimetype, dispo = 'text/plain', 'inline' | |
278 |
|
278 | |||
279 | if dispo == 'attachment': |
|
279 | if dispo == 'attachment': | |
280 | dispo = 'attachment; filename=%s' % \ |
|
280 | dispo = 'attachment; filename=%s' % \ | |
281 | safe_str(f_path.split(os.sep)[-1]) |
|
281 | safe_str(f_path.split(os.sep)[-1]) | |
282 |
|
282 | |||
283 | response.content_disposition = dispo |
|
283 | response.content_disposition = dispo | |
284 | response.content_type = mimetype |
|
284 | response.content_type = mimetype | |
285 | return file_node.content |
|
285 | return file_node.content | |
286 |
|
286 | |||
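The fallback logic in raw() above boils down to one decision: mimetypes listed in raw_mimetype_mapping keep their type and disposition (mainly to show images inline), unknown binary content is served as an application/octet-stream attachment, and everything else is forced to text/plain so repository content is never interpreted as HTML by the browser. A compact, self-contained restatement of just that decision (raw_headers is a hypothetical helper name, not part of the controller):

RAW_MIMETYPE_MAPPING = {
    'image/x-icon': ('image/x-icon', 'inline'),
    'image/png': ('image/png', 'inline'),
    'image/gif': ('image/gif', 'inline'),
    'image/jpeg': ('image/jpeg', 'inline'),
    'image/svg+xml': ('image/svg+xml', 'inline'),
}

def raw_headers(mimetype, is_binary):
    # return (content_type, content_disposition) for serving a node "raw"
    try:
        return RAW_MIMETYPE_MAPPING[mimetype]
    except KeyError:
        if is_binary:
            return 'application/octet-stream', 'attachment'
        return 'text/plain', 'inline'

assert raw_headers('image/png', is_binary=True) == ('image/png', 'inline')
assert raw_headers('text/html', is_binary=False) == ('text/plain', 'inline')
assert raw_headers('application/pdf', is_binary=True) == ('application/octet-stream', 'attachment')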
287 | @LoginRequired() |
|
287 | @LoginRequired() | |
288 | @HasRepoPermissionLevelDecorator('write') |
|
288 | @HasRepoPermissionLevelDecorator('write') | |
289 | def delete(self, repo_name, revision, f_path): |
|
289 | def delete(self, repo_name, revision, f_path): | |
290 | repo = c.db_repo |
|
290 | repo = c.db_repo | |
291 | # check if revision is a branch identifier - basically we cannot |
|
291 | # check if revision is a branch identifier - basically we cannot | |
292 | # create multiple heads via file editing |
|
292 | # create multiple heads via file editing | |
293 | _branches = repo.scm_instance.branches |
|
293 | _branches = repo.scm_instance.branches | |
294 | # check if revision is a branch name or branch hash |
|
294 | # check if revision is a branch name or branch hash | |
295 | if revision not in _branches and revision not in _branches.values(): |
|
295 | if revision not in _branches and revision not in _branches.values(): | |
296 | h.flash(_('You can only delete files with revision ' |
|
296 | h.flash(_('You can only delete files with revision ' | |
297 | 'being a valid branch'), category='warning') |
|
297 | 'being a valid branch'), category='warning') | |
298 | raise HTTPFound(location=h.url('files_home', |
|
298 | raise HTTPFound(location=h.url('files_home', | |
299 | repo_name=repo_name, revision='tip', |
|
299 | repo_name=repo_name, revision='tip', | |
300 | f_path=f_path)) |
|
300 | f_path=f_path)) | |
301 |
|
301 | |||
302 | r_post = request.POST |
|
302 | r_post = request.POST | |
303 |
|
303 | |||
304 | c.cs = self.__get_cs(revision) |
|
304 | c.cs = self.__get_cs(revision) | |
305 | c.file = self.__get_filenode(c.cs, f_path) |
|
305 | c.file = self.__get_filenode(c.cs, f_path) | |
306 |
|
306 | |||
307 | c.default_message = _('Deleted file %s via Kallithea') % (f_path) |
|
307 | c.default_message = _('Deleted file %s via Kallithea') % (f_path) | |
308 | c.f_path = f_path |
|
308 | c.f_path = f_path | |
309 | node_path = f_path |
|
309 | node_path = f_path | |
310 | author = request.authuser.full_contact |
|
310 | author = request.authuser.full_contact | |
311 |
|
311 | |||
312 | if r_post: |
|
312 | if r_post: | |
313 | message = r_post.get('message') or c.default_message |
|
313 | message = r_post.get('message') or c.default_message | |
314 |
|
314 | |||
315 | try: |
|
315 | try: | |
316 | nodes = { |
|
316 | nodes = { | |
317 | node_path: { |
|
317 | node_path: { | |
318 | 'content': '' |
|
318 | 'content': '' | |
319 | } |
|
319 | } | |
320 | } |
|
320 | } | |
321 | self.scm_model.delete_nodes( |
|
321 | self.scm_model.delete_nodes( | |
322 | user=request.authuser.user_id, |
|
322 | user=request.authuser.user_id, | |
323 | ip_addr=request.ip_addr, |
|
323 | ip_addr=request.ip_addr, | |
324 | repo=c.db_repo, |
|
324 | repo=c.db_repo, | |
325 | message=message, |
|
325 | message=message, | |
326 | nodes=nodes, |
|
326 | nodes=nodes, | |
327 | parent_cs=c.cs, |
|
327 | parent_cs=c.cs, | |
328 | author=author, |
|
328 | author=author, | |
329 | ) |
|
329 | ) | |
330 |
|
330 | |||
331 | h.flash(_('Successfully deleted file %s') % f_path, |
|
331 | h.flash(_('Successfully deleted file %s') % f_path, | |
332 | category='success') |
|
332 | category='success') | |
333 | except Exception: |
|
333 | except Exception: | |
334 | log.error(traceback.format_exc()) |
|
334 | log.error(traceback.format_exc()) | |
335 | h.flash(_('Error occurred during commit'), category='error') |
|
335 | h.flash(_('Error occurred during commit'), category='error') | |
336 | raise HTTPFound(location=url('changeset_home', |
|
336 | raise HTTPFound(location=url('changeset_home', | |
337 | repo_name=c.repo_name, revision='tip')) |
|
337 | repo_name=c.repo_name, revision='tip')) | |
338 |
|
338 | |||
339 | return render('files/files_delete.html') |
|
339 | return render('files/files_delete.html') | |
340 |
|
340 | |||
341 | @LoginRequired() |
|
341 | @LoginRequired() | |
342 | @HasRepoPermissionLevelDecorator('write') |
|
342 | @HasRepoPermissionLevelDecorator('write') | |
343 | def edit(self, repo_name, revision, f_path): |
|
343 | def edit(self, repo_name, revision, f_path): | |
344 | repo = c.db_repo |
|
344 | repo = c.db_repo | |
345 | # check if revision is a branch identifier - basically we cannot |
|
345 | # check if revision is a branch identifier - basically we cannot | |
346 | # create multiple heads via file editing |
|
346 | # create multiple heads via file editing | |
347 | _branches = repo.scm_instance.branches |
|
347 | _branches = repo.scm_instance.branches | |
348 | # check if revision is a branch name or branch hash |
|
348 | # check if revision is a branch name or branch hash | |
349 | if revision not in _branches and revision not in _branches.values(): |
|
349 | if revision not in _branches and revision not in _branches.values(): | |
350 | h.flash(_('You can only edit files with revision ' |
|
350 | h.flash(_('You can only edit files with revision ' | |
351 | 'being a valid branch'), category='warning') |
|
351 | 'being a valid branch'), category='warning') | |
352 | raise HTTPFound(location=h.url('files_home', |
|
352 | raise HTTPFound(location=h.url('files_home', | |
353 | repo_name=repo_name, revision='tip', |
|
353 | repo_name=repo_name, revision='tip', | |
354 | f_path=f_path)) |
|
354 | f_path=f_path)) | |
355 |
|
355 | |||
356 | r_post = request.POST |
|
356 | r_post = request.POST | |
357 |
|
357 | |||
358 | c.cs = self.__get_cs(revision) |
|
358 | c.cs = self.__get_cs(revision) | |
359 | c.file = self.__get_filenode(c.cs, f_path) |
|
359 | c.file = self.__get_filenode(c.cs, f_path) | |
360 |
|
360 | |||
361 | if c.file.is_binary: |
|
361 | if c.file.is_binary: | |
362 | raise HTTPFound(location=url('files_home', repo_name=c.repo_name, |
|
362 | raise HTTPFound(location=url('files_home', repo_name=c.repo_name, | |
363 | revision=c.cs.raw_id, f_path=f_path)) |
|
363 | revision=c.cs.raw_id, f_path=f_path)) | |
364 | c.default_message = _('Edited file %s via Kallithea') % (f_path) |
|
364 | c.default_message = _('Edited file %s via Kallithea') % (f_path) | |
365 | c.f_path = f_path |
|
365 | c.f_path = f_path | |
366 |
|
366 | |||
367 | if r_post: |
|
367 | if r_post: | |
368 |
|
368 | old_content = safe_unicode(c.file.content) | ||
369 | old_content = c.file.content |
|
|||
370 | sl = old_content.splitlines(1) |
|
369 | sl = old_content.splitlines(1) | |
371 | first_line = sl[0] if sl else '' |
|
370 | first_line = sl[0] if sl else '' | |
372 | # modes: 0 - Unix, 1 - Mac, 2 - DOS |
|
371 | # modes: 0 - Unix, 1 - Mac, 2 - DOS | |
373 | mode = detect_mode(first_line, 0) |
|
372 | mode = detect_mode(first_line, 0) | |
374 | content = convert_line_endings(r_post.get('content', ''), mode) |
|
373 | content = convert_line_endings(r_post.get('content', ''), mode) | |
375 |
|
374 | |||
376 | message = r_post.get('message') or c.default_message |
|
375 | message = r_post.get('message') or c.default_message | |
377 | author = request.authuser.full_contact |
|
376 | author = request.authuser.full_contact | |
378 |
|
377 | |||
379 | if content == old_content: |
|
378 | if content == old_content: | |
380 | h.flash(_('No changes'), category='warning') |
|
379 | h.flash(_('No changes'), category='warning') | |
381 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, |
|
380 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, | |
382 | revision='tip')) |
|
381 | revision='tip')) | |
383 | try: |
|
382 | try: | |
384 | self.scm_model.commit_change(repo=c.db_repo_scm_instance, |
|
383 | self.scm_model.commit_change(repo=c.db_repo_scm_instance, | |
385 | repo_name=repo_name, cs=c.cs, |
|
384 | repo_name=repo_name, cs=c.cs, | |
386 | user=request.authuser.user_id, |
|
385 | user=request.authuser.user_id, | |
387 | ip_addr=request.ip_addr, |
|
386 | ip_addr=request.ip_addr, | |
388 | author=author, message=message, |
|
387 | author=author, message=message, | |
389 | content=content, f_path=f_path) |
|
388 | content=content, f_path=f_path) | |
390 | h.flash(_('Successfully committed to %s') % f_path, |
|
389 | h.flash(_('Successfully committed to %s') % f_path, | |
391 | category='success') |
|
390 | category='success') | |
392 | except Exception: |
|
391 | except Exception: | |
393 | log.error(traceback.format_exc()) |
|
392 | log.error(traceback.format_exc()) | |
394 | h.flash(_('Error occurred during commit'), category='error') |
|
393 | h.flash(_('Error occurred during commit'), category='error') | |
395 | raise HTTPFound(location=url('changeset_home', |
|
394 | raise HTTPFound(location=url('changeset_home', | |
396 | repo_name=c.repo_name, revision='tip')) |
|
395 | repo_name=c.repo_name, revision='tip')) | |
397 |
|
396 | |||
398 | return render('files/files_edit.html') |
|
397 | return render('files/files_edit.html') | |
399 |
|
398 | |||
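edit() above preserves the file's original line-ending style: detect_mode inspects the first line of the existing content (0 - Unix, 1 - Mac, 2 - DOS, with 0 as the default) and convert_line_endings normalizes the posted text to that mode before committing. The real helpers live in kallithea.lib.utils2; the sketch below is only an assumed approximation of their behaviour, for illustration:

def detect_mode(line, default):
    # assumption: the mode is derived from the first line's terminator
    if line.endswith('\r\n'):
        return 2  # DOS
    if line.endswith('\r'):
        return 1  # Mac
    if line.endswith('\n'):
        return 0  # Unix
    return default

def convert_line_endings(text, mode):
    eol = {0: '\n', 1: '\r', 2: '\r\n'}[mode]
    # normalize everything to '\n' first, then apply the requested EOL
    return text.replace('\r\n', '\n').replace('\r', '\n').replace('\n', eol)

assert detect_mode('first line\r\n', 0) == 2
assert convert_line_endings('a\nb\n', 2) == 'a\r\nb\r\n'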
400 | @LoginRequired() |
|
399 | @LoginRequired() | |
401 | @HasRepoPermissionLevelDecorator('write') |
|
400 | @HasRepoPermissionLevelDecorator('write') | |
402 | def add(self, repo_name, revision, f_path): |
|
401 | def add(self, repo_name, revision, f_path): | |
403 |
|
402 | |||
404 | repo = c.db_repo |
|
403 | repo = c.db_repo | |
405 | r_post = request.POST |
|
404 | r_post = request.POST | |
406 | c.cs = self.__get_cs(revision, silent_empty=True) |
|
405 | c.cs = self.__get_cs(revision, silent_empty=True) | |
407 | if c.cs is None: |
|
406 | if c.cs is None: | |
408 | c.cs = EmptyChangeset(alias=c.db_repo_scm_instance.alias) |
|
407 | c.cs = EmptyChangeset(alias=c.db_repo_scm_instance.alias) | |
409 | c.default_message = (_('Added file via Kallithea')) |
|
408 | c.default_message = (_('Added file via Kallithea')) | |
410 | c.f_path = f_path |
|
409 | c.f_path = f_path | |
411 |
|
410 | |||
412 | if r_post: |
|
411 | if r_post: | |
413 | unix_mode = 0 |
|
412 | unix_mode = 0 | |
414 | content = convert_line_endings(r_post.get('content', ''), unix_mode) |
|
413 | content = convert_line_endings(r_post.get('content', ''), unix_mode) | |
415 |
|
414 | |||
416 | message = r_post.get('message') or c.default_message |
|
415 | message = r_post.get('message') or c.default_message | |
417 | filename = r_post.get('filename') |
|
416 | filename = r_post.get('filename') | |
418 | location = r_post.get('location', '') |
|
417 | location = r_post.get('location', '') | |
419 | file_obj = r_post.get('upload_file', None) |
|
418 | file_obj = r_post.get('upload_file', None) | |
420 |
|
419 | |||
421 | if file_obj is not None and hasattr(file_obj, 'filename'): |
|
420 | if file_obj is not None and hasattr(file_obj, 'filename'): | |
422 | filename = file_obj.filename |
|
421 | filename = file_obj.filename | |
423 | content = file_obj.file |
|
422 | content = file_obj.file | |
424 |
|
423 | |||
425 | if hasattr(content, 'file'): |
|
424 | if hasattr(content, 'file'): | |
426 | # non-POSIX systems store the real file object under the 'file' attribute |
|
425 | # non-POSIX systems store the real file object under the 'file' attribute | |
427 | content = content.file |
|
426 | content = content.file | |
428 |
|
427 | |||
429 | if not content: |
|
428 | if not content: | |
430 | h.flash(_('No content'), category='warning') |
|
429 | h.flash(_('No content'), category='warning') | |
431 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, |
|
430 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, | |
432 | revision='tip')) |
|
431 | revision='tip')) | |
433 | if not filename: |
|
432 | if not filename: | |
434 | h.flash(_('No filename'), category='warning') |
|
433 | h.flash(_('No filename'), category='warning') | |
435 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, |
|
434 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, | |
436 | revision='tip')) |
|
435 | revision='tip')) | |
437 | # strip any path components from the filename, keep just the basename |
|
436 | # strip any path components from the filename, keep just the basename | |
438 | filename = os.path.basename(filename) |
|
437 | filename = os.path.basename(filename) | |
439 | node_path = posixpath.join(location, filename) |
|
438 | node_path = posixpath.join(location, filename) | |
440 | author = request.authuser.full_contact |
|
439 | author = request.authuser.full_contact | |
441 |
|
440 | |||
442 | try: |
|
441 | try: | |
443 | nodes = { |
|
442 | nodes = { | |
444 | node_path: { |
|
443 | node_path: { | |
445 | 'content': content |
|
444 | 'content': content | |
446 | } |
|
445 | } | |
447 | } |
|
446 | } | |
448 | self.scm_model.create_nodes( |
|
447 | self.scm_model.create_nodes( | |
449 | user=request.authuser.user_id, |
|
448 | user=request.authuser.user_id, | |
450 | ip_addr=request.ip_addr, |
|
449 | ip_addr=request.ip_addr, | |
451 | repo=c.db_repo, |
|
450 | repo=c.db_repo, | |
452 | message=message, |
|
451 | message=message, | |
453 | nodes=nodes, |
|
452 | nodes=nodes, | |
454 | parent_cs=c.cs, |
|
453 | parent_cs=c.cs, | |
455 | author=author, |
|
454 | author=author, | |
456 | ) |
|
455 | ) | |
457 |
|
456 | |||
458 | h.flash(_('Successfully committed to %s') % node_path, |
|
457 | h.flash(_('Successfully committed to %s') % node_path, | |
459 | category='success') |
|
458 | category='success') | |
460 | except NonRelativePathError as e: |
|
459 | except NonRelativePathError as e: | |
461 | h.flash(_('Location must be relative path and must not ' |
|
460 | h.flash(_('Location must be relative path and must not ' | |
462 | 'contain .. in path'), category='warning') |
|
461 | 'contain .. in path'), category='warning') | |
463 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, |
|
462 | raise HTTPFound(location=url('changeset_home', repo_name=c.repo_name, | |
464 | revision='tip')) |
|
463 | revision='tip')) | |
465 | except (NodeError, NodeAlreadyExistsError) as e: |
|
464 | except (NodeError, NodeAlreadyExistsError) as e: | |
466 | h.flash(unicode(e), category='error') |
|
465 | h.flash(unicode(e), category='error') | |
467 | except Exception: |
|
466 | except Exception: | |
468 | log.error(traceback.format_exc()) |
|
467 | log.error(traceback.format_exc()) | |
469 | h.flash(_('Error occurred during commit'), category='error') |
|
468 | h.flash(_('Error occurred during commit'), category='error') | |
470 | raise HTTPFound(location=url('changeset_home', |
|
469 | raise HTTPFound(location=url('changeset_home', | |
471 | repo_name=c.repo_name, revision='tip')) |
|
470 | repo_name=c.repo_name, revision='tip')) | |
472 |
|
471 | |||
473 | return render('files/files_add.html') |
|
472 | return render('files/files_add.html') | |
474 |
|
473 | |||
475 | @LoginRequired(allow_default_user=True) |
|
474 | @LoginRequired(allow_default_user=True) | |
476 | @HasRepoPermissionLevelDecorator('read') |
|
475 | @HasRepoPermissionLevelDecorator('read') | |
477 | def archivefile(self, repo_name, fname): |
|
476 | def archivefile(self, repo_name, fname): | |
478 | fileformat = None |
|
477 | fileformat = None | |
479 | revision = None |
|
478 | revision = None | |
480 | ext = None |
|
479 | ext = None | |
481 | subrepos = request.GET.get('subrepos') == 'true' |
|
480 | subrepos = request.GET.get('subrepos') == 'true' | |
482 |
|
481 | |||
483 | for a_type, ext_data in settings.ARCHIVE_SPECS.items(): |
|
482 | for a_type, ext_data in settings.ARCHIVE_SPECS.items(): | |
484 | archive_spec = fname.split(ext_data[1]) |
|
483 | archive_spec = fname.split(ext_data[1]) | |
485 | if len(archive_spec) == 2 and archive_spec[1] == '': |
|
484 | if len(archive_spec) == 2 and archive_spec[1] == '': | |
486 | fileformat = a_type or ext_data[1] |
|
485 | fileformat = a_type or ext_data[1] | |
487 | revision = archive_spec[0] |
|
486 | revision = archive_spec[0] | |
488 | ext = ext_data[1] |
|
487 | ext = ext_data[1] | |
489 |
|
488 | |||
490 | try: |
|
489 | try: | |
491 | dbrepo = RepoModel().get_by_repo_name(repo_name) |
|
490 | dbrepo = RepoModel().get_by_repo_name(repo_name) | |
492 | if not dbrepo.enable_downloads: |
|
491 | if not dbrepo.enable_downloads: | |
493 | return _('Downloads disabled') # TODO: do something else? |
|
492 | return _('Downloads disabled') # TODO: do something else? | |
494 |
|
493 | |||
495 | if c.db_repo_scm_instance.alias == 'hg': |
|
494 | if c.db_repo_scm_instance.alias == 'hg': | |
496 | # patch and reset hooks section of UI config to not run any |
|
495 | # patch and reset hooks section of UI config to not run any | |
497 | # hooks on fetching archives with subrepos |
|
496 | # hooks on fetching archives with subrepos | |
498 | for k, v in c.db_repo_scm_instance._repo.ui.configitems('hooks'): |
|
497 | for k, v in c.db_repo_scm_instance._repo.ui.configitems('hooks'): | |
499 | c.db_repo_scm_instance._repo.ui.setconfig('hooks', k, None) |
|
498 | c.db_repo_scm_instance._repo.ui.setconfig('hooks', k, None) | |
500 |
|
499 | |||
501 | cs = c.db_repo_scm_instance.get_changeset(revision) |
|
500 | cs = c.db_repo_scm_instance.get_changeset(revision) | |
502 | content_type = settings.ARCHIVE_SPECS[fileformat][0] |
|
501 | content_type = settings.ARCHIVE_SPECS[fileformat][0] | |
503 | except ChangesetDoesNotExistError: |
|
502 | except ChangesetDoesNotExistError: | |
504 | return _('Unknown revision %s') % revision |
|
503 | return _('Unknown revision %s') % revision | |
505 | except EmptyRepositoryError: |
|
504 | except EmptyRepositoryError: | |
506 | return _('Empty repository') |
|
505 | return _('Empty repository') | |
507 | except (ImproperArchiveTypeError, KeyError): |
|
506 | except (ImproperArchiveTypeError, KeyError): | |
508 | return _('Unknown archive type') |
|
507 | return _('Unknown archive type') | |
509 |
|
508 | |||
510 | from kallithea import CONFIG |
|
509 | from kallithea import CONFIG | |
511 | rev_name = cs.raw_id[:12] |
|
510 | rev_name = cs.raw_id[:12] | |
512 | archive_name = '%s-%s%s' % (safe_str(repo_name.replace('/', '_')), |
|
511 | archive_name = '%s-%s%s' % (safe_str(repo_name.replace('/', '_')), | |
513 | safe_str(rev_name), ext) |
|
512 | safe_str(rev_name), ext) | |
514 |
|
513 | |||
515 | archive_path = None |
|
514 | archive_path = None | |
516 | cached_archive_path = None |
|
515 | cached_archive_path = None | |
517 | archive_cache_dir = CONFIG.get('archive_cache_dir') |
|
516 | archive_cache_dir = CONFIG.get('archive_cache_dir') | |
518 | if archive_cache_dir and not subrepos: # TODO: subrepo caching? |
|
517 | if archive_cache_dir and not subrepos: # TODO: subrepo caching? | |
519 | if not os.path.isdir(archive_cache_dir): |
|
518 | if not os.path.isdir(archive_cache_dir): | |
520 | os.makedirs(archive_cache_dir) |
|
519 | os.makedirs(archive_cache_dir) | |
521 | cached_archive_path = os.path.join(archive_cache_dir, archive_name) |
|
520 | cached_archive_path = os.path.join(archive_cache_dir, archive_name) | |
522 | if os.path.isfile(cached_archive_path): |
|
521 | if os.path.isfile(cached_archive_path): | |
523 | log.debug('Found cached archive in %s', cached_archive_path) |
|
522 | log.debug('Found cached archive in %s', cached_archive_path) | |
524 | archive_path = cached_archive_path |
|
523 | archive_path = cached_archive_path | |
525 | else: |
|
524 | else: | |
526 | log.debug('Archive %s is not yet cached', archive_name) |
|
525 | log.debug('Archive %s is not yet cached', archive_name) | |
527 |
|
526 | |||
528 | if archive_path is None: |
|
527 | if archive_path is None: | |
529 | # generate new archive |
|
528 | # generate new archive | |
530 | fd, archive_path = tempfile.mkstemp() |
|
529 | fd, archive_path = tempfile.mkstemp() | |
531 | log.debug('Creating new temp archive in %s', archive_path) |
|
530 | log.debug('Creating new temp archive in %s', archive_path) | |
532 | with os.fdopen(fd, 'wb') as stream: |
|
531 | with os.fdopen(fd, 'wb') as stream: | |
533 | cs.fill_archive(stream=stream, kind=fileformat, subrepos=subrepos) |
|
532 | cs.fill_archive(stream=stream, kind=fileformat, subrepos=subrepos) | |
534 | # stream (and thus fd) has been closed by cs.fill_archive |
|
533 | # stream (and thus fd) has been closed by cs.fill_archive | |
535 | if cached_archive_path is not None: |
|
534 | if cached_archive_path is not None: | |
536 | # we generated the archive - move it to cache |
|
535 | # we generated the archive - move it to cache | |
537 | log.debug('Storing new archive in %s', cached_archive_path) |
|
536 | log.debug('Storing new archive in %s', cached_archive_path) | |
538 | shutil.move(archive_path, cached_archive_path) |
|
537 | shutil.move(archive_path, cached_archive_path) | |
539 | archive_path = cached_archive_path |
|
538 | archive_path = cached_archive_path | |
540 |
|
539 | |||
541 | def get_chunked_archive(archive_path): |
|
540 | def get_chunked_archive(archive_path): | |
542 | stream = open(archive_path, 'rb') |
|
541 | stream = open(archive_path, 'rb') | |
543 | while True: |
|
542 | while True: | |
544 | data = stream.read(16 * 1024) |
|
543 | data = stream.read(16 * 1024) | |
545 | if not data: |
|
544 | if not data: | |
546 | break |
|
545 | break | |
547 | yield data |
|
546 | yield data | |
548 | stream.close() |
|
547 | stream.close() | |
549 | if archive_path != cached_archive_path: |
|
548 | if archive_path != cached_archive_path: | |
550 | log.debug('Destroying temp archive %s', archive_path) |
|
549 | log.debug('Destroying temp archive %s', archive_path) | |
551 | os.remove(archive_path) |
|
550 | os.remove(archive_path) | |
552 |
|
551 | |||
553 | action_logger(user=request.authuser, |
|
552 | action_logger(user=request.authuser, | |
554 | action='user_downloaded_archive:%s' % (archive_name), |
|
553 | action='user_downloaded_archive:%s' % (archive_name), | |
555 | repo=repo_name, ipaddr=request.ip_addr, commit=True) |
|
554 | repo=repo_name, ipaddr=request.ip_addr, commit=True) | |
556 |
|
555 | |||
557 | response.content_disposition = str('attachment; filename=%s' % (archive_name)) |
|
556 | response.content_disposition = str('attachment; filename=%s' % (archive_name)) | |
558 | response.content_type = str(content_type) |
|
557 | response.content_type = str(content_type) | |
559 | return get_chunked_archive(archive_path) |
|
558 | return get_chunked_archive(archive_path) | |
560 |
|
559 | |||
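archivefile() above recovers both the archive type and the revision from nothing but the requested filename: for every known extension it splits the name on that extension and accepts the match when the split leaves an empty tail, i.e. the name ends with the extension and the leading part is the revision. A standalone illustration of that parsing (the ARCHIVE_SPECS entries here are assumed example values; the real table is settings.ARCHIVE_SPECS from kallithea.lib.vcs.conf):

ARCHIVE_SPECS = {
    'tgz': ('application/x-gzip', '.tar.gz'),
    'zip': ('application/zip', '.zip'),
}

def parse_archive_fname(fname):
    for a_type, (content_type, ext) in ARCHIVE_SPECS.items():
        archive_spec = fname.split(ext)
        if len(archive_spec) == 2 and archive_spec[1] == '':
            # fname ends with ext; what precedes it is the revision
            return a_type, archive_spec[0], ext, content_type
    return None

print(parse_archive_fname('91b2a1463875.tar.gz'))
# -> ('tgz', '91b2a1463875', '.tar.gz', 'application/x-gzip')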
561 | @LoginRequired(allow_default_user=True) |
|
560 | @LoginRequired(allow_default_user=True) | |
562 | @HasRepoPermissionLevelDecorator('read') |
|
561 | @HasRepoPermissionLevelDecorator('read') | |
563 | def diff(self, repo_name, f_path): |
|
562 | def diff(self, repo_name, f_path): | |
564 | ignore_whitespace = request.GET.get('ignorews') == '1' |
|
563 | ignore_whitespace = request.GET.get('ignorews') == '1' | |
565 | line_context = safe_int(request.GET.get('context'), 3) |
|
564 | line_context = safe_int(request.GET.get('context'), 3) | |
566 | diff2 = request.GET.get('diff2', '') |
|
565 | diff2 = request.GET.get('diff2', '') | |
567 | diff1 = request.GET.get('diff1', '') or diff2 |
|
566 | diff1 = request.GET.get('diff1', '') or diff2 | |
568 | c.action = request.GET.get('diff') |
|
567 | c.action = request.GET.get('diff') | |
569 | c.no_changes = diff1 == diff2 |
|
568 | c.no_changes = diff1 == diff2 | |
570 | c.f_path = f_path |
|
569 | c.f_path = f_path | |
571 | c.big_diff = False |
|
570 | c.big_diff = False | |
572 | fulldiff = request.GET.get('fulldiff') |
|
571 | fulldiff = request.GET.get('fulldiff') | |
573 | c.anchor_url = anchor_url |
|
572 | c.anchor_url = anchor_url | |
574 | c.ignorews_url = _ignorews_url |
|
573 | c.ignorews_url = _ignorews_url | |
575 | c.context_url = _context_url |
|
574 | c.context_url = _context_url | |
576 | c.changes = OrderedDict() |
|
575 | c.changes = OrderedDict() | |
577 | c.changes[diff2] = [] |
|
576 | c.changes[diff2] = [] | |
578 |
|
577 | |||
579 | # special case: if we only want to show a rev, it's implemented here |
|
578 | # special case: if we only want to show a rev, it's implemented here | |
580 | # to reduce JS and callbacks |
|
579 | # to reduce JS and callbacks | |
581 |
|
580 | |||
582 | if request.GET.get('show_rev'): |
|
581 | if request.GET.get('show_rev'): | |
583 | if str2bool(request.GET.get('annotate', 'False')): |
|
582 | if str2bool(request.GET.get('annotate', 'False')): | |
584 | _url = url('files_annotate_home', repo_name=c.repo_name, |
|
583 | _url = url('files_annotate_home', repo_name=c.repo_name, | |
585 | revision=diff1, f_path=c.f_path) |
|
584 | revision=diff1, f_path=c.f_path) | |
586 | else: |
|
585 | else: | |
587 | _url = url('files_home', repo_name=c.repo_name, |
|
586 | _url = url('files_home', repo_name=c.repo_name, | |
588 | revision=diff1, f_path=c.f_path) |
|
587 | revision=diff1, f_path=c.f_path) | |
589 |
|
588 | |||
590 | raise HTTPFound(location=_url) |
|
589 | raise HTTPFound(location=_url) | |
591 | try: |
|
590 | try: | |
592 | if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]: |
|
591 | if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]: | |
593 | c.changeset_1 = c.db_repo_scm_instance.get_changeset(diff1) |
|
592 | c.changeset_1 = c.db_repo_scm_instance.get_changeset(diff1) | |
594 | try: |
|
593 | try: | |
595 | node1 = c.changeset_1.get_node(f_path) |
|
594 | node1 = c.changeset_1.get_node(f_path) | |
596 | if node1.is_dir(): |
|
595 | if node1.is_dir(): | |
597 | raise NodeError('%s path is a %s not a file' |
|
596 | raise NodeError('%s path is a %s not a file' | |
598 | % (node1, type(node1))) |
|
597 | % (node1, type(node1))) | |
599 | except NodeDoesNotExistError: |
|
598 | except NodeDoesNotExistError: | |
600 | c.changeset_1 = EmptyChangeset(cs=diff1, |
|
599 | c.changeset_1 = EmptyChangeset(cs=diff1, | |
601 | revision=c.changeset_1.revision, |
|
600 | revision=c.changeset_1.revision, | |
602 | repo=c.db_repo_scm_instance) |
|
601 | repo=c.db_repo_scm_instance) | |
603 | node1 = FileNode(f_path, '', changeset=c.changeset_1) |
|
602 | node1 = FileNode(f_path, '', changeset=c.changeset_1) | |
604 | else: |
|
603 | else: | |
605 | c.changeset_1 = EmptyChangeset(repo=c.db_repo_scm_instance) |
|
604 | c.changeset_1 = EmptyChangeset(repo=c.db_repo_scm_instance) | |
606 | node1 = FileNode(f_path, '', changeset=c.changeset_1) |
|
605 | node1 = FileNode(f_path, '', changeset=c.changeset_1) | |
607 |
|
606 | |||
608 | if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]: |
|
607 | if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]: | |
609 | c.changeset_2 = c.db_repo_scm_instance.get_changeset(diff2) |
|
608 | c.changeset_2 = c.db_repo_scm_instance.get_changeset(diff2) | |
610 | try: |
|
609 | try: | |
611 | node2 = c.changeset_2.get_node(f_path) |
|
610 | node2 = c.changeset_2.get_node(f_path) | |
612 | if node2.is_dir(): |
|
611 | if node2.is_dir(): | |
613 | raise NodeError('%s path is a %s not a file' |
|
612 | raise NodeError('%s path is a %s not a file' | |
614 | % (node2, type(node2))) |
|
613 | % (node2, type(node2))) | |
615 | except NodeDoesNotExistError: |
|
614 | except NodeDoesNotExistError: | |
616 | c.changeset_2 = EmptyChangeset(cs=diff2, |
|
615 | c.changeset_2 = EmptyChangeset(cs=diff2, | |
617 | revision=c.changeset_2.revision, |
|
616 | revision=c.changeset_2.revision, | |
618 | repo=c.db_repo_scm_instance) |
|
617 | repo=c.db_repo_scm_instance) | |
619 | node2 = FileNode(f_path, '', changeset=c.changeset_2) |
|
618 | node2 = FileNode(f_path, '', changeset=c.changeset_2) | |
620 | else: |
|
619 | else: | |
621 | c.changeset_2 = EmptyChangeset(repo=c.db_repo_scm_instance) |
|
620 | c.changeset_2 = EmptyChangeset(repo=c.db_repo_scm_instance) | |
622 | node2 = FileNode(f_path, '', changeset=c.changeset_2) |
|
621 | node2 = FileNode(f_path, '', changeset=c.changeset_2) | |
623 | except (RepositoryError, NodeError): |
|
622 | except (RepositoryError, NodeError): | |
624 | log.error(traceback.format_exc()) |
|
623 | log.error(traceback.format_exc()) | |
625 | raise HTTPFound(location=url('files_home', repo_name=c.repo_name, |
|
624 | raise HTTPFound(location=url('files_home', repo_name=c.repo_name, | |
626 | f_path=f_path)) |
|
625 | f_path=f_path)) | |
627 |
|
626 | |||
628 | if c.action == 'download': |
|
627 | if c.action == 'download': | |
629 | raw_diff = diffs.get_gitdiff(node1, node2, |
|
628 | raw_diff = diffs.get_gitdiff(node1, node2, | |
630 | ignore_whitespace=ignore_whitespace, |
|
629 | ignore_whitespace=ignore_whitespace, | |
631 | context=line_context) |
|
630 | context=line_context) | |
632 | diff_name = '%s_vs_%s.diff' % (diff1, diff2) |
|
631 | diff_name = '%s_vs_%s.diff' % (diff1, diff2) | |
633 | response.content_type = 'text/plain' |
|
632 | response.content_type = 'text/plain' | |
634 | response.content_disposition = ( |
|
633 | response.content_disposition = ( | |
635 | 'attachment; filename=%s' % diff_name |
|
634 | 'attachment; filename=%s' % diff_name | |
636 | ) |
|
635 | ) | |
637 | return raw_diff |
|
636 | return raw_diff | |
638 |
|
637 | |||
639 | elif c.action == 'raw': |
|
638 | elif c.action == 'raw': | |
640 | raw_diff = diffs.get_gitdiff(node1, node2, |
|
639 | raw_diff = diffs.get_gitdiff(node1, node2, | |
641 | ignore_whitespace=ignore_whitespace, |
|
640 | ignore_whitespace=ignore_whitespace, | |
642 | context=line_context) |
|
641 | context=line_context) | |
643 | response.content_type = 'text/plain' |
|
642 | response.content_type = 'text/plain' | |
644 | return raw_diff |
|
643 | return raw_diff | |
645 |
|
644 | |||
646 | else: |
|
645 | else: | |
647 | fid = h.FID(diff2, node2.path) |
|
646 | fid = h.FID(diff2, node2.path) | |
648 | line_context_lcl = get_line_ctx(fid, request.GET) |
|
647 | line_context_lcl = get_line_ctx(fid, request.GET) | |
649 | ign_whitespace_lcl = get_ignore_ws(fid, request.GET) |
|
648 | ign_whitespace_lcl = get_ignore_ws(fid, request.GET) | |
650 |
|
649 | |||
651 | diff_limit = None if fulldiff else self.cut_off_limit |
|
650 | diff_limit = None if fulldiff else self.cut_off_limit | |
652 | c.a_rev, c.cs_rev, a_path, diff, st, op = diffs.wrapped_diff(filenode_old=node1, |
|
651 | c.a_rev, c.cs_rev, a_path, diff, st, op = diffs.wrapped_diff(filenode_old=node1, | |
653 | filenode_new=node2, |
|
652 | filenode_new=node2, | |
654 | diff_limit=diff_limit, |
|
653 | diff_limit=diff_limit, | |
655 | ignore_whitespace=ign_whitespace_lcl, |
|
654 | ignore_whitespace=ign_whitespace_lcl, | |
656 | line_context=line_context_lcl, |
|
655 | line_context=line_context_lcl, | |
657 | enable_comments=False) |
|
656 | enable_comments=False) | |
658 | c.file_diff_data = [(fid, fid, op, a_path, node2.path, diff, st)] |
|
657 | c.file_diff_data = [(fid, fid, op, a_path, node2.path, diff, st)] | |
659 |
|
658 | |||
660 | return render('files/file_diff.html') |
|
659 | return render('files/file_diff.html') | |
661 |
|
660 | |||
662 | @LoginRequired(allow_default_user=True) |
|
661 | @LoginRequired(allow_default_user=True) | |
663 | @HasRepoPermissionLevelDecorator('read') |
|
662 | @HasRepoPermissionLevelDecorator('read') | |
664 | def diff_2way(self, repo_name, f_path): |
|
663 | def diff_2way(self, repo_name, f_path): | |
665 | diff1 = request.GET.get('diff1', '') |
|
664 | diff1 = request.GET.get('diff1', '') | |
666 | diff2 = request.GET.get('diff2', '') |
|
665 | diff2 = request.GET.get('diff2', '') | |
667 | try: |
|
666 | try: | |
668 | if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]: |
|
667 | if diff1 not in ['', None, 'None', '0' * 12, '0' * 40]: | |
669 | c.changeset_1 = c.db_repo_scm_instance.get_changeset(diff1) |
|
668 | c.changeset_1 = c.db_repo_scm_instance.get_changeset(diff1) | |
670 | try: |
|
669 | try: | |
671 | node1 = c.changeset_1.get_node(f_path) |
|
670 | node1 = c.changeset_1.get_node(f_path) | |
672 | if node1.is_dir(): |
|
671 | if node1.is_dir(): | |
673 | raise NodeError('%s path is a %s not a file' |
|
672 | raise NodeError('%s path is a %s not a file' | |
674 | % (node1, type(node1))) |
|
673 | % (node1, type(node1))) | |
675 | except NodeDoesNotExistError: |
|
674 | except NodeDoesNotExistError: | |
676 | c.changeset_1 = EmptyChangeset(cs=diff1, |
|
675 | c.changeset_1 = EmptyChangeset(cs=diff1, | |
677 | revision=c.changeset_1.revision, |
|
676 | revision=c.changeset_1.revision, | |
678 | repo=c.db_repo_scm_instance) |
|
677 | repo=c.db_repo_scm_instance) | |
679 | node1 = FileNode(f_path, '', changeset=c.changeset_1) |
|
678 | node1 = FileNode(f_path, '', changeset=c.changeset_1) | |
680 | else: |
|
679 | else: | |
681 | c.changeset_1 = EmptyChangeset(repo=c.db_repo_scm_instance) |
|
680 | c.changeset_1 = EmptyChangeset(repo=c.db_repo_scm_instance) | |
682 | node1 = FileNode(f_path, '', changeset=c.changeset_1) |
|
681 | node1 = FileNode(f_path, '', changeset=c.changeset_1) | |
683 |
|
682 | |||
684 | if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]: |
|
683 | if diff2 not in ['', None, 'None', '0' * 12, '0' * 40]: | |
685 | c.changeset_2 = c.db_repo_scm_instance.get_changeset(diff2) |
|
684 | c.changeset_2 = c.db_repo_scm_instance.get_changeset(diff2) | |
686 | try: |
|
685 | try: | |
687 | node2 = c.changeset_2.get_node(f_path) |
|
686 | node2 = c.changeset_2.get_node(f_path) | |
688 | if node2.is_dir(): |
|
687 | if node2.is_dir(): | |
689 | raise NodeError('%s path is a %s not a file' |
|
688 | raise NodeError('%s path is a %s not a file' | |
690 | % (node2, type(node2))) |
|
689 | % (node2, type(node2))) | |
691 | except NodeDoesNotExistError: |
|
690 | except NodeDoesNotExistError: | |
692 | c.changeset_2 = EmptyChangeset(cs=diff2, |
|
691 | c.changeset_2 = EmptyChangeset(cs=diff2, | |
693 | revision=c.changeset_2.revision, |
|
692 | revision=c.changeset_2.revision, | |
694 | repo=c.db_repo_scm_instance) |
|
693 | repo=c.db_repo_scm_instance) | |
695 | node2 = FileNode(f_path, '', changeset=c.changeset_2) |
|
694 | node2 = FileNode(f_path, '', changeset=c.changeset_2) | |
696 | else: |
|
695 | else: | |
697 | c.changeset_2 = EmptyChangeset(repo=c.db_repo_scm_instance) |
|
696 | c.changeset_2 = EmptyChangeset(repo=c.db_repo_scm_instance) | |
698 | node2 = FileNode(f_path, '', changeset=c.changeset_2) |
|
697 | node2 = FileNode(f_path, '', changeset=c.changeset_2) | |
699 | except ChangesetDoesNotExistError as e: |
|
698 | except ChangesetDoesNotExistError as e: | |
700 | msg = _('Such revision does not exist for this repository') |
|
699 | msg = _('Such revision does not exist for this repository') | |
701 | h.flash(msg, category='error') |
|
700 | h.flash(msg, category='error') | |
702 | raise HTTPNotFound() |
|
701 | raise HTTPNotFound() | |
703 | c.node1 = node1 |
|
702 | c.node1 = node1 | |
704 | c.node2 = node2 |
|
703 | c.node2 = node2 | |
705 | c.cs1 = c.changeset_1 |
|
704 | c.cs1 = c.changeset_1 | |
706 | c.cs2 = c.changeset_2 |
|
705 | c.cs2 = c.changeset_2 | |
707 |
|
706 | |||
708 | return render('files/diff_2way.html') |
|
707 | return render('files/diff_2way.html') | |
709 |
|
708 | |||
710 | def _get_node_history(self, cs, f_path, changesets=None): |
|
709 | def _get_node_history(self, cs, f_path, changesets=None): | |
711 | """ |
|
710 | """ | |
712 | get changesets history for given node |
|
711 | get changesets history for given node | |
713 |
|
712 | |||
714 | :param cs: changeset to calculate history |
|
713 | :param cs: changeset to calculate history | |
715 | :param f_path: path for node to calculate history for |
|
714 | :param f_path: path for node to calculate history for | |
716 | :param changesets: if passed don't calculate history and take |
|
715 | :param changesets: if passed don't calculate history and take | |
717 | changesets defined in this list |
|
716 | changesets defined in this list | |
718 | """ |
|
717 | """ | |
719 | # calculate history based on tip |
|
718 | # calculate history based on tip | |
720 | tip_cs = c.db_repo_scm_instance.get_changeset() |
|
719 | tip_cs = c.db_repo_scm_instance.get_changeset() | |
721 | if changesets is None: |
|
720 | if changesets is None: | |
722 | try: |
|
721 | try: | |
723 | changesets = tip_cs.get_file_history(f_path) |
|
722 | changesets = tip_cs.get_file_history(f_path) | |
724 | except (NodeDoesNotExistError, ChangesetError): |
|
723 | except (NodeDoesNotExistError, ChangesetError): | |
725 | # this node is not present at tip ! |
|
724 | # this node is not present at tip ! | |
726 | changesets = cs.get_file_history(f_path) |
|
725 | changesets = cs.get_file_history(f_path) | |
727 | hist_l = [] |
|
726 | hist_l = [] | |
728 |
|
727 | |||
729 | changesets_group = ([], _("Changesets")) |
|
728 | changesets_group = ([], _("Changesets")) | |
730 | branches_group = ([], _("Branches")) |
|
729 | branches_group = ([], _("Branches")) | |
731 | tags_group = ([], _("Tags")) |
|
730 | tags_group = ([], _("Tags")) | |
732 | for chs in changesets: |
|
731 | for chs in changesets: | |
733 | # TODO: loop over chs.branches ... but that will not give all the bogus None branches for Git ... |
|
732 | # TODO: loop over chs.branches ... but that will not give all the bogus None branches for Git ... | |
734 | _branch = chs.branch |
|
733 | _branch = chs.branch | |
735 | n_desc = '%s (%s)' % (h.show_id(chs), _branch) |
|
734 | n_desc = '%s (%s)' % (h.show_id(chs), _branch) | |
736 | changesets_group[0].append((chs.raw_id, n_desc,)) |
|
735 | changesets_group[0].append((chs.raw_id, n_desc,)) | |
737 | hist_l.append(changesets_group) |
|
736 | hist_l.append(changesets_group) | |
738 |
|
737 | |||
739 | for name, chs in c.db_repo_scm_instance.branches.items(): |
|
738 | for name, chs in c.db_repo_scm_instance.branches.items(): | |
740 | branches_group[0].append((chs, name),) |
|
739 | branches_group[0].append((chs, name),) | |
741 | hist_l.append(branches_group) |
|
740 | hist_l.append(branches_group) | |
742 |
|
741 | |||
743 | for name, chs in c.db_repo_scm_instance.tags.items(): |
|
742 | for name, chs in c.db_repo_scm_instance.tags.items(): | |
744 | tags_group[0].append((chs, name),) |
|
743 | tags_group[0].append((chs, name),) | |
745 | hist_l.append(tags_group) |
|
744 | hist_l.append(tags_group) | |
746 |
|
745 | |||
747 | return hist_l, changesets |
|
746 | return hist_l, changesets | |
748 |
|
747 | |||
749 | @LoginRequired(allow_default_user=True) |
|
748 | @LoginRequired(allow_default_user=True) | |
750 | @HasRepoPermissionLevelDecorator('read') |
|
749 | @HasRepoPermissionLevelDecorator('read') | |
751 | @jsonify |
|
750 | @jsonify | |
752 | def nodelist(self, repo_name, revision, f_path): |
|
751 | def nodelist(self, repo_name, revision, f_path): | |
753 | if request.environ.get('HTTP_X_PARTIAL_XHR'): |
|
752 | if request.environ.get('HTTP_X_PARTIAL_XHR'): | |
754 | cs = self.__get_cs(revision) |
|
753 | cs = self.__get_cs(revision) | |
755 | _d, _f = ScmModel().get_nodes(repo_name, cs.raw_id, f_path, |
|
754 | _d, _f = ScmModel().get_nodes(repo_name, cs.raw_id, f_path, | |
756 | flat=False) |
|
755 | flat=False) | |
757 | return {'nodes': _d + _f} |
|
756 | return {'nodes': _d + _f} |
@@ -1,645 +1,645 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | # This program is free software: you can redistribute it and/or modify |
|
2 | # This program is free software: you can redistribute it and/or modify | |
3 | # it under the terms of the GNU General Public License as published by |
|
3 | # it under the terms of the GNU General Public License as published by | |
4 | # the Free Software Foundation, either version 3 of the License, or |
|
4 | # the Free Software Foundation, either version 3 of the License, or | |
5 | # (at your option) any later version. |
|
5 | # (at your option) any later version. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU General Public License |
|
12 | # You should have received a copy of the GNU General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | """ |
|
14 | """ | |
15 | kallithea.controllers.pullrequests |
|
15 | kallithea.controllers.pullrequests | |
16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
|
16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
17 |
|
17 | |||
18 | pull requests controller for Kallithea for initializing pull requests |
|
18 | pull requests controller for Kallithea for initializing pull requests | |
19 |
|
19 | |||
20 | This file was forked by the Kallithea project in July 2014. |
|
20 | This file was forked by the Kallithea project in July 2014. | |
21 | Original author and date, and relevant copyright and licensing information is below: |
|
21 | Original author and date, and relevant copyright and licensing information is below: | |
22 | :created_on: May 7, 2012 |
|
22 | :created_on: May 7, 2012 | |
23 | :author: marcink |
|
23 | :author: marcink | |
24 | :copyright: (c) 2013 RhodeCode GmbH, and others. |
|
24 | :copyright: (c) 2013 RhodeCode GmbH, and others. | |
25 | :license: GPLv3, see LICENSE.md for more details. |
|
25 | :license: GPLv3, see LICENSE.md for more details. | |
26 | """ |
|
26 | """ | |
27 |
|
27 | |||
28 | import logging |
|
28 | import logging | |
29 | import traceback |
|
29 | import traceback | |
30 |
|
30 | |||
31 | import formencode |
|
31 | import formencode | |
32 | from tg import request |
|
32 | from tg import request | |
33 | from tg import tmpl_context as c |
|
33 | from tg import tmpl_context as c | |
34 | from tg.i18n import ugettext as _ |
|
34 | from tg.i18n import ugettext as _ | |
35 | from webob.exc import HTTPBadRequest, HTTPForbidden, HTTPFound, HTTPNotFound |
|
35 | from webob.exc import HTTPBadRequest, HTTPForbidden, HTTPFound, HTTPNotFound | |
36 |
|
36 | |||
37 | from kallithea.config.routing import url |
|
37 | from kallithea.config.routing import url | |
38 | from kallithea.controllers.changeset import _context_url, _ignorews_url, create_cs_pr_comment, delete_cs_pr_comment |
|
38 | from kallithea.controllers.changeset import _context_url, _ignorews_url, create_cs_pr_comment, delete_cs_pr_comment | |
39 | from kallithea.lib import diffs |
|
39 | from kallithea.lib import diffs | |
40 | from kallithea.lib import helpers as h |
|
40 | from kallithea.lib import helpers as h | |
41 | from kallithea.lib.auth import HasRepoPermissionLevelDecorator, LoginRequired |
|
41 | from kallithea.lib.auth import HasRepoPermissionLevelDecorator, LoginRequired | |
42 | from kallithea.lib.base import BaseRepoController, jsonify, render |
|
42 | from kallithea.lib.base import BaseRepoController, jsonify, render | |
43 | from kallithea.lib.graphmod import graph_data |
|
43 | from kallithea.lib.graphmod import graph_data | |
44 | from kallithea.lib.page import Page |
|
44 | from kallithea.lib.page import Page | |
45 | from kallithea.lib.utils2 import safe_int |
|
45 | from kallithea.lib.utils2 import safe_int | |
46 | from kallithea.lib.vcs.exceptions import ChangesetDoesNotExistError, EmptyRepositoryError |
|
46 | from kallithea.lib.vcs.exceptions import ChangesetDoesNotExistError, EmptyRepositoryError | |
47 | from kallithea.lib.vcs.utils import safe_str |
|
47 | from kallithea.lib.vcs.utils import safe_str | |
48 | from kallithea.lib.vcs.utils.hgcompat import unionrepo |
|
48 | from kallithea.lib.vcs.utils.hgcompat import unionrepo | |
49 | from kallithea.model.changeset_status import ChangesetStatusModel |
|
49 | from kallithea.model.changeset_status import ChangesetStatusModel | |
50 | from kallithea.model.comment import ChangesetCommentsModel |
|
50 | from kallithea.model.comment import ChangesetCommentsModel | |
51 | from kallithea.model.db import ChangesetStatus, PullRequest, PullRequestReviewer, Repository, User |
|
51 | from kallithea.model.db import ChangesetStatus, PullRequest, PullRequestReviewer, Repository, User | |
52 | from kallithea.model.forms import PullRequestForm, PullRequestPostForm |
|
52 | from kallithea.model.forms import PullRequestForm, PullRequestPostForm | |
53 | from kallithea.model.meta import Session |
|
53 | from kallithea.model.meta import Session | |
54 | from kallithea.model.pull_request import CreatePullRequestAction, CreatePullRequestIterationAction, PullRequestModel |
|
54 | from kallithea.model.pull_request import CreatePullRequestAction, CreatePullRequestIterationAction, PullRequestModel | |
55 |
|
55 | |||
56 |
|
56 | |||
57 | log = logging.getLogger(__name__) |
|
57 | log = logging.getLogger(__name__) | |
58 |
|
58 | |||
59 |
|
59 | |||
60 | def _get_reviewer(user_id): |
|
60 | def _get_reviewer(user_id): | |
61 | """Look up user by ID and validate it as a potential reviewer.""" |
|
61 | """Look up user by ID and validate it as a potential reviewer.""" | |
62 | try: |
|
62 | try: | |
63 | user = User.get(int(user_id)) |
|
63 | user = User.get(int(user_id)) | |
64 | except ValueError: |
|
64 | except ValueError: | |
65 | user = None |
|
65 | user = None | |
66 |
|
66 | |||
67 | if user is None or user.is_default_user: |
|
67 | if user is None or user.is_default_user: | |
68 | h.flash(_('Invalid reviewer "%s" specified') % user_id, category='error') |
|
68 | h.flash(_('Invalid reviewer "%s" specified') % user_id, category='error') | |
69 | raise HTTPBadRequest() |
|
69 | raise HTTPBadRequest() | |
70 |
|
70 | |||
71 | return user |
|
71 | return user | |
72 |
|
72 | |||
73 |
|
73 | |||
74 | class PullrequestsController(BaseRepoController): |
|
74 | class PullrequestsController(BaseRepoController): | |
75 |
|
75 | |||
76 | def _get_repo_refs(self, repo, rev=None, branch=None, branch_rev=None): |
|
76 | def _get_repo_refs(self, repo, rev=None, branch=None, branch_rev=None): | |
77 | """return a structure with repo's interesting changesets, suitable for |
|
77 | """return a structure with repo's interesting changesets, suitable for | |
78 | the selectors in pullrequest.html |
|
78 | the selectors in pullrequest.html | |
79 |
|
79 | |||
80 | rev: a revision that must be in the list somehow and selected by default |
|
80 | rev: a revision that must be in the list somehow and selected by default | |
81 | branch: a branch that must be in the list and selected by default - even if closed |
|
81 | branch: a branch that must be in the list and selected by default - even if closed | |
82 | branch_rev: a revision of which peers should be preferred and available.""" |
|
82 | branch_rev: a revision of which peers should be preferred and available.""" | |
83 | # list named branches that has been merged to this named branch - it should probably merge back |
|
83 | # list named branches that has been merged to this named branch - it should probably merge back | |
84 | peers = [] |
|
84 | peers = [] | |
85 |
|
85 | |||
86 | if rev: |
|
86 | if rev: | |
87 | rev = safe_str(rev) |
|
87 | rev = safe_str(rev) | |
88 |
|
88 | |||
89 | if branch: |
|
89 | if branch: | |
90 | branch = safe_str(branch) |
|
90 | branch = safe_str(branch) | |
91 |
|
91 | |||
92 | if branch_rev: |
|
92 | if branch_rev: | |
93 | branch_rev = safe_str(branch_rev) |
|
93 | branch_rev = safe_str(branch_rev) | |
94 | # a revset not restricting to merge() would be better |
|
94 | # a revset not restricting to merge() would be better | |
95 | # (especially because it would get the branch point) |
|
95 | # (especially because it would get the branch point) | |
96 | # ... but is currently too expensive |
|
96 | # ... but is currently too expensive | |
97 | # including branches of children could be nice too |
|
97 | # including branches of children could be nice too | |
98 | peerbranches = set() |
|
98 | peerbranches = set() | |
99 | for i in repo._repo.revs( |
|
99 | for i in repo._repo.revs( | |
100 | "sort(parents(branch(id(%s)) and merge()) - branch(id(%s)), -rev)", |
|
100 | "sort(parents(branch(id(%s)) and merge()) - branch(id(%s)), -rev)", | |
101 | branch_rev, branch_rev |
|
101 | branch_rev, branch_rev | |
102 | ): |
|
102 | ): | |
103 | for abranch in repo.get_changeset(i).branches: |
|
103 | for abranch in repo.get_changeset(i).branches: | |
104 | if abranch not in peerbranches: |
|
104 | if abranch not in peerbranches: | |
105 | n = 'branch:%s:%s' % (abranch, repo.get_changeset(abranch).raw_id) |
|
105 | n = 'branch:%s:%s' % (abranch, repo.get_changeset(abranch).raw_id) | |
106 | peers.append((n, abranch)) |
|
106 | peers.append((n, abranch)) | |
107 | peerbranches.add(abranch) |
|
107 | peerbranches.add(abranch) | |
108 |
|
108 | |||
109 | selected = None |
|
109 | selected = None | |
110 | tiprev = repo.tags.get('tip') |
|
110 | tiprev = repo.tags.get('tip') | |
111 | tipbranch = None |
|
111 | tipbranch = None | |
112 |
|
112 | |||
113 | branches = [] |
|
113 | branches = [] | |
114 | for abranch, branchrev in repo.branches.iteritems(): |
|
114 | for abranch, branchrev in repo.branches.iteritems(): | |
115 | n = 'branch:%s:%s' % (abranch, branchrev) |
|
115 | n = 'branch:%s:%s' % (abranch, branchrev) | |
116 | desc = abranch |
|
116 | desc = abranch | |
117 | if branchrev == tiprev: |
|
117 | if branchrev == tiprev: | |
118 | tipbranch = abranch |
|
118 | tipbranch = abranch | |
119 | desc = '%s (current tip)' % desc |
|
119 | desc = '%s (current tip)' % desc | |
120 | branches.append((n, desc)) |
|
120 | branches.append((n, desc)) | |
121 | if rev == branchrev: |
|
121 | if rev == branchrev: | |
122 | selected = n |
|
122 | selected = n | |
123 | if branch == abranch: |
|
123 | if branch == abranch: | |
124 | if not rev: |
|
124 | if not rev: | |
125 | selected = n |
|
125 | selected = n | |
126 | branch = None |
|
126 | branch = None | |
127 | if branch: # branch not in list - it is probably closed |
|
127 | if branch: # branch not in list - it is probably closed | |
128 | branchrev = repo.closed_branches.get(branch) |
|
128 | branchrev = repo.closed_branches.get(branch) | |
129 | if branchrev: |
|
129 | if branchrev: | |
130 | n = 'branch:%s:%s' % (branch, branchrev) |
|
130 | n = 'branch:%s:%s' % (branch, branchrev) | |
131 | branches.append((n, _('%s (closed)') % branch)) |
|
131 | branches.append((n, _('%s (closed)') % branch)) | |
132 | selected = n |
|
132 | selected = n | |
133 | branch = None |
|
133 | branch = None | |
134 | if branch: |
|
134 | if branch: | |
135 | log.debug('branch %r not found in %s', branch, repo) |
|
135 | log.debug('branch %r not found in %s', branch, repo) | |
136 |
|
136 | |||
137 | bookmarks = [] |
|
137 | bookmarks = [] | |
138 | for bookmark, bookmarkrev in repo.bookmarks.iteritems(): |
|
138 | for bookmark, bookmarkrev in repo.bookmarks.iteritems(): | |
139 | n = 'book:%s:%s' % (bookmark, bookmarkrev) |
|
139 | n = 'book:%s:%s' % (bookmark, bookmarkrev) | |
140 | bookmarks.append((n, bookmark)) |
|
140 | bookmarks.append((n, bookmark)) | |
141 | if rev == bookmarkrev: |
|
141 | if rev == bookmarkrev: | |
142 | selected = n |
|
142 | selected = n | |
143 |
|
143 | |||
144 | tags = [] |
|
144 | tags = [] | |
145 | for tag, tagrev in repo.tags.iteritems(): |
|
145 | for tag, tagrev in repo.tags.iteritems(): | |
146 | if tag == 'tip': |
|
146 | if tag == 'tip': | |
147 | continue |
|
147 | continue | |
148 | n = 'tag:%s:%s' % (tag, tagrev) |
|
148 | n = 'tag:%s:%s' % (tag, tagrev) | |
149 | tags.append((n, tag)) |
|
149 | tags.append((n, tag)) | |
150 | # note: even if rev == tagrev, don't select the static tag - it must be chosen explicitly |
|
150 | # note: even if rev == tagrev, don't select the static tag - it must be chosen explicitly | |
151 |
|
151 | |||
152 | # prio 1: rev was selected as existing entry above |
|
152 | # prio 1: rev was selected as existing entry above | |
153 |
|
153 | |||
154 | # prio 2: create special entry for rev; rev _must_ be used |
|
154 | # prio 2: create special entry for rev; rev _must_ be used | |
155 | specials = [] |
|
155 | specials = [] | |
156 | if rev and selected is None: |
|
156 | if rev and selected is None: | |
157 | selected = 'rev:%s:%s' % (rev, rev) |
|
157 | selected = 'rev:%s:%s' % (rev, rev) | |
158 | specials = [(selected, '%s: %s' % (_("Changeset"), rev[:12]))] |
|
158 | specials = [(selected, '%s: %s' % (_("Changeset"), rev[:12]))] | |
159 |
|
159 | |||
160 | # prio 3: most recent peer branch |
|
160 | # prio 3: most recent peer branch | |
161 | if peers and not selected: |
|
161 | if peers and not selected: | |
162 | selected = peers[0][0] |
|
162 | selected = peers[0][0] | |
163 |
|
163 | |||
164 | # prio 4: tip revision |
|
164 | # prio 4: tip revision | |
165 | if not selected: |
|
165 | if not selected: | |
166 | if h.is_hg(repo): |
|
166 | if h.is_hg(repo): | |
167 | if tipbranch: |
|
167 | if tipbranch: | |
168 | selected = 'branch:%s:%s' % (tipbranch, tiprev) |
|
168 | selected = 'branch:%s:%s' % (tipbranch, tiprev) | |
169 | else: |
|
169 | else: | |
170 | selected = 'tag:null:' + repo.EMPTY_CHANGESET |
|
170 | selected = 'tag:null:' + repo.EMPTY_CHANGESET | |
171 | tags.append((selected, 'null')) |
|
171 | tags.append((selected, 'null')) | |
172 | else: |
|
172 | else: | |
173 | if 'master' in repo.branches: |
|
173 | if 'master' in repo.branches: | |
174 | selected = 'branch:master:%s' % repo.branches['master'] |
|
174 | selected = 'branch:master:%s' % repo.branches['master'] | |
175 | else: |
|
175 | else: | |
176 | k, v = list(repo.branches.items())[0] |
|
176 | k, v = list(repo.branches.items())[0] | |
177 | selected = 'branch:%s:%s' % (k, v) |
|
177 | selected = 'branch:%s:%s' % (k, v) | |
178 |
|
178 | |||
179 | groups = [(specials, _("Special")), |
|
179 | groups = [(specials, _("Special")), | |
180 | (peers, _("Peer branches")), |
|
180 | (peers, _("Peer branches")), | |
181 | (bookmarks, _("Bookmarks")), |
|
181 | (bookmarks, _("Bookmarks")), | |
182 | (branches, _("Branches")), |
|
182 | (branches, _("Branches")), | |
183 | (tags, _("Tags")), |
|
183 | (tags, _("Tags")), | |
184 | ] |
|
184 | ] | |
185 | return [g for g in groups if g[0]], selected |
|
185 | return [g for g in groups if g[0]], selected | |
186 |
|
186 | |||
187 | def _is_allowed_to_change_status(self, pull_request): |
|
187 | def _is_allowed_to_change_status(self, pull_request): | |
188 | if pull_request.is_closed(): |
|
188 | if pull_request.is_closed(): | |
189 | return False |
|
189 | return False | |
190 |
|
190 | |||
191 | owner = request.authuser.user_id == pull_request.owner_id |
|
191 | owner = request.authuser.user_id == pull_request.owner_id | |
192 | reviewer = PullRequestReviewer.query() \ |
|
192 | reviewer = PullRequestReviewer.query() \ | |
193 | .filter(PullRequestReviewer.pull_request == pull_request) \ |
|
193 | .filter(PullRequestReviewer.pull_request == pull_request) \ | |
194 | .filter(PullRequestReviewer.user_id == request.authuser.user_id) \ |
|
194 | .filter(PullRequestReviewer.user_id == request.authuser.user_id) \ | |
195 | .count() != 0 |
|
195 | .count() != 0 | |
196 |
|
196 | |||
197 | return request.authuser.admin or owner or reviewer |
|
197 | return request.authuser.admin or owner or reviewer | |
198 |
|
198 | |||
199 | @LoginRequired(allow_default_user=True) |
|
199 | @LoginRequired(allow_default_user=True) | |
200 | @HasRepoPermissionLevelDecorator('read') |
|
200 | @HasRepoPermissionLevelDecorator('read') | |
201 | def show_all(self, repo_name): |
|
201 | def show_all(self, repo_name): | |
202 | c.from_ = request.GET.get('from_') or '' |
|
202 | c.from_ = request.GET.get('from_') or '' | |
203 | c.closed = request.GET.get('closed') or '' |
|
203 | c.closed = request.GET.get('closed') or '' | |
204 | url_params = {} |
|
204 | url_params = {} | |
205 | if c.from_: |
|
205 | if c.from_: | |
206 | url_params['from_'] = 1 |
|
206 | url_params['from_'] = 1 | |
207 | if c.closed: |
|
207 | if c.closed: | |
208 | url_params['closed'] = 1 |
|
208 | url_params['closed'] = 1 | |
209 | p = safe_int(request.GET.get('page'), 1) |
|
209 | p = safe_int(request.GET.get('page'), 1) | |
210 |
|
210 | |||
211 | q = PullRequest.query(include_closed=c.closed, sorted=True) |
|
211 | q = PullRequest.query(include_closed=c.closed, sorted=True) | |
212 | if c.from_: |
|
212 | if c.from_: | |
213 | q = q.filter_by(org_repo=c.db_repo) |
|
213 | q = q.filter_by(org_repo=c.db_repo) | |
214 | else: |
|
214 | else: | |
215 | q = q.filter_by(other_repo=c.db_repo) |
|
215 | q = q.filter_by(other_repo=c.db_repo) | |
216 | c.pull_requests = q.all() |
|
216 | c.pull_requests = q.all() | |
217 |
|
217 | |||
218 | c.pullrequests_pager = Page(c.pull_requests, page=p, items_per_page=100, **url_params) |
|
218 | c.pullrequests_pager = Page(c.pull_requests, page=p, items_per_page=100, **url_params) | |
219 |
|
219 | |||
220 | return render('/pullrequests/pullrequest_show_all.html') |
|
220 | return render('/pullrequests/pullrequest_show_all.html') | |
221 |
|
221 | |||
222 | @LoginRequired() |
|
222 | @LoginRequired() | |
223 | def show_my(self): |
|
223 | def show_my(self): | |
224 | c.closed = request.GET.get('closed') or '' |
|
224 | c.closed = request.GET.get('closed') or '' | |
225 |
|
225 | |||
226 | c.my_pull_requests = PullRequest.query( |
|
226 | c.my_pull_requests = PullRequest.query( | |
227 | include_closed=c.closed, |
|
227 | include_closed=c.closed, | |
228 | sorted=True, |
|
228 | sorted=True, | |
229 | ).filter_by(owner_id=request.authuser.user_id).all() |
|
229 | ).filter_by(owner_id=request.authuser.user_id).all() | |
230 |
|
230 | |||
231 | c.participate_in_pull_requests = [] |
|
231 | c.participate_in_pull_requests = [] | |
232 | c.participate_in_pull_requests_todo = [] |
|
232 | c.participate_in_pull_requests_todo = [] | |
233 | done_status = set([ChangesetStatus.STATUS_APPROVED, ChangesetStatus.STATUS_REJECTED]) |
|
233 | done_status = set([ChangesetStatus.STATUS_APPROVED, ChangesetStatus.STATUS_REJECTED]) | |
234 | for pr in PullRequest.query( |
|
234 | for pr in PullRequest.query( | |
235 | include_closed=c.closed, |
|
235 | include_closed=c.closed, | |
236 | reviewer_id=request.authuser.user_id, |
|
236 | reviewer_id=request.authuser.user_id, | |
237 | sorted=True, |
|
237 | sorted=True, | |
238 | ): |
|
238 | ): | |
239 | status = pr.user_review_status(request.authuser.user_id) # very inefficient!!! |
|
239 | status = pr.user_review_status(request.authuser.user_id) # very inefficient!!! | |
240 | if status in done_status: |
|
240 | if status in done_status: | |
241 | c.participate_in_pull_requests.append(pr) |
|
241 | c.participate_in_pull_requests.append(pr) | |
242 | else: |
|
242 | else: | |
243 | c.participate_in_pull_requests_todo.append(pr) |
|
243 | c.participate_in_pull_requests_todo.append(pr) | |
244 |
|
244 | |||
245 | return render('/pullrequests/pullrequest_show_my.html') |
|
245 | return render('/pullrequests/pullrequest_show_my.html') | |
246 |
|
246 | |||
247 | @LoginRequired() |
|
247 | @LoginRequired() | |
248 | @HasRepoPermissionLevelDecorator('read') |
|
248 | @HasRepoPermissionLevelDecorator('read') | |
249 | def index(self): |
|
249 | def index(self): | |
250 | org_repo = c.db_repo |
|
250 | org_repo = c.db_repo | |
251 | org_scm_instance = org_repo.scm_instance |
|
251 | org_scm_instance = org_repo.scm_instance | |
252 | try: |
|
252 | try: | |
253 | org_scm_instance.get_changeset() |
|
253 | org_scm_instance.get_changeset() | |
254 | except EmptyRepositoryError as e: |
|
254 | except EmptyRepositoryError as e: | |
255 | h.flash(_('There are no changesets yet'), |
|
255 | h.flash(_('There are no changesets yet'), | |
256 | category='warning') |
|
256 | category='warning') | |
257 | raise HTTPFound(location=url('summary_home', repo_name=org_repo.repo_name)) |
|
257 | raise HTTPFound(location=url('summary_home', repo_name=org_repo.repo_name)) | |
258 |
|
258 | |||
259 | org_rev = request.GET.get('rev_end') |
|
259 | org_rev = request.GET.get('rev_end') | |
260 | # rev_start is not directly useful - its parent could however be used |
|
260 | # rev_start is not directly useful - its parent could however be used | |
261 | # as default for other and thus give a simple compare view |
|
261 | # as default for other and thus give a simple compare view | |
262 | rev_start = request.GET.get('rev_start') |
|
262 | rev_start = request.GET.get('rev_start') | |
263 | other_rev = None |
|
263 | other_rev = None | |
264 | if rev_start: |
|
264 | if rev_start: | |
265 | starters = org_repo.get_changeset(rev_start).parents |
|
265 | starters = org_repo.get_changeset(rev_start).parents | |
266 | if starters: |
|
266 | if starters: | |
267 | other_rev = starters[0].raw_id |
|
267 | other_rev = starters[0].raw_id | |
268 | else: |
|
268 | else: | |
269 | other_rev = org_repo.scm_instance.EMPTY_CHANGESET |
|
269 | other_rev = org_repo.scm_instance.EMPTY_CHANGESET | |
270 | branch = request.GET.get('branch') |
|
270 | branch = request.GET.get('branch') | |
271 |
|
271 | |||
272 | c.cs_repos = [(org_repo.repo_name, org_repo.repo_name)] |
|
272 | c.cs_repos = [(org_repo.repo_name, org_repo.repo_name)] | |
273 | c.default_cs_repo = org_repo.repo_name |
|
273 | c.default_cs_repo = org_repo.repo_name | |
274 | c.cs_refs, c.default_cs_ref = self._get_repo_refs(org_scm_instance, rev=org_rev, branch=branch) |
|
274 | c.cs_refs, c.default_cs_ref = self._get_repo_refs(org_scm_instance, rev=org_rev, branch=branch) | |
275 |
|
275 | |||
276 | default_cs_ref_type, default_cs_branch, default_cs_rev = c.default_cs_ref.split(':') |
|
276 | default_cs_ref_type, default_cs_branch, default_cs_rev = c.default_cs_ref.split(':') | |
277 | if default_cs_ref_type != 'branch': |
|
277 | if default_cs_ref_type != 'branch': | |
278 | default_cs_branch = org_repo.get_changeset(default_cs_rev).branch |
|
278 | default_cs_branch = org_repo.get_changeset(default_cs_rev).branch | |
279 |
|
279 | |||
280 | # add org repo to other so we can open pull request against peer branches on itself |
|
280 | # add org repo to other so we can open pull request against peer branches on itself | |
281 | c.a_repos = [(org_repo.repo_name, '%s (self)' % org_repo.repo_name)] |
|
281 | c.a_repos = [(org_repo.repo_name, '%s (self)' % org_repo.repo_name)] | |
282 |
|
282 | |||
283 | if org_repo.parent: |
|
283 | if org_repo.parent: | |
284 | # add parent of this fork also and select it. |
|
284 | # add parent of this fork also and select it. | |
285 | # use the same branch on destination as on source, if available. |
|
285 | # use the same branch on destination as on source, if available. | |
286 | c.a_repos.append((org_repo.parent.repo_name, '%s (parent)' % org_repo.parent.repo_name)) |
|
286 | c.a_repos.append((org_repo.parent.repo_name, '%s (parent)' % org_repo.parent.repo_name)) | |
287 | c.a_repo = org_repo.parent |
|
287 | c.a_repo = org_repo.parent | |
288 | c.a_refs, c.default_a_ref = self._get_repo_refs( |
|
288 | c.a_refs, c.default_a_ref = self._get_repo_refs( | |
289 | org_repo.parent.scm_instance, branch=default_cs_branch, rev=other_rev) |
|
289 | org_repo.parent.scm_instance, branch=default_cs_branch, rev=other_rev) | |
290 |
|
290 | |||
291 | else: |
|
291 | else: | |
292 | c.a_repo = org_repo |
|
292 | c.a_repo = org_repo | |
293 | c.a_refs, c.default_a_ref = self._get_repo_refs(org_scm_instance, rev=other_rev) |
|
293 | c.a_refs, c.default_a_ref = self._get_repo_refs(org_scm_instance, rev=other_rev) | |
294 |
|
294 | |||
295 | # gather forks and add to this list ... even though it is rare to |
|
295 | # gather forks and add to this list ... even though it is rare to | |
296 | # request forks to pull from their parent |
|
296 | # request forks to pull from their parent | |
297 | for fork in org_repo.forks: |
|
297 | for fork in org_repo.forks: | |
298 | c.a_repos.append((fork.repo_name, fork.repo_name)) |
|
298 | c.a_repos.append((fork.repo_name, fork.repo_name)) | |
299 |
|
299 | |||
300 | return render('/pullrequests/pullrequest.html') |
|
300 | return render('/pullrequests/pullrequest.html') | |
301 |
|
301 | |||
302 | @LoginRequired() |
|
302 | @LoginRequired() | |
303 | @HasRepoPermissionLevelDecorator('read') |
|
303 | @HasRepoPermissionLevelDecorator('read') | |
304 | @jsonify |
|
304 | @jsonify | |
305 | def repo_info(self, repo_name): |
|
305 | def repo_info(self, repo_name): | |
306 | repo = c.db_repo |
|
306 | repo = c.db_repo | |
307 | refs, selected_ref = self._get_repo_refs(repo.scm_instance) |
|
307 | refs, selected_ref = self._get_repo_refs(repo.scm_instance) | |
308 | return { |
|
308 | return { | |
309 | 'description': repo.description.split('\n', 1)[0], |
|
309 | 'description': repo.description.split('\n', 1)[0], | |
310 | 'selected_ref': selected_ref, |
|
310 | 'selected_ref': selected_ref, | |
311 | 'refs': refs, |
|
311 | 'refs': refs, | |
312 | } |
|
312 | } | |
313 |
|
313 | |||
314 | @LoginRequired() |
|
314 | @LoginRequired() | |
315 | @HasRepoPermissionLevelDecorator('read') |
|
315 | @HasRepoPermissionLevelDecorator('read') | |
316 | def create(self, repo_name): |
|
316 | def create(self, repo_name): | |
317 | repo = c.db_repo |
|
317 | repo = c.db_repo | |
318 | try: |
|
318 | try: | |
319 | _form = PullRequestForm(repo.repo_id)().to_python(request.POST) |
|
319 | _form = PullRequestForm(repo.repo_id)().to_python(request.POST) | |
320 | except formencode.Invalid as errors: |
|
320 | except formencode.Invalid as errors: | |
321 | log.error(traceback.format_exc()) |
|
321 | log.error(traceback.format_exc()) | |
322 | log.error(str(errors)) |
|
322 | log.error(str(errors)) | |
323 | msg = _('Error creating pull request: %s') % errors.msg |
|
323 | msg = _('Error creating pull request: %s') % errors.msg | |
324 | h.flash(msg, 'error') |
|
324 | h.flash(msg, 'error') | |
325 | raise HTTPBadRequest |
|
325 | raise HTTPBadRequest | |
326 |
|
326 | |||
327 | # heads up: org and other might seem backward here ... |
|
327 | # heads up: org and other might seem backward here ... | |
328 | org_ref = _form['org_ref'] # will have merge_rev as rev but symbolic name |
|
328 | org_ref = _form['org_ref'] # will have merge_rev as rev but symbolic name | |
329 | org_repo = Repository.guess_instance(_form['org_repo']) |
|
329 | org_repo = Repository.guess_instance(_form['org_repo']) | |
330 |
|
330 | |||
331 | other_ref = _form['other_ref'] # will have symbolic name and head revision |
|
331 | other_ref = _form['other_ref'] # will have symbolic name and head revision | |
332 | other_repo = Repository.guess_instance(_form['other_repo']) |
|
332 | other_repo = Repository.guess_instance(_form['other_repo']) | |
333 |
|
333 | |||
334 | reviewers = [] |
|
334 | reviewers = [] | |
335 |
|
335 | |||
336 | title = _form['pullrequest_title'] |
|
336 | title = _form['pullrequest_title'] | |
337 | description = _form['pullrequest_desc'].strip() |
|
337 | description = _form['pullrequest_desc'].strip() | |
338 | owner = User.get(request.authuser.user_id) |
|
338 | owner = User.get(request.authuser.user_id) | |
339 |
|
339 | |||
340 | try: |
|
340 | try: | |
341 | cmd = CreatePullRequestAction(org_repo, other_repo, org_ref, other_ref, title, description, owner, reviewers) |
|
341 | cmd = CreatePullRequestAction(org_repo, other_repo, org_ref, other_ref, title, description, owner, reviewers) | |
342 | except CreatePullRequestAction.ValidationError as e: |
|
342 | except CreatePullRequestAction.ValidationError as e: | |
343 | h.flash(str(e), category='error', logf=log.error) |
|
343 | h.flash(str(e), category='error', logf=log.error) | |
344 | raise HTTPNotFound |
|
344 | raise HTTPNotFound | |
345 |
|
345 | |||
346 | try: |
|
346 | try: | |
347 | pull_request = cmd.execute() |
|
347 | pull_request = cmd.execute() | |
348 | Session().commit() |
|
348 | Session().commit() | |
349 | except Exception: |
|
349 | except Exception: | |
350 | h.flash(_('Error occurred while creating pull request'), |
|
350 | h.flash(_('Error occurred while creating pull request'), | |
351 | category='error') |
|
351 | category='error') | |
352 | log.error(traceback.format_exc()) |
|
352 | log.error(traceback.format_exc()) | |
353 | raise HTTPFound(location=url('pullrequest_home', repo_name=repo_name)) |
|
353 | raise HTTPFound(location=url('pullrequest_home', repo_name=repo_name)) | |
354 |
|
354 | |||
355 | h.flash(_('Successfully opened new pull request'), |
|
355 | h.flash(_('Successfully opened new pull request'), | |
356 | category='success') |
|
356 | category='success') | |
357 | raise HTTPFound(location=pull_request.url()) |
|
357 | raise HTTPFound(location=pull_request.url()) | |
358 |
|
358 | |||
359 | def create_new_iteration(self, old_pull_request, new_rev, title, description, reviewers): |
|
359 | def create_new_iteration(self, old_pull_request, new_rev, title, description, reviewers): | |
360 | owner = User.get(request.authuser.user_id) |
|
360 | owner = User.get(request.authuser.user_id) | |
361 | new_org_rev = self._get_ref_rev(old_pull_request.org_repo, 'rev', new_rev) |
|
361 | new_org_rev = self._get_ref_rev(old_pull_request.org_repo, 'rev', new_rev) | |
362 | new_other_rev = self._get_ref_rev(old_pull_request.other_repo, old_pull_request.other_ref_parts[0], old_pull_request.other_ref_parts[1]) |
|
362 | new_other_rev = self._get_ref_rev(old_pull_request.other_repo, old_pull_request.other_ref_parts[0], old_pull_request.other_ref_parts[1]) | |
363 | try: |
|
363 | try: | |
364 | cmd = CreatePullRequestIterationAction(old_pull_request, new_org_rev, new_other_rev, title, description, owner, reviewers) |
|
364 | cmd = CreatePullRequestIterationAction(old_pull_request, new_org_rev, new_other_rev, title, description, owner, reviewers) | |
365 | except CreatePullRequestAction.ValidationError as e: |
|
365 | except CreatePullRequestAction.ValidationError as e: | |
366 | h.flash(str(e), category='error', logf=log.error) |
|
366 | h.flash(str(e), category='error', logf=log.error) | |
367 | raise HTTPNotFound |
|
367 | raise HTTPNotFound | |
368 |
|
368 | |||
369 | try: |
|
369 | try: | |
370 | pull_request = cmd.execute() |
|
370 | pull_request = cmd.execute() | |
371 | Session().commit() |
|
371 | Session().commit() | |
372 | except Exception: |
|
372 | except Exception: | |
373 | h.flash(_('Error occurred while creating pull request'), |
|
373 | h.flash(_('Error occurred while creating pull request'), | |
374 | category='error') |
|
374 | category='error') | |
375 | log.error(traceback.format_exc()) |
|
375 | log.error(traceback.format_exc()) | |
376 | raise HTTPFound(location=old_pull_request.url()) |
|
376 | raise HTTPFound(location=old_pull_request.url()) | |
377 |
|
377 | |||
378 | h.flash(_('New pull request iteration created'), |
|
378 | h.flash(_('New pull request iteration created'), | |
379 | category='success') |
|
379 | category='success') | |
380 | raise HTTPFound(location=pull_request.url()) |
|
380 | raise HTTPFound(location=pull_request.url()) | |
381 |
|
381 | |||
382 | # pullrequest_post for PR editing |
|
382 | # pullrequest_post for PR editing | |
383 | @LoginRequired() |
|
383 | @LoginRequired() | |
384 | @HasRepoPermissionLevelDecorator('read') |
|
384 | @HasRepoPermissionLevelDecorator('read') | |
385 | def post(self, repo_name, pull_request_id): |
|
385 | def post(self, repo_name, pull_request_id): | |
386 | pull_request = PullRequest.get_or_404(pull_request_id) |
|
386 | pull_request = PullRequest.get_or_404(pull_request_id) | |
387 | if pull_request.is_closed(): |
|
387 | if pull_request.is_closed(): | |
388 | raise HTTPForbidden() |
|
388 | raise HTTPForbidden() | |
389 | assert pull_request.other_repo.repo_name == repo_name |
|
389 | assert pull_request.other_repo.repo_name == repo_name | |
390 | # only owner or admin can update it |
|
390 | # only owner or admin can update it | |
391 | owner = pull_request.owner_id == request.authuser.user_id |
|
391 | owner = pull_request.owner_id == request.authuser.user_id | |
392 | repo_admin = h.HasRepoPermissionLevel('admin')(c.repo_name) |
|
392 | repo_admin = h.HasRepoPermissionLevel('admin')(c.repo_name) | |
393 | if not (h.HasPermissionAny('hg.admin')() or repo_admin or owner): |
|
393 | if not (h.HasPermissionAny('hg.admin')() or repo_admin or owner): | |
394 | raise HTTPForbidden() |
|
394 | raise HTTPForbidden() | |
395 |
|
395 | |||
396 | _form = PullRequestPostForm()().to_python(request.POST) |
|
396 | _form = PullRequestPostForm()().to_python(request.POST) | |
397 |
|
397 | |||
398 | cur_reviewers = set(pull_request.get_reviewer_users()) |
|
398 | cur_reviewers = set(pull_request.get_reviewer_users()) | |
399 | new_reviewers = set(_get_reviewer(s) for s in _form['review_members']) |
|
399 | new_reviewers = set(_get_reviewer(s) for s in _form['review_members']) | |
400 | old_reviewers = set(_get_reviewer(s) for s in _form['org_review_members']) |
|
400 | old_reviewers = set(_get_reviewer(s) for s in _form['org_review_members']) | |
401 |
|
401 | |||
402 | other_added = cur_reviewers - old_reviewers |
|
402 | other_added = cur_reviewers - old_reviewers | |
403 | other_removed = old_reviewers - cur_reviewers |
|
403 | other_removed = old_reviewers - cur_reviewers | |
404 |
|
404 | |||
405 | if other_added: |
|
405 | if other_added: | |
406 | h.flash(_('Meanwhile, the following reviewers have been added: %s') % |
|
406 | h.flash(_('Meanwhile, the following reviewers have been added: %s') % | |
407 | (', '.join(u.username for u in other_added)), |
|
407 | (', '.join(u.username for u in other_added)), | |
408 | category='warning') |
|
408 | category='warning') | |
409 | if other_removed: |
|
409 | if other_removed: | |
410 | h.flash(_('Meanwhile, the following reviewers have been removed: %s') % |
|
410 | h.flash(_('Meanwhile, the following reviewers have been removed: %s') % | |
411 | (', '.join(u.username for u in other_removed)), |
|
411 | (', '.join(u.username for u in other_removed)), | |
412 | category='warning') |
|
412 | category='warning') | |
413 |
|
413 | |||
414 | if _form['updaterev']: |
|
414 | if _form['updaterev']: | |
415 | return self.create_new_iteration(pull_request, |
|
415 | return self.create_new_iteration(pull_request, | |
416 | _form['updaterev'], |
|
416 | _form['updaterev'], | |
417 | _form['pullrequest_title'], |
|
417 | _form['pullrequest_title'], | |
418 | _form['pullrequest_desc'], |
|
418 | _form['pullrequest_desc'], | |
419 | new_reviewers) |
|
419 | new_reviewers) | |
420 |
|
420 | |||
421 | added_reviewers = new_reviewers - old_reviewers - cur_reviewers |
|
421 | added_reviewers = new_reviewers - old_reviewers - cur_reviewers | |
422 | removed_reviewers = (old_reviewers - new_reviewers) & cur_reviewers |
|
422 | removed_reviewers = (old_reviewers - new_reviewers) & cur_reviewers | |
423 |
|
423 | |||
424 | old_description = pull_request.description |
|
424 | old_description = pull_request.description | |
425 | pull_request.title = _form['pullrequest_title'] |
|
425 | pull_request.title = _form['pullrequest_title'] | |
426 | pull_request.description = _form['pullrequest_desc'].strip() or _('No description') |
|
426 | pull_request.description = _form['pullrequest_desc'].strip() or _('No description') | |
427 | pull_request.owner = User.get_by_username(_form['owner']) |
|
427 | pull_request.owner = User.get_by_username(_form['owner']) | |
428 | user = User.get(request.authuser.user_id) |
|
428 | user = User.get(request.authuser.user_id) | |
429 |
|
429 | |||
430 | PullRequestModel().mention_from_description(user, pull_request, old_description) |
|
430 | PullRequestModel().mention_from_description(user, pull_request, old_description) | |
431 | PullRequestModel().add_reviewers(user, pull_request, added_reviewers) |
|
431 | PullRequestModel().add_reviewers(user, pull_request, added_reviewers) | |
432 | PullRequestModel().remove_reviewers(user, pull_request, removed_reviewers) |
|
432 | PullRequestModel().remove_reviewers(user, pull_request, removed_reviewers) | |
433 |
|
433 | |||
434 | Session().commit() |
|
434 | Session().commit() | |
435 | h.flash(_('Pull request updated'), category='success') |
|
435 | h.flash(_('Pull request updated'), category='success') | |
436 |
|
436 | |||
437 | raise HTTPFound(location=pull_request.url()) |
|
437 | raise HTTPFound(location=pull_request.url()) | |
438 |
|
438 | |||
439 | @LoginRequired() |
|
439 | @LoginRequired() | |
440 | @HasRepoPermissionLevelDecorator('read') |
|
440 | @HasRepoPermissionLevelDecorator('read') | |
441 | @jsonify |
|
441 | @jsonify | |
442 | def delete(self, repo_name, pull_request_id): |
|
442 | def delete(self, repo_name, pull_request_id): | |
443 | pull_request = PullRequest.get_or_404(pull_request_id) |
|
443 | pull_request = PullRequest.get_or_404(pull_request_id) | |
444 | # only owner can delete it ! |
|
444 | # only owner can delete it ! | |
445 | if pull_request.owner_id == request.authuser.user_id: |
|
445 | if pull_request.owner_id == request.authuser.user_id: | |
446 | PullRequestModel().delete(pull_request) |
|
446 | PullRequestModel().delete(pull_request) | |
447 | Session().commit() |
|
447 | Session().commit() | |
448 | h.flash(_('Successfully deleted pull request'), |
|
448 | h.flash(_('Successfully deleted pull request'), | |
449 | category='success') |
|
449 | category='success') | |
450 | raise HTTPFound(location=url('my_pullrequests')) |
|
450 | raise HTTPFound(location=url('my_pullrequests')) | |
451 | raise HTTPForbidden() |
|
451 | raise HTTPForbidden() | |
452 |
|
452 | |||
453 | @LoginRequired(allow_default_user=True) |
|
453 | @LoginRequired(allow_default_user=True) | |
454 | @HasRepoPermissionLevelDecorator('read') |
|
454 | @HasRepoPermissionLevelDecorator('read') | |
455 | def show(self, repo_name, pull_request_id, extra=None): |
|
455 | def show(self, repo_name, pull_request_id, extra=None): | |
456 | c.pull_request = PullRequest.get_or_404(pull_request_id) |
|
456 | c.pull_request = PullRequest.get_or_404(pull_request_id) | |
457 | c.allowed_to_change_status = self._is_allowed_to_change_status(c.pull_request) |
|
457 | c.allowed_to_change_status = self._is_allowed_to_change_status(c.pull_request) | |
458 | cc_model = ChangesetCommentsModel() |
|
458 | cc_model = ChangesetCommentsModel() | |
459 | cs_model = ChangesetStatusModel() |
|
459 | cs_model = ChangesetStatusModel() | |
460 |
|
460 | |||
461 | # pull_requests repo_name we opened it against |
|
461 | # pull_requests repo_name we opened it against | |
462 | # ie. other_repo must match |
|
462 | # ie. other_repo must match | |
463 | if repo_name != c.pull_request.other_repo.repo_name: |
|
463 | if repo_name != c.pull_request.other_repo.repo_name: | |
464 | raise HTTPNotFound |
|
464 | raise HTTPNotFound | |
465 |
|
465 | |||
466 | # load compare data into template context |
|
466 | # load compare data into template context | |
467 | c.cs_repo = c.pull_request.org_repo |
|
467 | c.cs_repo = c.pull_request.org_repo | |
468 | (c.cs_ref_type, |
|
468 | (c.cs_ref_type, | |
469 | c.cs_ref_name, |
|
469 | c.cs_ref_name, | |
470 | c.cs_rev) = c.pull_request.org_ref.split(':') |
|
470 | c.cs_rev) = c.pull_request.org_ref.split(':') | |
471 |
|
471 | |||
472 | c.a_repo = c.pull_request.other_repo |
|
472 | c.a_repo = c.pull_request.other_repo | |
473 | (c.a_ref_type, |
|
473 | (c.a_ref_type, | |
474 | c.a_ref_name, |
|
474 | c.a_ref_name, | |
475 | c.a_rev) = c.pull_request.other_ref.split(':') # a_rev is ancestor |
|
475 | c.a_rev) = c.pull_request.other_ref.split(':') # a_rev is ancestor | |
476 |
|
476 | |||
477 | org_scm_instance = c.cs_repo.scm_instance # property with expensive cache invalidation check!!! |
|
477 | org_scm_instance = c.cs_repo.scm_instance # property with expensive cache invalidation check!!! | |
478 | try: |
|
478 | try: | |
479 | c.cs_ranges = [] |
|
479 | c.cs_ranges = [] | |
480 | for x in c.pull_request.revisions: |
|
480 | for x in c.pull_request.revisions: | |
481 | c.cs_ranges.append(org_scm_instance.get_changeset(x)) |
|
481 | c.cs_ranges.append(org_scm_instance.get_changeset(x)) | |
482 | except ChangesetDoesNotExistError: |
|
482 | except ChangesetDoesNotExistError: | |
483 | c.cs_ranges = [] |
|
483 | c.cs_ranges = [] | |
484 | h.flash(_('Revision %s not found in %s') % (x, c.cs_repo.repo_name), |
|
484 | h.flash(_('Revision %s not found in %s') % (x, c.cs_repo.repo_name), | |
485 | 'error') |
|
485 | 'error') | |
486 | c.cs_ranges_org = None # not stored and not important and moving target - could be calculated ... |
|
486 | c.cs_ranges_org = None # not stored and not important and moving target - could be calculated ... | |
487 | revs = [ctx.revision for ctx in reversed(c.cs_ranges)] |
|
487 | revs = [ctx.revision for ctx in reversed(c.cs_ranges)] | |
488 | c.jsdata = graph_data(org_scm_instance, revs) |
|
488 | c.jsdata = graph_data(org_scm_instance, revs) | |
489 |
|
489 | |||
490 | c.is_range = False |
|
490 | c.is_range = False | |
491 | try: |
|
491 | try: | |
492 | if c.a_ref_type == 'rev': # this looks like a free range where target is ancestor |
|
492 | if c.a_ref_type == 'rev': # this looks like a free range where target is ancestor | |
493 | cs_a = org_scm_instance.get_changeset(c.a_rev) |
|
493 | cs_a = org_scm_instance.get_changeset(c.a_rev) | |
494 | root_parents = c.cs_ranges[0].parents |
|
494 | root_parents = c.cs_ranges[0].parents | |
495 | c.is_range = cs_a in root_parents |
|
495 | c.is_range = cs_a in root_parents | |
496 | #c.merge_root = len(root_parents) > 1 # a range starting with a merge might deserve a warning |
|
496 | #c.merge_root = len(root_parents) > 1 # a range starting with a merge might deserve a warning | |
497 | except ChangesetDoesNotExistError: # probably because c.a_rev not found |
|
497 | except ChangesetDoesNotExistError: # probably because c.a_rev not found | |
498 | pass |
|
498 | pass | |
499 | except IndexError: # probably because c.cs_ranges is empty, probably because revisions are missing |
|
499 | except IndexError: # probably because c.cs_ranges is empty, probably because revisions are missing | |
500 | pass |
|
500 | pass | |
501 |
|
501 | |||
502 | avail_revs = set() |
|
502 | avail_revs = set() | |
503 | avail_show = [] |
|
503 | avail_show = [] | |
504 | c.cs_branch_name = c.cs_ref_name |
|
504 | c.cs_branch_name = c.cs_ref_name | |
505 | c.a_branch_name = None |
|
505 | c.a_branch_name = None | |
506 | other_scm_instance = c.a_repo.scm_instance |
|
506 | other_scm_instance = c.a_repo.scm_instance | |
507 | c.update_msg = "" |
|
507 | c.update_msg = "" | |
508 | c.update_msg_other = "" |
|
508 | c.update_msg_other = "" | |
509 | try: |
|
509 | try: | |
510 | if not c.cs_ranges: |
|
510 | if not c.cs_ranges: | |
511 | c.update_msg = _('Error: changesets not found when displaying pull request from %s.') % c.cs_rev |
|
511 | c.update_msg = _('Error: changesets not found when displaying pull request from %s.') % c.cs_rev | |
512 | elif org_scm_instance.alias == 'hg' and c.a_ref_name != 'ancestor': |
|
512 | elif org_scm_instance.alias == 'hg' and c.a_ref_name != 'ancestor': | |
513 | if c.cs_ref_type != 'branch': |
|
513 | if c.cs_ref_type != 'branch': | |
514 | c.cs_branch_name = org_scm_instance.get_changeset(c.cs_ref_name).branch # use ref_type ? |
|
514 | c.cs_branch_name = org_scm_instance.get_changeset(c.cs_ref_name).branch # use ref_type ? | |
515 | c.a_branch_name = c.a_ref_name |
|
515 | c.a_branch_name = c.a_ref_name | |
516 | if c.a_ref_type != 'branch': |
|
516 | if c.a_ref_type != 'branch': | |
517 | try: |
|
517 | try: | |
518 | c.a_branch_name = other_scm_instance.get_changeset(c.a_ref_name).branch # use ref_type ? |
|
518 | c.a_branch_name = other_scm_instance.get_changeset(c.a_ref_name).branch # use ref_type ? | |
519 | except EmptyRepositoryError: |
|
519 | except EmptyRepositoryError: | |
520 | c.a_branch_name = 'null' # not a branch name ... but close enough |
|
520 | c.a_branch_name = 'null' # not a branch name ... but close enough | |
521 | # candidates: descendants of old head that are on the right branch |
|
521 | # candidates: descendants of old head that are on the right branch | |
522 | # and not are the old head itself ... |
|
522 | # and not are the old head itself ... | |
523 | # and nothing at all if old head is a descendant of target ref name |
|
523 | # and nothing at all if old head is a descendant of target ref name | |
524 | if not c.is_range and other_scm_instance._repo.revs('present(%s)::&%s', c.cs_ranges[-1].raw_id, c.a_branch_name): |
|
524 | if not c.is_range and other_scm_instance._repo.revs('present(%s)::&%s', c.cs_ranges[-1].raw_id, c.a_branch_name): | |
525 | c.update_msg = _('This pull request has already been merged to %s.') % c.a_branch_name |
|
525 | c.update_msg = _('This pull request has already been merged to %s.') % c.a_branch_name | |
526 | elif c.pull_request.is_closed(): |
|
526 | elif c.pull_request.is_closed(): | |
527 | c.update_msg = _('This pull request has been closed and can not be updated.') |
|
527 | c.update_msg = _('This pull request has been closed and can not be updated.') | |
528 | else: # look for descendants of PR head on source branch in org repo |
|
528 | else: # look for descendants of PR head on source branch in org repo | |
529 | avail_revs = org_scm_instance._repo.revs('%s:: & branch(%s)', |
|
529 | avail_revs = org_scm_instance._repo.revs('%s:: & branch(%s)', | |
530 | revs[0], c.cs_branch_name) |
|
530 | revs[0], c.cs_branch_name) | |
531 | if len(avail_revs) > 1: # more than just revs[0] |
|
531 | if len(avail_revs) > 1: # more than just revs[0] | |
532 | # also show changesets that not are descendants but would be merged in |
|
532 | # also show changesets that not are descendants but would be merged in | |
533 | targethead = other_scm_instance.get_changeset(c.a_branch_name).raw_id |
|
533 | targethead = other_scm_instance.get_changeset(c.a_branch_name).raw_id | |
534 | if org_scm_instance.path != other_scm_instance.path: |
|
534 | if org_scm_instance.path != other_scm_instance.path: | |
535 | # Note: org_scm_instance.path must come first so all |
|
535 | # Note: org_scm_instance.path must come first so all | |
536 | # valid revision numbers are 100% org_scm compatible |
|
536 | # valid revision numbers are 100% org_scm compatible | |
537 | # - both for avail_revs and for revset results |
|
537 | # - both for avail_revs and for revset results | |
538 | hgrepo = unionrepo.makeunionrepository(org_scm_instance.baseui, |
|
538 | hgrepo = unionrepo.makeunionrepository(org_scm_instance.baseui, | |
539 | org_scm_instance.path, |
|
539 | org_scm_instance.path, | |
540 | other_scm_instance.path) |
|
540 | other_scm_instance.path) | |
541 | else: |
|
541 | else: | |
542 | hgrepo = org_scm_instance._repo |
|
542 | hgrepo = org_scm_instance._repo | |
543 | show = set(hgrepo.revs('::%ld & !::parents(%s) & !::%s', |
|
543 | show = set(hgrepo.revs('::%ld & !::parents(%s) & !::%s', | |
544 | avail_revs, revs[0], targethead)) |
|
544 | avail_revs, revs[0], targethead)) | |
545 | if show: |
|
545 | if show: | |
546 | c.update_msg = _('The following additional changes are available on %s:') % c.cs_branch_name |
|
546 | c.update_msg = _('The following additional changes are available on %s:') % c.cs_branch_name | |
547 | else: |
|
547 | else: | |
548 | c.update_msg = _('No additional changesets found for iterating on this pull request.') |
|
548 | c.update_msg = _('No additional changesets found for iterating on this pull request.') | |
549 | else: |
|
549 | else: | |
550 | show = set() |
|
550 | show = set() | |
551 | avail_revs = set() # drop revs[0] |
|
551 | avail_revs = set() # drop revs[0] | |
552 | c.update_msg = _('No additional changesets found for iterating on this pull request.') |
|
552 | c.update_msg = _('No additional changesets found for iterating on this pull request.') | |
553 |
|
553 | |||
554 | # TODO: handle branch heads that not are tip-most |
|
554 | # TODO: handle branch heads that not are tip-most | |
555 | brevs = org_scm_instance._repo.revs('%s - %ld - %s', c.cs_branch_name, avail_revs, revs[0]) |
|
555 | brevs = org_scm_instance._repo.revs('%s - %ld - %s', c.cs_branch_name, avail_revs, revs[0]) | |
556 | if brevs: |
|
556 | if brevs: | |
557 | # also show changesets that are on branch but neither ancestors nor descendants |
|
557 | # also show changesets that are on branch but neither ancestors nor descendants | |
558 | show.update(org_scm_instance._repo.revs('::%ld - ::%ld - ::%s', brevs, avail_revs, c.a_branch_name)) |
|
558 | show.update(org_scm_instance._repo.revs('::%ld - ::%ld - ::%s', brevs, avail_revs, c.a_branch_name)) | |
559 | show.add(revs[0]) # make sure graph shows this so we can see how they relate |
|
559 | show.add(revs[0]) # make sure graph shows this so we can see how they relate | |
560 | c.update_msg_other = _('Note: Branch %s has another head: %s.') % (c.cs_branch_name, |
|
560 | c.update_msg_other = _('Note: Branch %s has another head: %s.') % (c.cs_branch_name, | |
561 | h.short_id(org_scm_instance.get_changeset((max(brevs))).raw_id)) |
|
561 | h.short_id(org_scm_instance.get_changeset((max(brevs))).raw_id)) | |
562 |
|
562 | |||
563 | avail_show = sorted(show, reverse=True) |
|
563 | avail_show = sorted(show, reverse=True) | |
564 |
|
564 | |||
565 | elif org_scm_instance.alias == 'git': |
|
565 | elif org_scm_instance.alias == 'git': | |
566 | c.cs_repo.scm_instance.get_changeset(c.cs_rev) # check it exists - raise ChangesetDoesNotExistError if not |
|
566 | c.cs_repo.scm_instance.get_changeset(c.cs_rev) # check it exists - raise ChangesetDoesNotExistError if not | |
567 | c.update_msg = _("Git pull requests don't support iterating yet.") |
|
567 | c.update_msg = _("Git pull requests don't support iterating yet.") | |
568 | except ChangesetDoesNotExistError: |
|
568 | except ChangesetDoesNotExistError: | |
569 | c.update_msg = _('Error: some changesets not found when displaying pull request from %s.') % c.cs_rev |
|
569 | c.update_msg = _('Error: some changesets not found when displaying pull request from %s.') % c.cs_rev | |
570 |
|
570 | |||
571 | c.avail_revs = avail_revs |
|
571 | c.avail_revs = avail_revs | |
572 | c.avail_cs = [org_scm_instance.get_changeset(r) for r in avail_show] |
|
572 | c.avail_cs = [org_scm_instance.get_changeset(r) for r in avail_show] | |
573 | c.avail_jsdata = graph_data(org_scm_instance, avail_show) |
|
573 | c.avail_jsdata = graph_data(org_scm_instance, avail_show) | |
574 |
|
574 | |||
575 | raw_ids = [x.raw_id for x in c.cs_ranges] |
|
575 | raw_ids = [x.raw_id for x in c.cs_ranges] | |
576 | c.cs_comments = c.cs_repo.get_comments(raw_ids) |
|
576 | c.cs_comments = c.cs_repo.get_comments(raw_ids) | |
577 | c.cs_statuses = c.cs_repo.statuses(raw_ids) |
|
577 | c.cs_statuses = c.cs_repo.statuses(raw_ids) | |
578 |
|
578 | |||
579 | ignore_whitespace = request.GET.get('ignorews') == '1' |
|
579 | ignore_whitespace = request.GET.get('ignorews') == '1' | |
580 | line_context = safe_int(request.GET.get('context'), 3) |
|
580 | line_context = safe_int(request.GET.get('context'), 3) | |
581 | c.ignorews_url = _ignorews_url |
|
581 | c.ignorews_url = _ignorews_url | |
582 | c.context_url = _context_url |
|
582 | c.context_url = _context_url | |
583 | fulldiff = request.GET.get('fulldiff') |
|
583 | fulldiff = request.GET.get('fulldiff') | |
584 | diff_limit = None if fulldiff else self.cut_off_limit |
|
584 | diff_limit = None if fulldiff else self.cut_off_limit | |
585 |
|
585 | |||
586 | # we swap org/other ref since we run a simple diff on one repo |
|
586 | # we swap org/other ref since we run a simple diff on one repo | |
587 | log.debug('running diff between %s and %s in %s', |
|
587 | log.debug('running diff between %s and %s in %s', | |
588 | c.a_rev, c.cs_rev, org_scm_instance.path) |
|
588 | c.a_rev, c.cs_rev, org_scm_instance.path) | |
589 | try: |
|
589 | try: | |
590 | raw_diff = diffs.get_diff(org_scm_instance, rev1=safe_str(c.a_rev), rev2=safe_str(c.cs_rev), |
|
590 | raw_diff = diffs.get_diff(org_scm_instance, rev1=safe_str(c.a_rev), rev2=safe_str(c.cs_rev), | |
591 | ignore_whitespace=ignore_whitespace, context=line_context) |
|
591 | ignore_whitespace=ignore_whitespace, context=line_context) | |
592 | except ChangesetDoesNotExistError: |
|
592 | except ChangesetDoesNotExistError: | |
593 | raw_diff = _("The diff can't be shown - the PR revisions could not be found.") |
|
593 | raw_diff = _("The diff can't be shown - the PR revisions could not be found.") | |
594 | diff_processor = diffs.DiffProcessor(raw_diff, diff_limit=diff_limit) |
|
594 | diff_processor = diffs.DiffProcessor(raw_diff, diff_limit=diff_limit) | |
595 | c.limited_diff = diff_processor.limited_diff |
|
595 | c.limited_diff = diff_processor.limited_diff | |
596 | c.file_diff_data = [] |
|
596 | c.file_diff_data = [] | |
597 | c.lines_added = 0 |
|
597 | c.lines_added = 0 | |
598 | c.lines_deleted = 0 |
|
598 | c.lines_deleted = 0 | |
599 |
|
599 | |||
600 | for f in diff_processor.parsed: |
|
600 | for f in diff_processor.parsed: | |
601 | st = f['stats'] |
|
601 | st = f['stats'] | |
602 | c.lines_added += st['added'] |
|
602 | c.lines_added += st['added'] | |
603 | c.lines_deleted += st['deleted'] |
|
603 | c.lines_deleted += st['deleted'] | |
604 | filename = f['filename'] |
|
604 | filename = f['filename'] | |
605 | fid = h.FID('', filename) |
|
605 | fid = h.FID('', filename) | |
606 | html_diff = diffs.as_html(enable_comments=True, parsed_lines=[f]) |
|
606 | html_diff = diffs.as_html(enable_comments=True, parsed_lines=[f]) | |
607 | c.file_diff_data.append((fid, None, f['operation'], f['old_filename'], filename, html_diff, st)) |
|
607 | c.file_diff_data.append((fid, None, f['operation'], f['old_filename'], filename, html_diff, st)) | |
608 |
|
608 | |||
609 | # inline comments |
|
609 | # inline comments | |
610 | c.inline_cnt = 0 |
|
610 | c.inline_cnt = 0 | |
611 | c.inline_comments = cc_model.get_inline_comments( |
|
611 | c.inline_comments = cc_model.get_inline_comments( | |
612 | c.db_repo.repo_id, |
|
612 | c.db_repo.repo_id, | |
613 | pull_request=pull_request_id) |
|
613 | pull_request=pull_request_id) | |
614 | # count inline comments |
|
614 | # count inline comments | |
615 | for __, lines in c.inline_comments: |
|
615 | for __, lines in c.inline_comments: | |
616 | for comments in lines.values(): |
|
616 | for comments in lines.values(): | |
617 | c.inline_cnt += len(comments) |
|
617 | c.inline_cnt += len(comments) | |
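
A minimal sketch (not Kallithea code) of the data shape the counting loop above assumes: get_inline_comments() yields (file path, {line key: [comments]}) pairs; the sample values below are purely illustrative.

    # Illustrative stand-in for cc_model.get_inline_comments(...)
    inline_comments = [
        ('setup.py', {'n12': ['comment A'], 'o7': ['comment B', 'comment C']}),
        ('README.rst', {'n3': ['comment D']}),
    ]
    inline_cnt = 0
    for __, lines in inline_comments:
        for comments in lines.values():
            inline_cnt += len(comments)
    assert inline_cnt == 4
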
618 | # comments |
|
618 | # comments | |
619 | c.comments = cc_model.get_comments(c.db_repo.repo_id, pull_request=pull_request_id) |
|
619 | c.comments = cc_model.get_comments(c.db_repo.repo_id, pull_request=pull_request_id) | |
620 |
|
620 | |||
621 | # (badly named) pull-request status calculation based on reviewer votes |
|
621 | # (badly named) pull-request status calculation based on reviewer votes | |
622 | (c.pull_request_reviewers, |
|
622 | (c.pull_request_reviewers, | |
623 | c.pull_request_pending_reviewers, |
|
623 | c.pull_request_pending_reviewers, | |
624 | c.current_voting_result, |
|
624 | c.current_voting_result, | |
625 | ) = cs_model.calculate_pull_request_result(c.pull_request) |
|
625 | ) = cs_model.calculate_pull_request_result(c.pull_request) | |
626 | c.changeset_statuses = ChangesetStatus.STATUSES |
|
626 | c.changeset_statuses = ChangesetStatus.STATUSES | |
627 |
|
627 | |||
628 | c.is_ajax_preview = False |
|
628 | c.is_ajax_preview = False | |
629 | c.ancestors = None # [c.a_rev] ... but that is shown in another way |

629 | c.ancestors = None # [c.a_rev] ... but that is shown in another way | |
630 | return render('/pullrequests/pullrequest_show.html') |
|
630 | return render('/pullrequests/pullrequest_show.html') | |
631 |
|
631 | |||
632 | @LoginRequired() |
|
632 | @LoginRequired() | |
633 | @HasRepoPermissionLevelDecorator('read') |
|
633 | @HasRepoPermissionLevelDecorator('read') | |
634 | @jsonify |
|
634 | @jsonify | |
635 | def comment(self, repo_name, pull_request_id): |
|
635 | def comment(self, repo_name, pull_request_id): | |
636 | pull_request = PullRequest.get_or_404(pull_request_id) |
|
636 | pull_request = PullRequest.get_or_404(pull_request_id) | |
637 | allowed_to_change_status = self._is_allowed_to_change_status(pull_request) |
|
637 | allowed_to_change_status = self._is_allowed_to_change_status(pull_request) | |
638 | return create_cs_pr_comment(repo_name, pull_request=pull_request, |
|
638 | return create_cs_pr_comment(repo_name, pull_request=pull_request, | |
639 | allowed_to_change_status=allowed_to_change_status) |
|
639 | allowed_to_change_status=allowed_to_change_status) | |
640 |
|
640 | |||
641 | @LoginRequired() |
|
641 | @LoginRequired() | |
642 | @HasRepoPermissionLevelDecorator('read') |
|
642 | @HasRepoPermissionLevelDecorator('read') | |
643 | @jsonify |
|
643 | @jsonify | |
644 | def delete_comment(self, repo_name, comment_id): |
|
644 | def delete_comment(self, repo_name, comment_id): | |
645 | return delete_cs_pr_comment(repo_name, comment_id) |
|
645 | return delete_cs_pr_comment(repo_name, comment_id) |
@@ -1,221 +1,221 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | # This program is free software: you can redistribute it and/or modify |
|
2 | # This program is free software: you can redistribute it and/or modify | |
3 | # it under the terms of the GNU General Public License as published by |
|
3 | # it under the terms of the GNU General Public License as published by | |
4 | # the Free Software Foundation, either version 3 of the License, or |
|
4 | # the Free Software Foundation, either version 3 of the License, or | |
5 | # (at your option) any later version. |
|
5 | # (at your option) any later version. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU General Public License |
|
12 | # You should have received a copy of the GNU General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | """ |
|
14 | """ | |
15 | kallithea.controllers.summary |
|
15 | kallithea.controllers.summary | |
16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
|
16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
17 |
|
17 | |||
18 | Summary controller for Kallithea |
|
18 | Summary controller for Kallithea | |
19 |
|
19 | |||
20 | This file was forked by the Kallithea project in July 2014. |
|
20 | This file was forked by the Kallithea project in July 2014. | |
21 | Original author and date, and relevant copyright and licensing information is below: |
|
21 | Original author and date, and relevant copyright and licensing information is below: | |
22 | :created_on: Apr 18, 2010 |
|
22 | :created_on: Apr 18, 2010 | |
23 | :author: marcink |
|
23 | :author: marcink | |
24 | :copyright: (c) 2013 RhodeCode GmbH, and others. |
|
24 | :copyright: (c) 2013 RhodeCode GmbH, and others. | |
25 | :license: GPLv3, see LICENSE.md for more details. |
|
25 | :license: GPLv3, see LICENSE.md for more details. | |
26 | """ |
|
26 | """ | |
27 |
|
27 | |||
28 | import calendar |
|
28 | import calendar | |
29 | import itertools |
|
29 | import itertools | |
30 | import logging |
|
30 | import logging | |
31 | import traceback |
|
31 | import traceback | |
32 | from datetime import date, timedelta |
|
32 | from datetime import date, timedelta | |
33 | from time import mktime |
|
33 | from time import mktime | |
34 |
|
34 | |||
35 | from beaker.cache import cache_region |
|
35 | from beaker.cache import cache_region | |
36 | from tg import request |
|
36 | from tg import request | |
37 | from tg import tmpl_context as c |
|
37 | from tg import tmpl_context as c | |
38 | from tg.i18n import ugettext as _ |
|
38 | from tg.i18n import ugettext as _ | |
39 | from webob.exc import HTTPBadRequest |
|
39 | from webob.exc import HTTPBadRequest | |
40 |
|
40 | |||
41 | import kallithea.lib.helpers as h |
|
41 | import kallithea.lib.helpers as h | |
42 | from kallithea.config.conf import ALL_EXTS, ALL_READMES, LANGUAGES_EXTENSIONS_MAP |
|
42 | from kallithea.config.conf import ALL_EXTS, ALL_READMES, LANGUAGES_EXTENSIONS_MAP | |
43 | from kallithea.lib.auth import HasRepoPermissionLevelDecorator, LoginRequired |
|
43 | from kallithea.lib.auth import HasRepoPermissionLevelDecorator, LoginRequired | |
44 | from kallithea.lib.base import BaseRepoController, jsonify, render |
|
44 | from kallithea.lib.base import BaseRepoController, jsonify, render | |
45 | from kallithea.lib.celerylib.tasks import get_commits_stats |
|
45 | from kallithea.lib.celerylib.tasks import get_commits_stats | |
46 | from kallithea.lib.compat import json |
|
46 | from kallithea.lib.compat import json | |
47 | from kallithea.lib.markup_renderer import MarkupRenderer |
|
47 | from kallithea.lib.markup_renderer import MarkupRenderer | |
48 | from kallithea.lib.page import Page |
|
48 | from kallithea.lib.page import Page | |
49 | from kallithea.lib.utils2 import safe_int |
|
49 | from kallithea.lib.utils2 import safe_int, safe_unicode | |
50 | from kallithea.lib.vcs.backends.base import EmptyChangeset |
|
50 | from kallithea.lib.vcs.backends.base import EmptyChangeset | |
51 | from kallithea.lib.vcs.exceptions import ChangesetError, EmptyRepositoryError, NodeDoesNotExistError |
|
51 | from kallithea.lib.vcs.exceptions import ChangesetError, EmptyRepositoryError, NodeDoesNotExistError | |
52 | from kallithea.lib.vcs.nodes import FileNode |
|
52 | from kallithea.lib.vcs.nodes import FileNode | |
53 | from kallithea.model.db import Statistics |
|
53 | from kallithea.model.db import Statistics | |
54 |
|
54 | |||
55 |
|
55 | |||
56 | log = logging.getLogger(__name__) |
|
56 | log = logging.getLogger(__name__) | |
57 |
|
57 | |||
58 | README_FILES = [''.join([x[0][0], x[1][0]]) for x in |
|
58 | README_FILES = [''.join([x[0][0], x[1][0]]) for x in | |
59 | sorted(list(itertools.product(ALL_READMES, ALL_EXTS)), |
|
59 | sorted(list(itertools.product(ALL_READMES, ALL_EXTS)), | |
60 | key=lambda y:y[0][1] + y[1][1])] |
|
60 | key=lambda y:y[0][1] + y[1][1])] | |
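
A small sketch of how the README_FILES candidate list above is put together. The ALL_READMES and ALL_EXTS values shown are illustrative only (the real ones come from kallithea.config.conf); each entry is assumed to be a (text, priority) pair, so the sum of the two priorities orders the candidates.

    import itertools

    ALL_READMES = [('README', 0), ('readme', 1)]    # illustrative values
    ALL_EXTS = [('.rst', 0), ('.md', 1), ('', 2)]   # illustrative values
    README_FILES = [''.join([x[0][0], x[1][0]]) for x in
                    sorted(itertools.product(ALL_READMES, ALL_EXTS),
                           key=lambda y: y[0][1] + y[1][1])]
    # -> ['README.rst', 'README.md', 'readme.rst', 'README', 'readme.md', 'readme']
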
61 |
|
61 | |||
62 |
|
62 | |||
63 | class SummaryController(BaseRepoController): |
|
63 | class SummaryController(BaseRepoController): | |
64 |
|
64 | |||
65 | def __get_readme_data(self, db_repo): |
|
65 | def __get_readme_data(self, db_repo): | |
66 | repo_name = db_repo.repo_name |
|
66 | repo_name = db_repo.repo_name | |
67 | log.debug('Looking for README file') |
|
67 | log.debug('Looking for README file') | |
68 |
|
68 | |||
69 | @cache_region('long_term', '_get_readme_from_cache') |
|
69 | @cache_region('long_term', '_get_readme_from_cache') | |
70 | def _get_readme_from_cache(*_cache_keys): # parameters are not really used - only as caching key |
|
70 | def _get_readme_from_cache(*_cache_keys): # parameters are not really used - only as caching key | |
71 | readme_data = None |
|
71 | readme_data = None | |
72 | readme_file = None |
|
72 | readme_file = None | |
73 | try: |
|
73 | try: | |
74 | # get the landing revision, or tip if that fails |

74 | # get the landing revision, or tip if that fails | |
75 | cs = db_repo.get_landing_changeset() |
|
75 | cs = db_repo.get_landing_changeset() | |
76 | if isinstance(cs, EmptyChangeset): |
|
76 | if isinstance(cs, EmptyChangeset): | |
77 | raise EmptyRepositoryError() |
|
77 | raise EmptyRepositoryError() | |
78 | renderer = MarkupRenderer() |
|
78 | renderer = MarkupRenderer() | |
79 | for f in README_FILES: |
|
79 | for f in README_FILES: | |
80 | try: |
|
80 | try: | |
81 | readme = cs.get_node(f) |
|
81 | readme = cs.get_node(f) | |
82 | if not isinstance(readme, FileNode): |
|
82 | if not isinstance(readme, FileNode): | |
83 | continue |
|
83 | continue | |
84 | readme_file = f |
|
84 | readme_file = f | |
85 | log.debug('Found README file `%s` rendering...', |
|
85 | log.debug('Found README file `%s` rendering...', | |
86 | readme_file) |
|
86 | readme_file) | |
87 | readme_data = renderer.render(readme.content, |
|
87 | readme_data = renderer.render(safe_unicode(readme.content), | |
88 | filename=f) |
|
88 | filename=f) | |
89 | break |
|
89 | break | |
90 | except NodeDoesNotExistError: |
|
90 | except NodeDoesNotExistError: | |
91 | continue |
|
91 | continue | |
92 | except ChangesetError: |
|
92 | except ChangesetError: | |
93 | log.error(traceback.format_exc()) |
|
93 | log.error(traceback.format_exc()) | |
94 | pass |
|
94 | pass | |
95 | except EmptyRepositoryError: |
|
95 | except EmptyRepositoryError: | |
96 | pass |
|
96 | pass | |
97 |
|
97 | |||
98 | return readme_data, readme_file |
|
98 | return readme_data, readme_file | |
99 |
|
99 | |||
100 | kind = 'README' |
|
100 | kind = 'README' | |
101 | return _get_readme_from_cache(repo_name, kind, c.db_repo.changeset_cache.get('raw_id')) |
|
101 | return _get_readme_from_cache(repo_name, kind, c.db_repo.changeset_cache.get('raw_id')) | |
102 |
|
102 | |||
103 | @LoginRequired(allow_default_user=True) |
|
103 | @LoginRequired(allow_default_user=True) | |
104 | @HasRepoPermissionLevelDecorator('read') |
|
104 | @HasRepoPermissionLevelDecorator('read') | |
105 | def index(self, repo_name): |
|
105 | def index(self, repo_name): | |
106 | p = safe_int(request.GET.get('page'), 1) |
|
106 | p = safe_int(request.GET.get('page'), 1) | |
107 | size = safe_int(request.GET.get('size'), 10) |
|
107 | size = safe_int(request.GET.get('size'), 10) | |
108 | try: |
|
108 | try: | |
109 | collection = c.db_repo_scm_instance.get_changesets(reverse=True) |
|
109 | collection = c.db_repo_scm_instance.get_changesets(reverse=True) | |
110 | except EmptyRepositoryError as e: |
|
110 | except EmptyRepositoryError as e: | |
111 | h.flash(unicode(e), category='warning') |
|
111 | h.flash(unicode(e), category='warning') | |
112 | collection = [] |
|
112 | collection = [] | |
113 | c.cs_pagination = Page(collection, page=p, items_per_page=size) |
|
113 | c.cs_pagination = Page(collection, page=p, items_per_page=size) | |
114 | page_revisions = [x.raw_id for x in list(c.cs_pagination)] |
|
114 | page_revisions = [x.raw_id for x in list(c.cs_pagination)] | |
115 | c.cs_comments = c.db_repo.get_comments(page_revisions) |
|
115 | c.cs_comments = c.db_repo.get_comments(page_revisions) | |
116 | c.cs_statuses = c.db_repo.statuses(page_revisions) |
|
116 | c.cs_statuses = c.db_repo.statuses(page_revisions) | |
117 |
|
117 | |||
118 | c.ssh_repo_url = None |
|
118 | c.ssh_repo_url = None | |
119 | if request.authuser.is_default_user: |
|
119 | if request.authuser.is_default_user: | |
120 | username = None |
|
120 | username = None | |
121 | else: |
|
121 | else: | |
122 | username = request.authuser.username |
|
122 | username = request.authuser.username | |
123 | if c.ssh_enabled: |
|
123 | if c.ssh_enabled: | |
124 | c.ssh_repo_url = c.db_repo.clone_url(clone_uri_tmpl=c.clone_ssh_tmpl) |
|
124 | c.ssh_repo_url = c.db_repo.clone_url(clone_uri_tmpl=c.clone_ssh_tmpl) | |
125 |
|
125 | |||
126 | c.clone_repo_url = c.db_repo.clone_url(clone_uri_tmpl=c.clone_uri_tmpl, with_id=False, username=username) |
|
126 | c.clone_repo_url = c.db_repo.clone_url(clone_uri_tmpl=c.clone_uri_tmpl, with_id=False, username=username) | |
127 | c.clone_repo_url_id = c.db_repo.clone_url(clone_uri_tmpl=c.clone_uri_tmpl, with_id=True, username=username) |
|
127 | c.clone_repo_url_id = c.db_repo.clone_url(clone_uri_tmpl=c.clone_uri_tmpl, with_id=True, username=username) | |
128 |
|
128 | |||
129 | if c.db_repo.enable_statistics: |
|
129 | if c.db_repo.enable_statistics: | |
130 | c.show_stats = True |
|
130 | c.show_stats = True | |
131 | else: |
|
131 | else: | |
132 | c.show_stats = False |
|
132 | c.show_stats = False | |
133 |
|
133 | |||
134 | stats = Statistics.query() \ |
|
134 | stats = Statistics.query() \ | |
135 | .filter(Statistics.repository == c.db_repo) \ |
|
135 | .filter(Statistics.repository == c.db_repo) \ | |
136 | .scalar() |
|
136 | .scalar() | |
137 |
|
137 | |||
138 | c.stats_percentage = 0 |
|
138 | c.stats_percentage = 0 | |
139 |
|
139 | |||
140 | if stats and stats.languages: |
|
140 | if stats and stats.languages: | |
141 | c.no_data = False is c.db_repo.enable_statistics |
|
141 | c.no_data = False is c.db_repo.enable_statistics | |
142 | lang_stats_d = json.loads(stats.languages) |
|
142 | lang_stats_d = json.loads(stats.languages) | |
143 |
|
143 | |||
144 | lang_stats = [(x, {"count": y, |
|
144 | lang_stats = [(x, {"count": y, | |
145 | "desc": LANGUAGES_EXTENSIONS_MAP.get(x, '?')}) |
|
145 | "desc": LANGUAGES_EXTENSIONS_MAP.get(x, '?')}) | |
146 | for x, y in lang_stats_d.items()] |
|
146 | for x, y in lang_stats_d.items()] | |
147 | lang_stats.sort(key=lambda k: (-k[1]['count'], k[0])) |
|
147 | lang_stats.sort(key=lambda k: (-k[1]['count'], k[0])) | |
148 |
|
148 | |||
149 | c.trending_languages = lang_stats[:10] |
|
149 | c.trending_languages = lang_stats[:10] | |
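
A short worked example of the sort key used above: biggest count first, ties broken alphabetically by extension (the counts and descriptions are made up).

    lang_stats = [('py', {'count': 40, 'desc': 'Python'}),
                  ('rst', {'count': 12, 'desc': 'Rst'}),
                  ('js', {'count': 12, 'desc': 'Javascript'})]
    lang_stats.sort(key=lambda k: (-k[1]['count'], k[0]))
    # -> py (40) first, then js (12) before rst (12)
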
150 | else: |
|
150 | else: | |
151 | c.no_data = True |
|
151 | c.no_data = True | |
152 | c.trending_languages = [] |
|
152 | c.trending_languages = [] | |
153 |
|
153 | |||
154 | c.enable_downloads = c.db_repo.enable_downloads |
|
154 | c.enable_downloads = c.db_repo.enable_downloads | |
155 | c.readme_data, c.readme_file = \ |
|
155 | c.readme_data, c.readme_file = \ | |
156 | self.__get_readme_data(c.db_repo) |
|
156 | self.__get_readme_data(c.db_repo) | |
157 | return render('summary/summary.html') |
|
157 | return render('summary/summary.html') | |
158 |
|
158 | |||
159 | @LoginRequired() |
|
159 | @LoginRequired() | |
160 | @HasRepoPermissionLevelDecorator('read') |
|
160 | @HasRepoPermissionLevelDecorator('read') | |
161 | @jsonify |
|
161 | @jsonify | |
162 | def repo_size(self, repo_name): |
|
162 | def repo_size(self, repo_name): | |
163 | if request.is_xhr: |
|
163 | if request.is_xhr: | |
164 | return c.db_repo._repo_size() |
|
164 | return c.db_repo._repo_size() | |
165 | else: |
|
165 | else: | |
166 | raise HTTPBadRequest() |
|
166 | raise HTTPBadRequest() | |
167 |
|
167 | |||
168 | @LoginRequired(allow_default_user=True) |
|
168 | @LoginRequired(allow_default_user=True) | |
169 | @HasRepoPermissionLevelDecorator('read') |
|
169 | @HasRepoPermissionLevelDecorator('read') | |
170 | def statistics(self, repo_name): |
|
170 | def statistics(self, repo_name): | |
171 | if c.db_repo.enable_statistics: |
|
171 | if c.db_repo.enable_statistics: | |
172 | c.show_stats = True |
|
172 | c.show_stats = True | |
173 | c.no_data_msg = _('No data ready yet') |
|
173 | c.no_data_msg = _('No data ready yet') | |
174 | else: |
|
174 | else: | |
175 | c.show_stats = False |
|
175 | c.show_stats = False | |
176 | c.no_data_msg = _('Statistics are disabled for this repository') |
|
176 | c.no_data_msg = _('Statistics are disabled for this repository') | |
177 |
|
177 | |||
178 | td = date.today() + timedelta(days=1) |
|
178 | td = date.today() + timedelta(days=1) | |
179 | td_1m = td - timedelta(days=calendar.mdays[td.month]) |
|
179 | td_1m = td - timedelta(days=calendar.mdays[td.month]) | |
180 | td_1y = td - timedelta(days=365) |
|
180 | td_1y = td - timedelta(days=365) | |
181 |
|
181 | |||
182 | ts_min_m = mktime(td_1m.timetuple()) |
|
182 | ts_min_m = mktime(td_1m.timetuple()) | |
183 | ts_min_y = mktime(td_1y.timetuple()) |
|
183 | ts_min_y = mktime(td_1y.timetuple()) | |
184 | ts_max_y = mktime(td.timetuple()) |
|
184 | ts_max_y = mktime(td.timetuple()) | |
185 | c.ts_min = ts_min_m |
|
185 | c.ts_min = ts_min_m | |
186 | c.ts_max = ts_max_y |
|
186 | c.ts_max = ts_max_y | |
187 |
|
187 | |||
188 | stats = Statistics.query() \ |
|
188 | stats = Statistics.query() \ | |
189 | .filter(Statistics.repository == c.db_repo) \ |
|
189 | .filter(Statistics.repository == c.db_repo) \ | |
190 | .scalar() |
|
190 | .scalar() | |
191 | c.stats_percentage = 0 |
|
191 | c.stats_percentage = 0 | |
192 | if stats and stats.languages: |
|
192 | if stats and stats.languages: | |
193 | c.no_data = False is c.db_repo.enable_statistics |
|
193 | c.no_data = False is c.db_repo.enable_statistics | |
194 | lang_stats_d = json.loads(stats.languages) |
|
194 | lang_stats_d = json.loads(stats.languages) | |
195 | c.commit_data = json.loads(stats.commit_activity) |
|
195 | c.commit_data = json.loads(stats.commit_activity) | |
196 | c.overview_data = json.loads(stats.commit_activity_combined) |
|
196 | c.overview_data = json.loads(stats.commit_activity_combined) | |
197 |
|
197 | |||
198 | lang_stats = ((x, {"count": y, |
|
198 | lang_stats = ((x, {"count": y, | |
199 | "desc": LANGUAGES_EXTENSIONS_MAP.get(x)}) |
|
199 | "desc": LANGUAGES_EXTENSIONS_MAP.get(x)}) | |
200 | for x, y in lang_stats_d.items()) |
|
200 | for x, y in lang_stats_d.items()) | |
201 |
|
201 | |||
202 | c.trending_languages = ( |
|
202 | c.trending_languages = ( | |
203 | sorted(lang_stats, reverse=True, key=lambda k: k[1])[:10] |
|
203 | sorted(lang_stats, reverse=True, key=lambda k: k[1])[:10] | |
204 | ) |
|
204 | ) | |
205 | last_rev = stats.stat_on_revision + 1 |
|
205 | last_rev = stats.stat_on_revision + 1 | |
206 | c.repo_last_rev = c.db_repo_scm_instance.count() \ |
|
206 | c.repo_last_rev = c.db_repo_scm_instance.count() \ | |
207 | if c.db_repo_scm_instance.revisions else 0 |
|
207 | if c.db_repo_scm_instance.revisions else 0 | |
208 | if last_rev == 0 or c.repo_last_rev == 0: |
|
208 | if last_rev == 0 or c.repo_last_rev == 0: | |
209 | pass |
|
209 | pass | |
210 | else: |
|
210 | else: | |
211 | c.stats_percentage = '%.2f' % ((float((last_rev)) / |
|
211 | c.stats_percentage = '%.2f' % ((float((last_rev)) / | |
212 | c.repo_last_rev) * 100) |
|
212 | c.repo_last_rev) * 100) | |
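
A worked example of the percentage computed above, with made-up numbers:

    last_rev = 150        # stat_on_revision + 1, i.e. revisions already processed
    repo_last_rev = 200   # total number of revisions in the repository
    stats_percentage = '%.2f' % ((float(last_rev) / repo_last_rev) * 100)
    # -> '75.00'
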
213 | else: |
|
213 | else: | |
214 | c.commit_data = {} |
|
214 | c.commit_data = {} | |
215 | c.overview_data = ([[ts_min_y, 0], [ts_max_y, 10]]) |
|
215 | c.overview_data = ([[ts_min_y, 0], [ts_max_y, 10]]) | |
216 | c.trending_languages = {} |
|
216 | c.trending_languages = {} | |
217 | c.no_data = True |
|
217 | c.no_data = True | |
218 |
|
218 | |||
219 | recurse_limit = 500 # don't recurse more than 500 times when parsing |
|
219 | recurse_limit = 500 # don't recurse more than 500 times when parsing | |
220 | get_commits_stats(c.db_repo.repo_name, ts_min_y, ts_max_y, recurse_limit) |
|
220 | get_commits_stats(c.db_repo.repo_name, ts_min_y, ts_max_y, recurse_limit) | |
221 | return render('summary/statistics.html') |
|
221 | return render('summary/statistics.html') |
@@ -1,206 +1,207 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | # This program is free software: you can redistribute it and/or modify |
|
2 | # This program is free software: you can redistribute it and/or modify | |
3 | # it under the terms of the GNU General Public License as published by |
|
3 | # it under the terms of the GNU General Public License as published by | |
4 | # the Free Software Foundation, either version 3 of the License, or |
|
4 | # the Free Software Foundation, either version 3 of the License, or | |
5 | # (at your option) any later version. |
|
5 | # (at your option) any later version. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU General Public License |
|
12 | # You should have received a copy of the GNU General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | """ |
|
14 | """ | |
15 | kallithea.lib.annotate |
|
15 | kallithea.lib.annotate | |
16 | ~~~~~~~~~~~~~~~~~~~~~~ |
|
16 | ~~~~~~~~~~~~~~~~~~~~~~ | |
17 |
|
17 | |||
18 | Annotation library for usage in Kallithea, previously part of vcs |
|
18 | Annotation library for usage in Kallithea, previously part of vcs | |
19 |
|
19 | |||
20 | This file was forked by the Kallithea project in July 2014. |
|
20 | This file was forked by the Kallithea project in July 2014. | |
21 | Original author and date, and relevant copyright and licensing information is below: |
|
21 | Original author and date, and relevant copyright and licensing information is below: | |
22 | :created_on: Dec 4, 2011 |
|
22 | :created_on: Dec 4, 2011 | |
23 | :author: marcink |
|
23 | :author: marcink | |
24 | :copyright: (c) 2013 RhodeCode GmbH, and others. |
|
24 | :copyright: (c) 2013 RhodeCode GmbH, and others. | |
25 | :license: GPLv3, see LICENSE.md for more details. |
|
25 | :license: GPLv3, see LICENSE.md for more details. | |
26 | """ |
|
26 | """ | |
27 |
|
27 | |||
28 | from pygments import highlight |
|
28 | from pygments import highlight | |
29 | from pygments.formatters import HtmlFormatter |
|
29 | from pygments.formatters import HtmlFormatter | |
30 |
|
30 | |||
31 | from kallithea.lib.vcs.exceptions import VCSError |
|
31 | from kallithea.lib.vcs.exceptions import VCSError | |
32 | from kallithea.lib.vcs.nodes import FileNode |
|
32 | from kallithea.lib.vcs.nodes import FileNode | |
|
33 | from kallithea.lib.vcs.utils import safe_unicode | |||
33 |
|
34 | |||
34 |
|
35 | |||
35 | def annotate_highlight(filenode, annotate_from_changeset_func=None, |
|
36 | def annotate_highlight(filenode, annotate_from_changeset_func=None, | |
36 | order=None, headers=None, **options): |
|
37 | order=None, headers=None, **options): | |
37 | """ |
|
38 | """ | |
38 | Returns html portion containing annotated table with 3 columns: line |
|
39 | Returns html portion containing annotated table with 3 columns: line | |
39 | numbers, changeset information and pygmentized line of code. |
|
40 | numbers, changeset information and pygmentized line of code. | |
40 |
|
41 | |||
41 | :param filenode: FileNode object |
|
42 | :param filenode: FileNode object | |
42 | :param annotate_from_changeset_func: function taking changeset and |
|
43 | :param annotate_from_changeset_func: function taking changeset and | |
43 | returning single annotate cell; needs break line at the end |
|
44 | returning single annotate cell; needs break line at the end | |
44 | :param order: ordered sequence of ``ls`` (line numbers column), |
|
45 | :param order: ordered sequence of ``ls`` (line numbers column), | |
45 | ``annotate`` (annotate column), ``code`` (code column); Default is |
|
46 | ``annotate`` (annotate column), ``code`` (code column); Default is | |
46 | ``['ls', 'annotate', 'code']`` |
|
47 | ``['ls', 'annotate', 'code']`` | |
47 | :param headers: dictionary with headers (keys are what's in ``order`` |

48 | :param headers: dictionary with headers (keys are what's in ``order`` | |
48 | parameter) |
|
49 | parameter) | |
49 | """ |
|
50 | """ | |
50 | from kallithea.lib.pygmentsutils import get_custom_lexer |
|
51 | from kallithea.lib.pygmentsutils import get_custom_lexer | |
51 | options['linenos'] = True |
|
52 | options['linenos'] = True | |
52 | formatter = AnnotateHtmlFormatter(filenode=filenode, order=order, |
|
53 | formatter = AnnotateHtmlFormatter(filenode=filenode, order=order, | |
53 | headers=headers, |
|
54 | headers=headers, | |
54 | annotate_from_changeset_func=annotate_from_changeset_func, **options) |
|
55 | annotate_from_changeset_func=annotate_from_changeset_func, **options) | |
55 | lexer = get_custom_lexer(filenode.extension) or filenode.lexer |
|
56 | lexer = get_custom_lexer(filenode.extension) or filenode.lexer | |
56 | highlighted = highlight(filenode.content, lexer, formatter) |
|
57 | highlighted = highlight(safe_unicode(filenode.content), lexer, formatter) | |
57 | return highlighted |
|
58 | return highlighted | |
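
A minimal usage sketch for annotate_highlight, assuming `filenode` is a FileNode obtained from some changeset; the anchor function mirrors the docstring example further down, and the header labels are illustrative.

    def changeset_to_anchor(changeset):
        return '<a href="/changesets/%s/">%s</a>\n' % (changeset.id, changeset.id)

    html = annotate_highlight(filenode, changeset_to_anchor,
                              headers={'ls': '#', 'annotate': 'Annotate', 'code': 'Code'})
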
58 |
|
59 | |||
59 |
|
60 | |||
60 | class AnnotateHtmlFormatter(HtmlFormatter): |
|
61 | class AnnotateHtmlFormatter(HtmlFormatter): | |
61 |
|
62 | |||
62 | def __init__(self, filenode, annotate_from_changeset_func=None, |
|
63 | def __init__(self, filenode, annotate_from_changeset_func=None, | |
63 | order=None, **options): |
|
64 | order=None, **options): | |
64 | """ |
|
65 | """ | |
65 | If ``annotate_from_changeset_func`` is passed it should be a function |
|
66 | If ``annotate_from_changeset_func`` is passed it should be a function | |
66 | which returns a string from the given changeset. For example, we may pass |

67 | which returns a string from the given changeset. For example, we may pass | |
67 | the following function as ``annotate_from_changeset_func``:: |

68 | the following function as ``annotate_from_changeset_func``:: | |
68 |
|
69 | |||
69 | def changeset_to_anchor(changeset): |
|
70 | def changeset_to_anchor(changeset): | |
70 | return '<a href="/changesets/%s/">%s</a>\n' % \ |
|
71 | return '<a href="/changesets/%s/">%s</a>\n' % \ | |
71 | (changeset.id, changeset.id) |
|
72 | (changeset.id, changeset.id) | |
72 |
|
73 | |||
73 | :param annotate_from_changeset_func: see above |
|
74 | :param annotate_from_changeset_func: see above | |
74 | :param order: (default: ``['ls', 'annotate', 'code']``); order of |
|
75 | :param order: (default: ``['ls', 'annotate', 'code']``); order of | |
75 | columns; |
|
76 | columns; | |
76 | :param options: standard Pygments HtmlFormatter options; there is an |

77 | :param options: standard Pygments HtmlFormatter options; there is an | |
77 | extra option though, ``headers``. For instance we can pass:: |

78 | extra option though, ``headers``. For instance we can pass:: | |
78 |
|
79 | |||
79 | formatter = AnnotateHtmlFormatter(filenode, headers={ |
|
80 | formatter = AnnotateHtmlFormatter(filenode, headers={ | |
80 | 'ls': '#', |
|
81 | 'ls': '#', | |
81 | 'annotate': 'Annotate', |
|
82 | 'annotate': 'Annotate', | |
82 | 'code': 'Code', |
|
83 | 'code': 'Code', | |
83 | }) |
|
84 | }) | |
84 |
|
85 | |||
85 | """ |
|
86 | """ | |
86 | super(AnnotateHtmlFormatter, self).__init__(**options) |
|
87 | super(AnnotateHtmlFormatter, self).__init__(**options) | |
87 | self.annotate_from_changeset_func = annotate_from_changeset_func |
|
88 | self.annotate_from_changeset_func = annotate_from_changeset_func | |
88 | self.order = order or ('ls', 'annotate', 'code') |
|
89 | self.order = order or ('ls', 'annotate', 'code') | |
89 | headers = options.pop('headers', None) |
|
90 | headers = options.pop('headers', None) | |
90 | if headers and not ('ls' in headers and 'annotate' in headers and |
|
91 | if headers and not ('ls' in headers and 'annotate' in headers and | |
91 | 'code' in headers |
|
92 | 'code' in headers | |
92 | ): |
|
93 | ): | |
93 | raise ValueError("If headers option dict is specified it must " |
|
94 | raise ValueError("If headers option dict is specified it must " | |
94 | "all 'ls', 'annotate' and 'code' keys") |
|
95 | "all 'ls', 'annotate' and 'code' keys") | |
95 | self.headers = headers |
|
96 | self.headers = headers | |
96 | if isinstance(filenode, FileNode): |
|
97 | if isinstance(filenode, FileNode): | |
97 | self.filenode = filenode |
|
98 | self.filenode = filenode | |
98 | else: |
|
99 | else: | |
99 | raise VCSError("This formatter expect FileNode parameter, not %r" |
|
100 | raise VCSError("This formatter expect FileNode parameter, not %r" | |
100 | % type(filenode)) |
|
101 | % type(filenode)) | |
101 |
|
102 | |||
102 | def annotate_from_changeset(self, changeset): |
|
103 | def annotate_from_changeset(self, changeset): | |
103 | """ |
|
104 | """ | |
104 | Returns full html line for single changeset per annotated line. |
|
105 | Returns full html line for single changeset per annotated line. | |
105 | """ |
|
106 | """ | |
106 | if self.annotate_from_changeset_func: |
|
107 | if self.annotate_from_changeset_func: | |
107 | return self.annotate_from_changeset_func(changeset) |
|
108 | return self.annotate_from_changeset_func(changeset) | |
108 | else: |
|
109 | else: | |
109 | return ''.join((changeset.id, '\n')) |
|
110 | return ''.join((changeset.id, '\n')) | |
110 |
|
111 | |||
111 | def _wrap_tablelinenos(self, inner): |
|
112 | def _wrap_tablelinenos(self, inner): | |
112 | inner_lines = [] |
|
113 | inner_lines = [] | |
113 | lncount = 0 |
|
114 | lncount = 0 | |
114 | for t, line in inner: |
|
115 | for t, line in inner: | |
115 | if t: |
|
116 | if t: | |
116 | lncount += 1 |
|
117 | lncount += 1 | |
117 | inner_lines.append(line) |
|
118 | inner_lines.append(line) | |
118 |
|
119 | |||
119 | fl = self.linenostart |
|
120 | fl = self.linenostart | |
120 | mw = len(str(lncount + fl - 1)) |
|
121 | mw = len(str(lncount + fl - 1)) | |
121 | sp = self.linenospecial |
|
122 | sp = self.linenospecial | |
122 | st = self.linenostep |
|
123 | st = self.linenostep | |
123 | la = self.lineanchors |
|
124 | la = self.lineanchors | |
124 | aln = self.anchorlinenos |
|
125 | aln = self.anchorlinenos | |
125 | if sp: |
|
126 | if sp: | |
126 | lines = [] |
|
127 | lines = [] | |
127 |
|
128 | |||
128 | for i in range(fl, fl + lncount): |
|
129 | for i in range(fl, fl + lncount): | |
129 | if i % st == 0: |
|
130 | if i % st == 0: | |
130 | if i % sp == 0: |
|
131 | if i % sp == 0: | |
131 | if aln: |
|
132 | if aln: | |
132 | lines.append('<a href="#%s-%d" class="special">' |
|
133 | lines.append('<a href="#%s-%d" class="special">' | |
133 | '%*d</a>' % |
|
134 | '%*d</a>' % | |
134 | (la, i, mw, i)) |
|
135 | (la, i, mw, i)) | |
135 | else: |
|
136 | else: | |
136 | lines.append('<span class="special">' |
|
137 | lines.append('<span class="special">' | |
137 | '%*d</span>' % (mw, i)) |
|
138 | '%*d</span>' % (mw, i)) | |
138 | else: |
|
139 | else: | |
139 | if aln: |
|
140 | if aln: | |
140 | lines.append('<a href="#%s-%d">' |
|
141 | lines.append('<a href="#%s-%d">' | |
141 | '%*d</a>' % (la, i, mw, i)) |
|
142 | '%*d</a>' % (la, i, mw, i)) | |
142 | else: |
|
143 | else: | |
143 | lines.append('%*d' % (mw, i)) |
|
144 | lines.append('%*d' % (mw, i)) | |
144 | else: |
|
145 | else: | |
145 | lines.append('') |
|
146 | lines.append('') | |
146 | ls = '\n'.join(lines) |
|
147 | ls = '\n'.join(lines) | |
147 | else: |
|
148 | else: | |
148 | lines = [] |
|
149 | lines = [] | |
149 | for i in range(fl, fl + lncount): |
|
150 | for i in range(fl, fl + lncount): | |
150 | if i % st == 0: |
|
151 | if i % st == 0: | |
151 | if aln: |
|
152 | if aln: | |
152 | lines.append('<a href="#%s-%d">%*d</a>' |
|
153 | lines.append('<a href="#%s-%d">%*d</a>' | |
153 | % (la, i, mw, i)) |
|
154 | % (la, i, mw, i)) | |
154 | else: |
|
155 | else: | |
155 | lines.append('%*d' % (mw, i)) |
|
156 | lines.append('%*d' % (mw, i)) | |
156 | else: |
|
157 | else: | |
157 | lines.append('') |
|
158 | lines.append('') | |
158 | ls = '\n'.join(lines) |
|
159 | ls = '\n'.join(lines) | |
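
The '%*d' formatting used when building the line-number column right-aligns each number to the width of the largest one, for example:

    mw = 3              # width of the largest line number, e.g. len(str(999))
    '%*d' % (mw, 7)     # -> '  7'
    '%*d' % (mw, 123)   # -> '123'
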
159 |
|
160 | |||
160 | # annotate_changesets = [tup[1] for tup in self.filenode.annotate] |
|
161 | # annotate_changesets = [tup[1] for tup in self.filenode.annotate] | |
161 | # # TODO: not sure what that fixes |
|
162 | # # TODO: not sure what that fixes | |
162 | # # If pygments cropped last lines break we need do that too |
|
163 | # # If pygments cropped last lines break we need do that too | |
163 | # ln_cs = len(annotate_changesets) |
|
164 | # ln_cs = len(annotate_changesets) | |
164 | # ln_ = len(ls.splitlines()) |
|
165 | # ln_ = len(ls.splitlines()) | |
165 | # if ln_cs > ln_: |
|
166 | # if ln_cs > ln_: | |
166 | # annotate_changesets = annotate_changesets[:ln_ - ln_cs] |
|
167 | # annotate_changesets = annotate_changesets[:ln_ - ln_cs] | |
167 | annotate = ''.join((self.annotate_from_changeset(el[2]()) |
|
168 | annotate = ''.join((self.annotate_from_changeset(el[2]()) | |
168 | for el in self.filenode.annotate)) |
|
169 | for el in self.filenode.annotate)) | |
169 | # in case you wonder about the seemingly redundant <div> here: |
|
170 | # in case you wonder about the seemingly redundant <div> here: | |
170 | # since the content in the other cell also is wrapped in a div, |
|
171 | # since the content in the other cell also is wrapped in a div, | |
171 | # some browsers in some configurations seem to mess up the formatting. |
|
172 | # some browsers in some configurations seem to mess up the formatting. | |
172 | ''' |
|
173 | ''' | |
173 | yield 0, ('<table class="%stable">' % self.cssclass + |
|
174 | yield 0, ('<table class="%stable">' % self.cssclass + | |
174 | '<tr><td class="linenos"><div class="linenodiv"><pre>' + |
|
175 | '<tr><td class="linenos"><div class="linenodiv"><pre>' + | |
175 | ls + '</pre></div></td>' + |
|
176 | ls + '</pre></div></td>' + | |
176 | '<td class="code">') |
|
177 | '<td class="code">') | |
177 | yield 0, ''.join(inner_lines) |
|
178 | yield 0, ''.join(inner_lines) | |
178 | yield 0, '</td></tr></table>' |
|
179 | yield 0, '</td></tr></table>' | |
179 |
|
180 | |||
180 | ''' |
|
181 | ''' | |
181 | headers_row = [] |
|
182 | headers_row = [] | |
182 | if self.headers: |
|
183 | if self.headers: | |
183 | headers_row = ['<tr class="annotate-header">'] |
|
184 | headers_row = ['<tr class="annotate-header">'] | |
184 | for key in self.order: |
|
185 | for key in self.order: | |
185 | td = ''.join(('<td>', self.headers[key], '</td>')) |
|
186 | td = ''.join(('<td>', self.headers[key], '</td>')) | |
186 | headers_row.append(td) |
|
187 | headers_row.append(td) | |
187 | headers_row.append('</tr>') |
|
188 | headers_row.append('</tr>') | |
188 |
|
189 | |||
189 | body_row_start = ['<tr>'] |
|
190 | body_row_start = ['<tr>'] | |
190 | for key in self.order: |
|
191 | for key in self.order: | |
191 | if key == 'ls': |
|
192 | if key == 'ls': | |
192 | body_row_start.append( |
|
193 | body_row_start.append( | |
193 | '<td class="linenos"><div class="linenodiv"><pre>' + |
|
194 | '<td class="linenos"><div class="linenodiv"><pre>' + | |
194 | ls + '</pre></div></td>') |
|
195 | ls + '</pre></div></td>') | |
195 | elif key == 'annotate': |
|
196 | elif key == 'annotate': | |
196 | body_row_start.append( |
|
197 | body_row_start.append( | |
197 | '<td class="annotate"><div class="annotatediv"><pre>' + |
|
198 | '<td class="annotate"><div class="annotatediv"><pre>' + | |
198 | annotate + '</pre></div></td>') |
|
199 | annotate + '</pre></div></td>') | |
199 | elif key == 'code': |
|
200 | elif key == 'code': | |
200 | body_row_start.append('<td class="code">') |
|
201 | body_row_start.append('<td class="code">') | |
201 | yield 0, ('<table class="%stable">' % self.cssclass + |
|
202 | yield 0, ('<table class="%stable">' % self.cssclass + | |
202 | ''.join(headers_row) + |
|
203 | ''.join(headers_row) + | |
203 | ''.join(body_row_start) |
|
204 | ''.join(body_row_start) | |
204 | ) |
|
205 | ) | |
205 | yield 0, ''.join(inner_lines) |
|
206 | yield 0, ''.join(inner_lines) | |
206 | yield 0, '</td></tr></table>' |
|
207 | yield 0, '</td></tr></table>' |
@@ -1,676 +1,679 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | # This program is free software: you can redistribute it and/or modify |
|
2 | # This program is free software: you can redistribute it and/or modify | |
3 | # it under the terms of the GNU General Public License as published by |
|
3 | # it under the terms of the GNU General Public License as published by | |
4 | # the Free Software Foundation, either version 3 of the License, or |
|
4 | # the Free Software Foundation, either version 3 of the License, or | |
5 | # (at your option) any later version. |
|
5 | # (at your option) any later version. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU General Public License |
|
12 | # You should have received a copy of the GNU General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | """ |
|
14 | """ | |
15 | kallithea.lib.diffs |
|
15 | kallithea.lib.diffs | |
16 | ~~~~~~~~~~~~~~~~~~~ |
|
16 | ~~~~~~~~~~~~~~~~~~~ | |
17 |
|
17 | |||
18 | Set of diffing helpers, previously part of vcs |
|
18 | Set of diffing helpers, previously part of vcs | |
19 |
|
19 | |||
20 |
|
20 | |||
21 | This file was forked by the Kallithea project in July 2014. |
|
21 | This file was forked by the Kallithea project in July 2014. | |
22 | Original author and date, and relevant copyright and licensing information is below: |
|
22 | Original author and date, and relevant copyright and licensing information is below: | |
23 | :created_on: Dec 4, 2011 |
|
23 | :created_on: Dec 4, 2011 | |
24 | :author: marcink |
|
24 | :author: marcink | |
25 | :copyright: (c) 2013 RhodeCode GmbH, and others. |
|
25 | :copyright: (c) 2013 RhodeCode GmbH, and others. | |
26 | :license: GPLv3, see LICENSE.md for more details. |
|
26 | :license: GPLv3, see LICENSE.md for more details. | |
27 | """ |
|
27 | """ | |
28 | import difflib |
|
28 | import difflib | |
29 | import logging |
|
29 | import logging | |
30 | import re |
|
30 | import re | |
31 |
|
31 | |||
32 | from tg.i18n import ugettext as _ |
|
32 | from tg.i18n import ugettext as _ | |
33 |
|
33 | |||
34 | from kallithea.lib import helpers as h |
|
34 | from kallithea.lib import helpers as h | |
35 | from kallithea.lib.utils2 import safe_unicode |
|
35 | from kallithea.lib.utils2 import safe_unicode | |
36 | from kallithea.lib.vcs.backends.base import EmptyChangeset |
|
36 | from kallithea.lib.vcs.backends.base import EmptyChangeset | |
37 | from kallithea.lib.vcs.exceptions import VCSError |
|
37 | from kallithea.lib.vcs.exceptions import VCSError | |
38 | from kallithea.lib.vcs.nodes import FileNode, SubModuleNode |
|
38 | from kallithea.lib.vcs.nodes import FileNode, SubModuleNode | |
39 |
|
39 | |||
40 |
|
40 | |||
41 | log = logging.getLogger(__name__) |
|
41 | log = logging.getLogger(__name__) | |
42 |
|
42 | |||
43 |
|
43 | |||
44 | def _safe_id(idstring): |
|
44 | def _safe_id(idstring): | |
45 | r"""Make a string safe for including in an id attribute. |
|
45 | r"""Make a string safe for including in an id attribute. | |
46 |
|
46 | |||
47 | The HTML spec says that id attributes 'must begin with |
|
47 | The HTML spec says that id attributes 'must begin with | |
48 | a letter ([A-Za-z]) and may be followed by any number |
|
48 | a letter ([A-Za-z]) and may be followed by any number | |
49 | of letters, digits ([0-9]), hyphens ("-"), underscores |
|
49 | of letters, digits ([0-9]), hyphens ("-"), underscores | |
50 | ("_"), colons (":"), and periods (".")'. These regexps |
|
50 | ("_"), colons (":"), and periods (".")'. These regexps | |
51 | are slightly over-zealous, in that they remove colons |
|
51 | are slightly over-zealous, in that they remove colons | |
52 | and periods unnecessarily. |
|
52 | and periods unnecessarily. | |
53 |
|
53 | |||
54 | Whitespace is transformed into underscores, and then |
|
54 | Whitespace is transformed into underscores, and then | |
55 | anything which is not a hyphen or a character that |
|
55 | anything which is not a hyphen or a character that | |
56 | matches \w (alphanumerics and underscore) is removed. |
|
56 | matches \w (alphanumerics and underscore) is removed. | |
57 |
|
57 | |||
58 | """ |
|
58 | """ | |
59 | # Transform all whitespace to underscore |
|
59 | # Transform all whitespace to underscore | |
60 | idstring = re.sub(r'\s', "_", idstring) |
|
60 | idstring = re.sub(r'\s', "_", idstring) | |
61 | # Remove everything that is not a hyphen or a member of \w |
|
61 | # Remove everything that is not a hyphen or a member of \w | |
62 | idstring = re.sub(r'(?!-)\W', "", idstring).lower() |
|
62 | idstring = re.sub(r'(?!-)\W', "", idstring).lower() | |
63 | return idstring |
|
63 | return idstring | |
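
Example outputs for _safe_id as defined above (whitespace becomes an underscore, then every character that is neither a hyphen nor matched by \w is dropped, and the result is lowercased):

    _safe_id('My File.txt')    # -> 'my_filetxt'
    _safe_id('setup.py')       # -> 'setuppy'
    _safe_id('docs/conf.py')   # -> 'docsconfpy'
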
64 |
|
64 | |||
65 |
|
65 | |||
66 | def as_html(table_class='code-difftable', line_class='line', |
|
66 | def as_html(table_class='code-difftable', line_class='line', | |
67 | old_lineno_class='lineno old', new_lineno_class='lineno new', |
|
67 | old_lineno_class='lineno old', new_lineno_class='lineno new', | |
68 | no_lineno_class='lineno', |
|
68 | no_lineno_class='lineno', | |
69 | code_class='code', enable_comments=False, parsed_lines=None): |
|
69 | code_class='code', enable_comments=False, parsed_lines=None): | |
70 | """ |
|
70 | """ | |
71 | Return given diff as html table with customized css classes |
|
71 | Return given diff as html table with customized css classes | |
72 | """ |
|
72 | """ | |
73 | def _link_to_if(condition, label, url): |
|
73 | def _link_to_if(condition, label, url): | |
74 | """ |
|
74 | """ | |
75 | Generates a link if condition is met or just the label if not. |

75 | Generates a link if condition is met or just the label if not. | |
76 | """ |
|
76 | """ | |
77 |
|
77 | |||
78 | if condition: |
|
78 | if condition: | |
79 | return '''<a href="%(url)s" data-pseudo-content="%(label)s"></a>''' % { |
|
79 | return '''<a href="%(url)s" data-pseudo-content="%(label)s"></a>''' % { | |
80 | 'url': url, |
|
80 | 'url': url, | |
81 | 'label': label |
|
81 | 'label': label | |
82 | } |
|
82 | } | |
83 | else: |
|
83 | else: | |
84 | return label |
|
84 | return label | |
85 |
|
85 | |||
86 | _html_empty = True |
|
86 | _html_empty = True | |
87 | _html = [] |
|
87 | _html = [] | |
88 | _html.append('''<table class="%(table_class)s">\n''' % { |
|
88 | _html.append('''<table class="%(table_class)s">\n''' % { | |
89 | 'table_class': table_class |
|
89 | 'table_class': table_class | |
90 | }) |
|
90 | }) | |
91 |
|
91 | |||
92 | for diff in parsed_lines: |
|
92 | for diff in parsed_lines: | |
93 | for line in diff['chunks']: |
|
93 | for line in diff['chunks']: | |
94 | _html_empty = False |
|
94 | _html_empty = False | |
95 | for change in line: |
|
95 | for change in line: | |
96 | _html.append('''<tr class="%(lc)s %(action)s">\n''' % { |
|
96 | _html.append('''<tr class="%(lc)s %(action)s">\n''' % { | |
97 | 'lc': line_class, |
|
97 | 'lc': line_class, | |
98 | 'action': change['action'] |
|
98 | 'action': change['action'] | |
99 | }) |
|
99 | }) | |
100 | anchor_old_id = '' |
|
100 | anchor_old_id = '' | |
101 | anchor_new_id = '' |
|
101 | anchor_new_id = '' | |
102 | anchor_old = "%(filename)s_o%(oldline_no)s" % { |
|
102 | anchor_old = "%(filename)s_o%(oldline_no)s" % { | |
103 | 'filename': _safe_id(diff['filename']), |
|
103 | 'filename': _safe_id(diff['filename']), | |
104 | 'oldline_no': change['old_lineno'] |
|
104 | 'oldline_no': change['old_lineno'] | |
105 | } |
|
105 | } | |
106 | anchor_new = "%(filename)s_n%(oldline_no)s" % { |
|
106 | anchor_new = "%(filename)s_n%(oldline_no)s" % { | |
107 | 'filename': _safe_id(diff['filename']), |
|
107 | 'filename': _safe_id(diff['filename']), | |
108 | 'oldline_no': change['new_lineno'] |
|
108 | 'oldline_no': change['new_lineno'] | |
109 | } |
|
109 | } | |
110 | cond_old = (change['old_lineno'] != '...' and |
|
110 | cond_old = (change['old_lineno'] != '...' and | |
111 | change['old_lineno']) |
|
111 | change['old_lineno']) | |
112 | cond_new = (change['new_lineno'] != '...' and |
|
112 | cond_new = (change['new_lineno'] != '...' and | |
113 | change['new_lineno']) |
|
113 | change['new_lineno']) | |
114 | no_lineno = (change['old_lineno'] == '...' and |
|
114 | no_lineno = (change['old_lineno'] == '...' and | |
115 | change['new_lineno'] == '...') |
|
115 | change['new_lineno'] == '...') | |
116 | if cond_old: |
|
116 | if cond_old: | |
117 | anchor_old_id = 'id="%s"' % anchor_old |
|
117 | anchor_old_id = 'id="%s"' % anchor_old | |
118 | if cond_new: |
|
118 | if cond_new: | |
119 | anchor_new_id = 'id="%s"' % anchor_new |
|
119 | anchor_new_id = 'id="%s"' % anchor_new | |
120 | ########################################################### |
|
120 | ########################################################### | |
121 | # OLD LINE NUMBER |
|
121 | # OLD LINE NUMBER | |
122 | ########################################################### |
|
122 | ########################################################### | |
123 | _html.append('''\t<td %(a_id)s class="%(olc)s" %(colspan)s>''' % { |
|
123 | _html.append('''\t<td %(a_id)s class="%(olc)s" %(colspan)s>''' % { | |
124 | 'a_id': anchor_old_id, |
|
124 | 'a_id': anchor_old_id, | |
125 | 'olc': no_lineno_class if no_lineno else old_lineno_class, |
|
125 | 'olc': no_lineno_class if no_lineno else old_lineno_class, | |
126 | 'colspan': 'colspan="2"' if no_lineno else '' |
|
126 | 'colspan': 'colspan="2"' if no_lineno else '' | |
127 | }) |
|
127 | }) | |
128 |
|
128 | |||
129 | _html.append('''%(link)s''' % { |
|
129 | _html.append('''%(link)s''' % { | |
130 | 'link': _link_to_if(not no_lineno, change['old_lineno'], |
|
130 | 'link': _link_to_if(not no_lineno, change['old_lineno'], | |
131 | '#%s' % anchor_old) |
|
131 | '#%s' % anchor_old) | |
132 | }) |
|
132 | }) | |
133 | _html.append('''</td>\n''') |
|
133 | _html.append('''</td>\n''') | |
134 | ########################################################### |
|
134 | ########################################################### | |
135 | # NEW LINE NUMBER |
|
135 | # NEW LINE NUMBER | |
136 | ########################################################### |
|
136 | ########################################################### | |
137 |
|
137 | |||
138 | if not no_lineno: |
|
138 | if not no_lineno: | |
139 | _html.append('''\t<td %(a_id)s class="%(nlc)s">''' % { |
|
139 | _html.append('''\t<td %(a_id)s class="%(nlc)s">''' % { | |
140 | 'a_id': anchor_new_id, |
|
140 | 'a_id': anchor_new_id, | |
141 | 'nlc': new_lineno_class |
|
141 | 'nlc': new_lineno_class | |
142 | }) |
|
142 | }) | |
143 |
|
143 | |||
144 | _html.append('''%(link)s''' % { |
|
144 | _html.append('''%(link)s''' % { | |
145 | 'link': _link_to_if(True, change['new_lineno'], |
|
145 | 'link': _link_to_if(True, change['new_lineno'], | |
146 | '#%s' % anchor_new) |
|
146 | '#%s' % anchor_new) | |
147 | }) |
|
147 | }) | |
148 | _html.append('''</td>\n''') |
|
148 | _html.append('''</td>\n''') | |
149 | ########################################################### |
|
149 | ########################################################### | |
150 | # CODE |
|
150 | # CODE | |
151 | ########################################################### |
|
151 | ########################################################### | |
152 | comments = '' if enable_comments else 'no-comment' |
|
152 | comments = '' if enable_comments else 'no-comment' | |
153 | _html.append('''\t<td class="%(cc)s %(inc)s">''' % { |
|
153 | _html.append('''\t<td class="%(cc)s %(inc)s">''' % { | |
154 | 'cc': code_class, |
|
154 | 'cc': code_class, | |
155 | 'inc': comments |
|
155 | 'inc': comments | |
156 | }) |
|
156 | }) | |
157 | _html.append('''\n\t\t<div class="add-bubble"><div> </div></div><pre>%(code)s</pre>\n''' % { |
|
157 | _html.append('''\n\t\t<div class="add-bubble"><div> </div></div><pre>%(code)s</pre>\n''' % { | |
158 | 'code': change['line'] |
|
158 | 'code': change['line'] | |
159 | }) |
|
159 | }) | |
160 |
|
160 | |||
161 | _html.append('''\t</td>''') |
|
161 | _html.append('''\t</td>''') | |
162 | _html.append('''\n</tr>\n''') |
|
162 | _html.append('''\n</tr>\n''') | |
163 | _html.append('''</table>''') |
|
163 | _html.append('''</table>''') | |
164 | if _html_empty: |
|
164 | if _html_empty: | |
165 | return None |
|
165 | return None | |
166 | return ''.join(_html) |
|
166 | return ''.join(_html) | |
167 |
|
167 | |||
168 |
|
168 | |||
169 | def wrap_to_table(html): |
|
169 | def wrap_to_table(html): | |
170 | """Given a string with html, return it wrapped in a table, similar to what |
|
170 | """Given a string with html, return it wrapped in a table, similar to what | |
171 | DiffProcessor returns.""" |
|
171 | DiffProcessor returns.""" | |
172 | return '''\ |
|
172 | return '''\ | |
173 | <table class="code-difftable"> |
|
173 | <table class="code-difftable"> | |
174 | <tr class="line no-comment"> |
|
174 | <tr class="line no-comment"> | |
175 | <td class="lineno new"></td> |
|
175 | <td class="lineno new"></td> | |
176 | <td class="code no-comment"><pre>%s</pre></td> |
|
176 | <td class="code no-comment"><pre>%s</pre></td> | |
177 | </tr> |
|
177 | </tr> | |
178 | </table>''' % html |
|
178 | </table>''' % html | |
179 |
|
179 | |||
180 |
|
180 | |||
181 | def wrapped_diff(filenode_old, filenode_new, diff_limit=None, |
|
181 | def wrapped_diff(filenode_old, filenode_new, diff_limit=None, | |
182 | ignore_whitespace=True, line_context=3, |
|
182 | ignore_whitespace=True, line_context=3, | |
183 | enable_comments=False): |
|
183 | enable_comments=False): | |
184 | """ |
|
184 | """ | |
185 | Returns a file diff wrapped into a table. |
|
185 | Returns a file diff wrapped into a table. | |
186 | Checks for diff_limit and presents a message if the diff is too big. |
|
186 | Checks for diff_limit and presents a message if the diff is too big. | |
187 | """ |
|
187 | """ | |
188 | if filenode_old is None: |
|
188 | if filenode_old is None: | |
189 | filenode_old = FileNode(filenode_new.path, '', EmptyChangeset()) |
|
189 | filenode_old = FileNode(filenode_new.path, '', EmptyChangeset()) | |
190 |
|
190 | |||
191 | op = None |
|
191 | op = None | |
192 | a_path = filenode_old.path # default, might be overridden by actual rename in diff |

192 | a_path = filenode_old.path # default, might be overridden by actual rename in diff | |
193 | if filenode_old.is_binary or filenode_new.is_binary: |
|
193 | if filenode_old.is_binary or filenode_new.is_binary: | |
194 | html_diff = wrap_to_table(_('Binary file')) |
|
194 | html_diff = wrap_to_table(_('Binary file')) | |
195 | stats = (0, 0) |
|
195 | stats = (0, 0) | |
196 |
|
196 | |||
197 | elif diff_limit != -1 and ( |
|
197 | elif diff_limit != -1 and ( | |
198 | diff_limit is None or |
|
198 | diff_limit is None or | |
199 | (filenode_old.size < diff_limit and filenode_new.size < diff_limit)): |
|
199 | (filenode_old.size < diff_limit and filenode_new.size < diff_limit)): | |
200 |
|
200 | |||
201 | raw_diff = get_gitdiff(filenode_old, filenode_new, |
|
201 | raw_diff = get_gitdiff(filenode_old, filenode_new, | |
202 | ignore_whitespace=ignore_whitespace, |
|
202 | ignore_whitespace=ignore_whitespace, | |
203 | context=line_context) |
|
203 | context=line_context) | |
204 | diff_processor = DiffProcessor(raw_diff) |
|
204 | diff_processor = DiffProcessor(raw_diff) | |
205 | if diff_processor.parsed: # there should be exactly one element, for the specified file |
|
205 | if diff_processor.parsed: # there should be exactly one element, for the specified file | |
206 | f = diff_processor.parsed[0] |
|
206 | f = diff_processor.parsed[0] | |
207 | op = f['operation'] |
|
207 | op = f['operation'] | |
208 | a_path = f['old_filename'] |
|
208 | a_path = f['old_filename'] | |
209 |
|
209 | |||
210 | html_diff = as_html(parsed_lines=diff_processor.parsed, enable_comments=enable_comments) |
|
210 | html_diff = as_html(parsed_lines=diff_processor.parsed, enable_comments=enable_comments) | |
211 | stats = diff_processor.stat() |
|
211 | stats = diff_processor.stat() | |
212 |
|
212 | |||
213 | else: |
|
213 | else: | |
214 | html_diff = wrap_to_table(_('Changeset was too big and was cut off, use ' |
|
214 | html_diff = wrap_to_table(_('Changeset was too big and was cut off, use ' | |
215 | 'diff menu to display this diff')) |
|
215 | 'diff menu to display this diff')) | |
216 | stats = (0, 0) |
|
216 | stats = (0, 0) | |
217 |
|
217 | |||
218 | if not html_diff: |
|
218 | if not html_diff: | |
219 | submodules = [o for o in [filenode_new, filenode_old] if isinstance(o, SubModuleNode)] |
|
219 | submodules = [o for o in [filenode_new, filenode_old] if isinstance(o, SubModuleNode)] | |
220 | if submodules: |
|
220 | if submodules: | |
221 | html_diff = wrap_to_table(h.escape('Submodule %r' % submodules[0])) |
|
221 | html_diff = wrap_to_table(h.escape('Submodule %r' % submodules[0])) | |
222 | else: |
|
222 | else: | |
223 | html_diff = wrap_to_table(_('No changes detected')) |
|
223 | html_diff = wrap_to_table(_('No changes detected')) | |
224 |
|
224 | |||
225 | cs1 = filenode_old.changeset.raw_id |
|
225 | cs1 = filenode_old.changeset.raw_id | |
226 | cs2 = filenode_new.changeset.raw_id |
|
226 | cs2 = filenode_new.changeset.raw_id | |
227 |
|
227 | |||
228 | return cs1, cs2, a_path, html_diff, stats, op |
|
228 | return cs1, cs2, a_path, html_diff, stats, op | |
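A hedged usage sketch of wrapped_diff() as defined above; the import path is an assumption (this changeset does not name the module), and the two FileNode arguments stand for the old and new version of one file.

    # Sketch only: the import path is assumed, not confirmed by this changeset.
    from kallithea.lib.diffs import wrapped_diff

    def render_file_diff(filenode_old, filenode_new):
        """Render one file's diff as an embeddable HTML table, cutting off huge diffs."""
        cs1, cs2, a_path, html_diff, stats, op = wrapped_diff(
            filenode_old, filenode_new,
            diff_limit=1024 * 1024,      # treat files over ~1 MB as "too big"
            ignore_whitespace=True,
            line_context=3,
            enable_comments=False)
        # html_diff is a ready <table>, stats is an (added, removed) tuple, and op is
        # 'added', 'removed', 'modified', 'renamed' or None (binary or cut-off diffs).
        return html_diff, stats, op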
229 |
|
229 | |||
230 |
|
230 | |||
231 | def get_gitdiff(filenode_old, filenode_new, ignore_whitespace=True, context=3): |
|
231 | def get_gitdiff(filenode_old, filenode_new, ignore_whitespace=True, context=3): | |
232 | """ |
|
232 | """ | |
233 | Returns git style diff between given ``filenode_old`` and ``filenode_new``. |
|
233 | Returns git style diff between given ``filenode_old`` and ``filenode_new``. | |
234 | """ |
|
234 | """ | |
235 | # make sure we pass in default context |
|
235 | # make sure we pass in default context | |
236 | context = context or 3 |
|
236 | context = context or 3 | |
237 | submodules = [o for o in [filenode_new, filenode_old] if isinstance(o, SubModuleNode)] |
|
237 | submodules = [o for o in [filenode_new, filenode_old] if isinstance(o, SubModuleNode)] | |
238 | if submodules: |
|
238 | if submodules: | |
239 | return '' |
|
239 | return '' | |
240 |
|
240 | |||
241 | for filenode in (filenode_old, filenode_new): |
|
241 | for filenode in (filenode_old, filenode_new): | |
242 | if not isinstance(filenode, FileNode): |
|
242 | if not isinstance(filenode, FileNode): | |
243 | raise VCSError("Given object should be FileNode object, not %s" |
|
243 | raise VCSError("Given object should be FileNode object, not %s" | |
244 | % filenode.__class__) |
|
244 | % filenode.__class__) | |
245 |
|
245 | |||
246 | repo = filenode_new.changeset.repository |
|
246 | repo = filenode_new.changeset.repository | |
247 | old_raw_id = getattr(filenode_old.changeset, 'raw_id', repo.EMPTY_CHANGESET) |
|
247 | old_raw_id = getattr(filenode_old.changeset, 'raw_id', repo.EMPTY_CHANGESET) | |
248 | new_raw_id = getattr(filenode_new.changeset, 'raw_id', repo.EMPTY_CHANGESET) |
|
248 | new_raw_id = getattr(filenode_new.changeset, 'raw_id', repo.EMPTY_CHANGESET) | |
249 |
|
249 | |||
250 | vcs_gitdiff = get_diff(repo, old_raw_id, new_raw_id, filenode_new.path, |
|
250 | vcs_gitdiff = get_diff(repo, old_raw_id, new_raw_id, filenode_new.path, | |
251 | ignore_whitespace, context) |
|
251 | ignore_whitespace, context) | |
252 | return vcs_gitdiff |
|
252 | return vcs_gitdiff | |
253 |
|
253 | |||
254 |
|
254 | |||
255 | def get_diff(scm_instance, rev1, rev2, path=None, ignore_whitespace=False, context=3): |
|
255 | def get_diff(scm_instance, rev1, rev2, path=None, ignore_whitespace=False, context=3): | |
256 | """ |
|
256 | """ | |
257 | A thin wrapper around vcs lib get_diff. |
|
257 | A thin wrapper around vcs lib get_diff. | |
258 | """ |
|
258 | """ | |
259 | try: |
|
259 | try: | |
260 | return scm_instance.get_diff(rev1, rev2, path=path, |
|
260 | return scm_instance.get_diff(rev1, rev2, path=path, | |
261 | ignore_whitespace=ignore_whitespace, context=context) |
|
261 | ignore_whitespace=ignore_whitespace, context=context) | |
262 | except MemoryError: |
|
262 | except MemoryError: | |
263 | h.flash('MemoryError: Diff is too big', category='error') |
|
263 | h.flash('MemoryError: Diff is too big', category='error') | |
264 | return '' |
|
264 | return '' | |
265 |
|
265 | |||
266 |
|
266 | |||
267 | NEW_FILENODE = 1 |
|
267 | NEW_FILENODE = 1 | |
268 | DEL_FILENODE = 2 |
|
268 | DEL_FILENODE = 2 | |
269 | MOD_FILENODE = 3 |
|
269 | MOD_FILENODE = 3 | |
270 | RENAMED_FILENODE = 4 |
|
270 | RENAMED_FILENODE = 4 | |
271 | COPIED_FILENODE = 5 |
|
271 | COPIED_FILENODE = 5 | |
272 | CHMOD_FILENODE = 6 |
|
272 | CHMOD_FILENODE = 6 | |
273 | BIN_FILENODE = 7 |
|
273 | BIN_FILENODE = 7 | |
274 |
|
274 | |||
275 |
|
275 | |||
276 | class DiffProcessor(object): |
|
276 | class DiffProcessor(object): | |
277 | """ |
|
277 | """ | |
278 | Give it a unified or git diff and it returns a list of the files that were |
|
278 | Give it a unified or git diff and it returns a list of the files that were | |
279 | mentioned in the diff together with a dict of meta information that |
|
279 | mentioned in the diff together with a dict of meta information that | |
280 | can be used to render it in an HTML template. |
|
280 | can be used to render it in an HTML template. | |
281 | """ |
|
281 | """ | |
282 | _diff_git_re = re.compile('^diff --git', re.MULTILINE) |
|
282 | _diff_git_re = re.compile('^diff --git', re.MULTILINE) | |
283 |
|
283 | |||
284 | def __init__(self, diff, vcs='hg', diff_limit=None, inline_diff=True): |
|
284 | def __init__(self, diff, vcs='hg', diff_limit=None, inline_diff=True): | |
285 | """ |
|
285 | """ | |
286 | :param diff: a text in diff format |
|
286 | :param diff: a text in diff format | |
287 | :param vcs: type of version control hg or git |
|
287 | :param vcs: type of version control hg or git | |
288 | :param diff_limit: define the size of diff that is considered "big" |
|
288 | :param diff_limit: define the size of diff that is considered "big" | |
289 | based on that parameter a cut-off will be triggered; set to None |
|
289 | based on that parameter a cut-off will be triggered; set to None | |
290 | to show full diff |
|
290 | to show full diff | |
291 | """ |
|
291 | """ | |
292 | if not isinstance(diff, b
|
292 | if not isinstance(diff, bytes): | |
293 | raise Exception('Diff must be
|
293 | raise Exception('Diff must be bytes - got %s' % type(diff)) | |
294 |
|
294 | |||
295 | self._diff = diff |
|
295 | self._diff = diff | |
296 | self.adds = 0 |
|
296 | self.adds = 0 | |
297 | self.removes = 0 |
|
297 | self.removes = 0 | |
298 | self.diff_limit = diff_limit |
|
298 | self.diff_limit = diff_limit | |
299 | self.limited_diff = False |
|
299 | self.limited_diff = False | |
300 | self.vcs = vcs |
|
300 | self.vcs = vcs | |
301 | self.parsed = self._parse_gitdiff(inline_diff=inline_diff) |
|
301 | self.parsed = self._parse_gitdiff(inline_diff=inline_diff) | |
302 |
|
302 | |||
303 | def _parse_gitdiff(self, inline_diff): |
|
303 | def _parse_gitdiff(self, inline_diff): | |
304 | """Parse self._diff and return a list of dicts with meta info and chunks for each file. |
|
304 | """Parse self._diff and return a list of dicts with meta info and chunks for each file. | |
305 | Might set limited_diff. |
|
305 | Might set limited_diff. | |
306 | Optionally, do an extra pass and add extra markup for one-liner changes. |
|
306 | Optionally, do an extra pass and add extra markup for one-liner changes. | |
307 | """ |
|
307 | """ | |
308 | _files = [] # list of dicts with meta info and chunks |
|
308 | _files = [] # list of dicts with meta info and chunks | |
309 |
|
309 | |||
310 | starts = [m.start() for m in self._diff_git_re.finditer(self._diff)] |
|
310 | starts = [m.start() for m in self._diff_git_re.finditer(self._diff)] | |
311 | starts.append(len(self._diff)) |
|
311 | starts.append(len(self._diff)) | |
312 |
|
312 | |||
313 | for start, end in zip(starts, starts[1:]): |
|
313 | for start, end in zip(starts, starts[1:]): | |
314 | if self.diff_limit and end > self.diff_limit: |
|
314 | if self.diff_limit and end > self.diff_limit: | |
315 | self.limited_diff = True |
|
315 | self.limited_diff = True | |
316 | continue |
|
316 | continue | |
317 |
|
317 | |||
318 | head, diff_lines = _get_header(self.vcs, buffer(self._diff, start, end - start)) |
|
318 | head, diff_lines = _get_header(self.vcs, buffer(self._diff, start, end - start)) | |
319 |
|
319 | |||
320 | op = None |
|
320 | op = None | |
321 | stats = { |
|
321 | stats = { | |
322 | 'added': 0, |
|
322 | 'added': 0, | |
323 | 'deleted': 0, |
|
323 | 'deleted': 0, | |
324 | 'binary': False, |
|
324 | 'binary': False, | |
325 | 'ops': {}, |
|
325 | 'ops': {}, | |
326 | } |
|
326 | } | |
327 |
|
327 | |||
328 | if head['deleted_file_mode']: |
|
328 | if head['deleted_file_mode']: | |
329 | op = 'removed' |
|
329 | op = 'removed' | |
330 | stats['binary'] = True |
|
330 | stats['binary'] = True | |
331 | stats['ops'][DEL_FILENODE] = 'deleted file' |
|
331 | stats['ops'][DEL_FILENODE] = 'deleted file' | |
332 |
|
332 | |||
333 | elif head['new_file_mode']: |
|
333 | elif head['new_file_mode']: | |
334 | op = 'added' |
|
334 | op = 'added' | |
335 | stats['binary'] = True |
|
335 | stats['binary'] = True | |
336 | stats['ops'][NEW_FILENODE] = 'new file %s' % head['new_file_mode'] |
|
336 | stats['ops'][NEW_FILENODE] = 'new file %s' % head['new_file_mode'] | |
337 | else: # modify operation, can be cp, rename, chmod |
|
337 | else: # modify operation, can be cp, rename, chmod | |
338 | # CHMOD |
|
338 | # CHMOD | |
339 | if head['new_mode'] and head['old_mode']: |
|
339 | if head['new_mode'] and head['old_mode']: | |
340 | op = 'modified' |
|
340 | op = 'modified' | |
341 | stats['binary'] = True |
|
341 | stats['binary'] = True | |
342 | stats['ops'][CHMOD_FILENODE] = ('modified file chmod %s => %s' |
|
342 | stats['ops'][CHMOD_FILENODE] = ('modified file chmod %s => %s' | |
343 | % (head['old_mode'], head['new_mode'])) |
|
343 | % (head['old_mode'], head['new_mode'])) | |
344 | # RENAME |
|
344 | # RENAME | |
345 | if (head['rename_from'] and head['rename_to'] |
|
345 | if (head['rename_from'] and head['rename_to'] | |
346 | and head['rename_from'] != head['rename_to']): |
|
346 | and head['rename_from'] != head['rename_to']): | |
347 | op = 'renamed' |
|
347 | op = 'renamed' | |
348 | stats['binary'] = True |
|
348 | stats['binary'] = True | |
349 | stats['ops'][RENAMED_FILENODE] = ('file renamed from %s to %s' |
|
349 | stats['ops'][RENAMED_FILENODE] = ('file renamed from %s to %s' | |
350 | % (head['rename_from'], head['rename_to'])) |
|
350 | % (head['rename_from'], head['rename_to'])) | |
351 | # COPY |
|
351 | # COPY | |
352 | if head.get('copy_from') and head.get('copy_to'): |
|
352 | if head.get('copy_from') and head.get('copy_to'): | |
353 | op = 'modified' |
|
353 | op = 'modified' | |
354 | stats['binary'] = True |
|
354 | stats['binary'] = True | |
355 | stats['ops'][COPIED_FILENODE] = ('file copied from %s to %s' |
|
355 | stats['ops'][COPIED_FILENODE] = ('file copied from %s to %s' | |
356 | % (head['copy_from'], head['copy_to'])) |
|
356 | % (head['copy_from'], head['copy_to'])) | |
357 | # FALL BACK: detect missed old style add or remove |
|
357 | # FALL BACK: detect missed old style add or remove | |
358 | if op is None: |
|
358 | if op is None: | |
359 | if not head['a_file'] and head['b_file']: |
|
359 | if not head['a_file'] and head['b_file']: | |
360 | op = 'added' |
|
360 | op = 'added' | |
361 | stats['binary'] = True |
|
361 | stats['binary'] = True | |
362 | stats['ops'][NEW_FILENODE] = 'new file' |
|
362 | stats['ops'][NEW_FILENODE] = 'new file' | |
363 |
|
363 | |||
364 | elif head['a_file'] and not head['b_file']: |
|
364 | elif head['a_file'] and not head['b_file']: | |
365 | op = 'removed' |
|
365 | op = 'removed' | |
366 | stats['binary'] = True |
|
366 | stats['binary'] = True | |
367 | stats['ops'][DEL_FILENODE] = 'deleted file' |
|
367 | stats['ops'][DEL_FILENODE] = 'deleted file' | |
368 |
|
368 | |||
369 | # it's not ADD not DELETE |
|
369 | # it's not ADD not DELETE | |
370 | if op is None: |
|
370 | if op is None: | |
371 | op = 'modified' |
|
371 | op = 'modified' | |
372 | stats['binary'] = True |
|
372 | stats['binary'] = True | |
373 | stats['ops'][MOD_FILENODE] = 'modified file' |
|
373 | stats['ops'][MOD_FILENODE] = 'modified file' | |
374 |
|
374 | |||
375 | # a real non-binary diff |
|
375 | # a real non-binary diff | |
376 | if head['a_file'] or head['b_file']: |
|
376 | if head['a_file'] or head['b_file']: | |
377 | chunks, added, deleted = _parse_lines(diff_lines) |
|
377 | chunks, added, deleted = _parse_lines(diff_lines) | |
378 | stats['binary'] = False |
|
378 | stats['binary'] = False | |
379 | stats['added'] = added |
|
379 | stats['added'] = added | |
380 | stats['deleted'] = deleted |
|
380 | stats['deleted'] = deleted | |
381 | # explicit mark that it's a modified file |
|
381 | # explicit mark that it's a modified file | |
382 | if op == 'modified': |
|
382 | if op == 'modified': | |
383 | stats['ops'][MOD_FILENODE] = 'modified file' |
|
383 | stats['ops'][MOD_FILENODE] = 'modified file' | |
384 | else: # Git binary patch (or empty diff) |
|
384 | else: # Git binary patch (or empty diff) | |
385 | # Git binary patch |
|
385 | # Git binary patch | |
386 | if head['bin_patch']: |
|
386 | if head['bin_patch']: | |
387 | stats['ops'][BIN_FILENODE] = 'binary diff not shown' |
|
387 | stats['ops'][BIN_FILENODE] = 'binary diff not shown' | |
388 | chunks = [] |
|
388 | chunks = [] | |
389 |
|
389 | |||
390 | if op == 'removed' and chunks: |
|
390 | if op == 'removed' and chunks: | |
391 | # a way of seeing deleted content could perhaps be nice - but |
|
391 | # a way of seeing deleted content could perhaps be nice - but | |
392 | # not with the current UI |
|
392 | # not with the current UI | |
393 | chunks = [] |
|
393 | chunks = [] | |
394 |
|
394 | |||
395 | chunks.insert(0, [{ |
|
395 | chunks.insert(0, [{ | |
396 | 'old_lineno': '', |
|
396 | 'old_lineno': '', | |
397 | 'new_lineno': '', |
|
397 | 'new_lineno': '', | |
398 | 'action': 'context', |
|
398 | 'action': 'context', | |
399 | 'line': msg, |
|
399 | 'line': msg, | |
400 | } for _op, msg in stats['ops'].iteritems() |
|
400 | } for _op, msg in stats['ops'].iteritems() | |
401 | if _op not in [MOD_FILENODE]]) |
|
401 | if _op not in [MOD_FILENODE]]) | |
402 |
|
402 | |||
403 | _files.append({ |
|
403 | _files.append({ | |
404 | 'old_filename': head['a_path'], |
|
404 | 'old_filename': head['a_path'], | |
405 | 'filename': head['b_path'], |
|
405 | 'filename': head['b_path'], | |
406 | 'old_revision': head['a_blob_id'], |
|
406 | 'old_revision': head['a_blob_id'], | |
407 | 'new_revision': head['b_blob_id'], |
|
407 | 'new_revision': head['b_blob_id'], | |
408 | 'chunks': chunks, |
|
408 | 'chunks': chunks, | |
409 | 'operation': op, |
|
409 | 'operation': op, | |
410 | 'stats': stats, |
|
410 | 'stats': stats, | |
411 | }) |
|
411 | }) | |
412 |
|
412 | |||
413 | if not inline_diff: |
|
413 | if not inline_diff: | |
414 | return _files |
|
414 | return _files | |
415 |
|
415 | |||
416 | # highlight inline changes when one del is followed by one add |
|
416 | # highlight inline changes when one del is followed by one add | |
417 | for diff_data in _files: |
|
417 | for diff_data in _files: | |
418 | for chunk in diff_data['chunks']: |
|
418 | for chunk in diff_data['chunks']: | |
419 | lineiter = iter(chunk) |
|
419 | lineiter = iter(chunk) | |
420 | try: |
|
420 | try: | |
421 | peekline = lineiter.next() |
|
421 | peekline = lineiter.next() | |
422 | while True: |
|
422 | while True: | |
423 | # find a first del line |
|
423 | # find a first del line | |
424 | while peekline['action'] != 'del': |
|
424 | while peekline['action'] != 'del': | |
425 | peekline = lineiter.next() |
|
425 | peekline = lineiter.next() | |
426 | delline = peekline |
|
426 | delline = peekline | |
427 | peekline = lineiter.next() |
|
427 | peekline = lineiter.next() | |
428 | # if not followed by add, eat all following del lines |
|
428 | # if not followed by add, eat all following del lines | |
429 | if peekline['action'] != 'add': |
|
429 | if peekline['action'] != 'add': | |
430 | while peekline['action'] == 'del': |
|
430 | while peekline['action'] == 'del': | |
431 | peekline = lineiter.next() |
|
431 | peekline = lineiter.next() | |
432 | continue |
|
432 | continue | |
433 | # found an add - make sure it is the only one |
|
433 | # found an add - make sure it is the only one | |
434 | addline = peekline |
|
434 | addline = peekline | |
435 | try: |
|
435 | try: | |
436 | peekline = lineiter.next() |
|
436 | peekline = lineiter.next() | |
437 | except StopIteration: |
|
437 | except StopIteration: | |
438 | # add was last line - ok |
|
438 | # add was last line - ok | |
439 | _highlight_inline_diff(delline, addline) |
|
439 | _highlight_inline_diff(delline, addline) | |
440 | raise |
|
440 | raise | |
441 | if peekline['action'] != 'add': |
|
441 | if peekline['action'] != 'add': | |
442 | # there was only one add line - ok |
|
442 | # there was only one add line - ok | |
443 | _highlight_inline_diff(delline, addline) |
|
443 | _highlight_inline_diff(delline, addline) | |
444 | except StopIteration: |
|
444 | except StopIteration: | |
445 | pass |
|
445 | pass | |
446 |
|
446 | |||
447 | return _files |
|
447 | return _files | |
448 |
|
448 | |||
449 | def stat(self): |
|
449 | def stat(self): | |
450 | """ |
|
450 | """ | |
451 | Returns a tuple of added and removed lines for this instance |
|
451 | Returns a tuple of added and removed lines for this instance | |
452 | """ |
|
452 | """ | |
453 | return self.adds, self.removes |
|
453 | return self.adds, self.removes | |
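A hedged end-to-end sketch of the class above on a tiny hand-written git diff; the import path is an assumption, and per the check in __init__ the input is passed as bytes.

    # Sketch only: the import path is assumed, and the sample diff is made up.
    from kallithea.lib.diffs import DiffProcessor

    raw_diff = (
        b"diff --git a/hello.txt b/hello.txt\n"
        b"index e965047..980a0d5 100644\n"
        b"--- a/hello.txt\n"
        b"+++ b/hello.txt\n"
        b"@@ -1 +1 @@\n"
        b"-Hello\n"
        b"+Hello world\n"
    )

    processor = DiffProcessor(raw_diff, vcs='git')
    for f in processor.parsed:
        # Expected to report the file as 'modified' with one added and one removed line.
        print('%s %s +%d -%d' % (f['filename'], f['operation'],
                                 f['stats']['added'], f['stats']['deleted']))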
454 |
|
454 | |||
455 |
|
455 | |||
456 | _escape_re = re.compile(r'(&)|(<)|(>)|(\t)|(\r)|(?<=.)( \n| $)') |
|
456 | _escape_re = re.compile(r'(&)|(<)|(>)|(\t)|(\r)|(?<=.)( \n| $)') | |
457 |
|
457 | |||
458 |
|
458 | |||
459 | def _escaper(string): |
|
459 | def _escaper(string): | |
460 | """ |
|
460 | """ | |
461 | Do HTML escaping/markup |
|
461 | Do HTML escaping/markup | |
462 | """ |
|
462 | """ | |
463 |
|
463 | |||
464 | def substitute(m): |
|
464 | def substitute(m): | |
465 | groups = m.groups() |
|
465 | groups = m.groups() | |
466 | if groups[0]: |
|
466 | if groups[0]: | |
467 | return '&' |
|
467 | return '&' | |
468 | if groups[1]: |
|
468 | if groups[1]: | |
469 | return '<' |
|
469 | return '<' | |
470 | if groups[2]: |
|
470 | if groups[2]: | |
471 | return '>' |
|
471 | return '>' | |
472 | if groups[3]: |
|
472 | if groups[3]: | |
473 | return '<u>\t</u>' |
|
473 | return '<u>\t</u>' | |
474 | if groups[4]: |
|
474 | if groups[4]: | |
475 | return '<u class="cr"></u>' |
|
475 | return '<u class="cr"></u>' | |
476 | if groups[5]: |
|
476 | if groups[5]: | |
477 | return ' <i></i>' |
|
477 | return ' <i></i>' | |
478 | assert False |
|
478 | assert False | |
479 |
|
479 | |||
480 | return _escape_re.sub(substitute, safe_unicode(string)) |
|
480 | return _escape_re.sub(substitute, safe_unicode(string)) | |
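A self-contained sketch of the markup produced by the escaper above, reusing the same regular expression; safe_unicode() is omitted, so plain text input is assumed.

    # Self-contained sketch of _escaper() above; the substitution table is written as a list.
    import re

    escape_re = re.compile(r'(&)|(<)|(>)|(\t)|(\r)|(?<=.)( \n| $)')
    replacements = ['&amp;', '&lt;', '&gt;', '<u>\t</u>', '<u class="cr"></u>', ' <i></i>']

    def escaper_sketch(s):
        def substitute(m):
            for i, group in enumerate(m.groups()):
                if group:
                    return replacements[i]
            raise AssertionError('the regex only matches one of the groups above')
        return escape_re.sub(substitute, s)

    print(escaper_sketch('if a < b:\t# check '))
    # -> 'if a &lt; b:<u>\t</u># check <i></i>'  (tab and trailing space made visible)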
481 |
|
481 | |||
482 |
|
482 | |||
483 | _git_header_re = re.compile(r""" |
|
483 | _git_header_re = re.compile(r""" | |
484 | ^diff[ ]--git[ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n |
|
484 | ^diff[ ]--git[ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n | |
485 | (?:^old[ ]mode[ ](?P<old_mode>\d+)\n |
|
485 | (?:^old[ ]mode[ ](?P<old_mode>\d+)\n | |
486 | ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))? |
|
486 | ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))? | |
487 | (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n |
|
487 | (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n | |
488 | ^rename[ ]from[ ](?P<rename_from>.+)\n |
|
488 | ^rename[ ]from[ ](?P<rename_from>.+)\n | |
489 | ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))? |
|
489 | ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))? | |
490 | (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))? |
|
490 | (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))? | |
491 | (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))? |
|
491 | (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))? | |
492 | (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+) |
|
492 | (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+) | |
493 | \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))? |
|
493 | \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))? | |
494 | (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))? |
|
494 | (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))? | |
495 | (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))? |
|
495 | (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))? | |
496 | (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))? |
|
496 | (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))? | |
497 | """, re.VERBOSE | re.MULTILINE) |
|
497 | """, re.VERBOSE | re.MULTILINE) | |
498 |
|
498 | |||
499 |
|
499 | |||
500 | _hg_header_re = re.compile(r""" |
|
500 | _hg_header_re = re.compile(r""" | |
501 | ^diff[ ]--git[ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n |
|
501 | ^diff[ ]--git[ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n | |
502 | (?:^old[ ]mode[ ](?P<old_mode>\d+)\n |
|
502 | (?:^old[ ]mode[ ](?P<old_mode>\d+)\n | |
503 | ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))? |
|
503 | ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))? | |
504 | (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%(?:\n|$))? |
|
504 | (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%(?:\n|$))? | |
505 | (?:^rename[ ]from[ ](?P<rename_from>.+)\n |
|
505 | (?:^rename[ ]from[ ](?P<rename_from>.+)\n | |
506 | ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))? |
|
506 | ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))? | |
507 | (?:^copy[ ]from[ ](?P<copy_from>.+)\n |
|
507 | (?:^copy[ ]from[ ](?P<copy_from>.+)\n | |
508 | ^copy[ ]to[ ](?P<copy_to>.+)(?:\n|$))? |
|
508 | ^copy[ ]to[ ](?P<copy_to>.+)(?:\n|$))? | |
509 | (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))? |
|
509 | (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))? | |
510 | (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))? |
|
510 | (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))? | |
511 | (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+) |
|
511 | (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+) | |
512 | \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))? |
|
512 | \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))? | |
513 | (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))? |
|
513 | (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))? | |
514 | (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))? |
|
514 | (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))? | |
515 | (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))? |
|
515 | (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))? | |
516 | """, re.VERBOSE | re.MULTILINE) |
|
516 | """, re.VERBOSE | re.MULTILINE) | |
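To show what these header regexes capture, the sketch below copies the git pattern above verbatim and matches it against a small hand-written header; only the sample input is made up.

    # Self-contained sketch: same pattern as _git_header_re above, applied to a made-up header.
    import re

    git_header_re = re.compile(r"""
        ^diff[ ]--git[ ]a/(?P<a_path>.+?)[ ]b/(?P<b_path>.+?)\n
        (?:^old[ ]mode[ ](?P<old_mode>\d+)\n
           ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))?
        (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n
           ^rename[ ]from[ ](?P<rename_from>.+)\n
           ^rename[ ]to[ ](?P<rename_to>.+)(?:\n|$))?
        (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))?
        (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))?
        (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+)
            \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))?
        (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))?
        (?:^---[ ](a/(?P<a_file>.+?)|/dev/null)\t?(?:\n|$))?
        (?:^\+\+\+[ ](b/(?P<b_file>.+?)|/dev/null)\t?(?:\n|$))?
        """, re.VERBOSE | re.MULTILINE)

    header = (
        "diff --git a/setup.py b/setup.py\n"
        "index 0123abc..4567def 100644\n"
        "--- a/setup.py\n"
        "+++ b/setup.py\n"
    )

    groups = git_header_re.match(header).groupdict()
    print('%(a_path)s %(a_blob_id)s..%(b_blob_id)s %(b_file)s' % groups)
    # -> setup.py 0123abc..4567def setup.py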
517 |
|
517 | |||
518 |
|
518 | |||
|
519 | _header_next_check = re.compile(br'''(?!@)(?!literal )(?!delta )''') | |||
|
520 | ||||
|
521 | ||||
519 | def _get_header(vcs, diff_chunk): |
|
522 | def _get_header(vcs, diff_chunk): | |
520 | """ |
|
523 | """ | |
521 | Parses a Git diff for a single file (header and chunks) and returns a tuple with: |
|
524 | Parses a Git diff for a single file (header and chunks) and returns a tuple with: | |
522 |
|
525 | |||
523 | 1. A dict with meta info: |
|
526 | 1. A dict with meta info: | |
524 |
|
527 | |||
525 | a_path, b_path, similarity_index, rename_from, rename_to, |
|
528 | a_path, b_path, similarity_index, rename_from, rename_to, | |
526 | old_mode, new_mode, new_file_mode, deleted_file_mode, |
|
529 | old_mode, new_mode, new_file_mode, deleted_file_mode, | |
527 | a_blob_id, b_blob_id, b_mode, a_file, b_file |
|
530 | a_blob_id, b_blob_id, b_mode, a_file, b_file | |
528 |
|
531 | |||
529 | 2. An iterator yielding lines with simple HTML markup. |
|
532 | 2. An iterator yielding lines with simple HTML markup. | |
530 | """ |
|
533 | """ | |
531 | match = None |
|
534 | match = None | |
532 | if vcs == 'git': |
|
535 | if vcs == 'git': | |
533 | match = _git_header_re.match(diff_chunk) |
|
536 | match = _git_header_re.match(diff_chunk) | |
534 | elif vcs == 'hg': |
|
537 | elif vcs == 'hg': | |
535 | match = _hg_header_re.match(diff_chunk) |
|
538 | match = _hg_header_re.match(diff_chunk) | |
536 | if match is None: |
|
539 | if match is None: | |
537 | raise Exception('diff not recognized as valid %s diff' % vcs) |
|
540 | raise Exception('diff not recognized as valid %s diff' % vcs) | |
538 | meta_info = match.groupdict() |
|
541 | meta_info = match.groupdict() | |
539 | rest = diff_chunk[match.end():] |
|
542 | rest = diff_chunk[match.end():] | |
540 | if rest and not rest.startswith('@') and not rest.startswith('literal ') and not rest.startswith('delta '): |
|
543 | if rest and _header_next_check.match(rest): | |
541 | raise Exception('cannot parse %s diff header: %r followed by %r' % (vcs, diff_chunk[:match.end()], rest[:1000])) |
|
544 | raise Exception('cannot parse %s diff header: %r followed by %r' % (vcs, diff_chunk[:match.end()], rest[:1000])) | |
542 | diff_lines = (_escaper(m.group(0)) for m in re.finditer(r'.*\n|.+$', rest)) # don't split on \r as str.splitlines do |
|
545 | diff_lines = (_escaper(m.group(0)) for m in re.finditer(r'.*\n|.+$', rest)) # don't split on \r as str.splitlines do | |
543 | return meta_info, diff_lines |
|
546 | return meta_info, diff_lines | |
544 |
|
547 | |||
545 |
|
548 | |||
546 | _chunk_re = re.compile(r'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)') |
|
549 | _chunk_re = re.compile(r'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)') | |
547 | _newline_marker = re.compile(r'^\\ No newline at end of file') |
|
550 | _newline_marker = re.compile(r'^\\ No newline at end of file') | |
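A small runnable illustration of what the chunk-header pattern above extracts from a typical "@@" line; the sample line is made up.

    # Self-contained sketch: same pattern as _chunk_re above.
    import re

    chunk_re = re.compile(r'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)')

    print(chunk_re.match('@@ -12,5 +12,7 @@ def stat(self):').groups())
    # -> ('12', '5', '12', '7', ' def stat(self):')
    # _parse_lines() below treats a missing count, as in "@@ -1 +1 @@", as 1.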
548 |
|
551 | |||
549 |
|
552 | |||
550 | def _parse_lines(diff_lines): |
|
553 | def _parse_lines(diff_lines): | |
551 | """ |
|
554 | """ | |
552 | Given an iterator of diff body lines, parse them and return a dict per |
|
555 | Given an iterator of diff body lines, parse them and return a dict per | |
553 | line and added/removed totals. |
|
556 | line and added/removed totals. | |
554 | """ |
|
557 | """ | |
555 | added = deleted = 0 |
|
558 | added = deleted = 0 | |
556 | old_line = old_end = new_line = new_end = None |
|
559 | old_line = old_end = new_line = new_end = None | |
557 |
|
560 | |||
558 | try: |
|
561 | try: | |
559 | chunks = [] |
|
562 | chunks = [] | |
560 | line = diff_lines.next() |
|
563 | line = diff_lines.next() | |
561 |
|
564 | |||
562 | while True: |
|
565 | while True: | |
563 | lines = [] |
|
566 | lines = [] | |
564 | chunks.append(lines) |
|
567 | chunks.append(lines) | |
565 |
|
568 | |||
566 | match = _chunk_re.match(line) |
|
569 | match = _chunk_re.match(line) | |
567 |
|
570 | |||
568 | if not match: |
|
571 | if not match: | |
569 | raise Exception('error parsing diff @@ line %r' % line) |
|
572 | raise Exception('error parsing diff @@ line %r' % line) | |
570 |
|
573 | |||
571 | gr = match.groups() |
|
574 | gr = match.groups() | |
572 | (old_line, old_end, |
|
575 | (old_line, old_end, | |
573 | new_line, new_end) = [int(x or 1) for x in gr[:-1]] |
|
576 | new_line, new_end) = [int(x or 1) for x in gr[:-1]] | |
574 | old_line -= 1 |
|
577 | old_line -= 1 | |
575 | new_line -= 1 |
|
578 | new_line -= 1 | |
576 |
|
579 | |||
577 | context = len(gr) == 5 |
|
580 | context = len(gr) == 5 | |
578 | old_end += old_line |
|
581 | old_end += old_line | |
579 | new_end += new_line |
|
582 | new_end += new_line | |
580 |
|
583 | |||
581 | if context: |
|
584 | if context: | |
582 | # skip context only if it's first line |
|
585 | # skip context only if it's first line | |
583 | if int(gr[0]) > 1: |
|
586 | if int(gr[0]) > 1: | |
584 | lines.append({ |
|
587 | lines.append({ | |
585 | 'old_lineno': '...', |
|
588 | 'old_lineno': '...', | |
586 | 'new_lineno': '...', |
|
589 | 'new_lineno': '...', | |
587 | 'action': 'context', |
|
590 | 'action': 'context', | |
588 | 'line': line, |
|
591 | 'line': line, | |
589 | }) |
|
592 | }) | |
590 |
|
593 | |||
591 | line = diff_lines.next() |
|
594 | line = diff_lines.next() | |
592 |
|
595 | |||
593 | while old_line < old_end or new_line < new_end: |
|
596 | while old_line < old_end or new_line < new_end: | |
594 | if not line: |
|
597 | if not line: | |
595 | raise Exception('error parsing diff - empty line at -%s+%s' % (old_line, new_line)) |
|
598 | raise Exception('error parsing diff - empty line at -%s+%s' % (old_line, new_line)) | |
596 |
|
599 | |||
597 | affects_old = affects_new = False |
|
600 | affects_old = affects_new = False | |
598 |
|
601 | |||
599 | command = line[0] |
|
602 | command = line[0] | |
600 | if command == '+': |
|
603 | if command == '+': | |
601 | affects_new = True |
|
604 | affects_new = True | |
602 | action = 'add' |
|
605 | action = 'add' | |
603 | added += 1 |
|
606 | added += 1 | |
604 | elif command == '-': |
|
607 | elif command == '-': | |
605 | affects_old = True |
|
608 | affects_old = True | |
606 | action = 'del' |
|
609 | action = 'del' | |
607 | deleted += 1 |
|
610 | deleted += 1 | |
608 | elif command == ' ': |
|
611 | elif command == ' ': | |
609 | affects_old = affects_new = True |
|
612 | affects_old = affects_new = True | |
610 | action = 'unmod' |
|
613 | action = 'unmod' | |
611 | else: |
|
614 | else: | |
612 | raise Exception('error parsing diff - unknown command in line %r at -%s+%s' % (line, old_line, new_line)) |
|
615 | raise Exception('error parsing diff - unknown command in line %r at -%s+%s' % (line, old_line, new_line)) | |
613 |
|
616 | |||
614 | if not _newline_marker.match(line): |
|
617 | if not _newline_marker.match(line): | |
615 | old_line += affects_old |
|
618 | old_line += affects_old | |
616 | new_line += affects_new |
|
619 | new_line += affects_new | |
617 | lines.append({ |
|
620 | lines.append({ | |
618 | 'old_lineno': affects_old and old_line or '', |
|
621 | 'old_lineno': affects_old and old_line or '', | |
619 | 'new_lineno': affects_new and new_line or '', |
|
622 | 'new_lineno': affects_new and new_line or '', | |
620 | 'action': action, |
|
623 | 'action': action, | |
621 | 'line': line[1:], |
|
624 | 'line': line[1:], | |
622 | }) |
|
625 | }) | |
623 |
|
626 | |||
624 | line = diff_lines.next() |
|
627 | line = diff_lines.next() | |
625 |
|
628 | |||
626 | if _newline_marker.match(line): |
|
629 | if _newline_marker.match(line): | |
627 | # we need to append to lines, since this is not |
|
630 | # we need to append to lines, since this is not | |
628 | # counted in the line specs of diff |
|
631 | # counted in the line specs of diff | |
629 | lines.append({ |
|
632 | lines.append({ | |
630 | 'old_lineno': '...', |
|
633 | 'old_lineno': '...', | |
631 | 'new_lineno': '...', |
|
634 | 'new_lineno': '...', | |
632 | 'action': 'context', |
|
635 | 'action': 'context', | |
633 | 'line': line, |
|
636 | 'line': line, | |
634 | }) |
|
637 | }) | |
635 | line = diff_lines.next() |
|
638 | line = diff_lines.next() | |
636 | if old_line > old_end: |
|
639 | if old_line > old_end: | |
637 | raise Exception('error parsing diff - more than %s "-" lines at -%s+%s' % (old_end, old_line, new_line)) |
|
640 | raise Exception('error parsing diff - more than %s "-" lines at -%s+%s' % (old_end, old_line, new_line)) | |
638 | if new_line > new_end: |
|
641 | if new_line > new_end: | |
639 | raise Exception('error parsing diff - more than %s "+" lines at -%s+%s' % (new_end, old_line, new_line)) |
|
642 | raise Exception('error parsing diff - more than %s "+" lines at -%s+%s' % (new_end, old_line, new_line)) | |
640 | except StopIteration: |
|
643 | except StopIteration: | |
641 | pass |
|
644 | pass | |
642 | if old_line != old_end or new_line != new_end: |
|
645 | if old_line != old_end or new_line != new_end: | |
643 | raise Exception('diff processing broken when old %s<>%s or new %s<>%s line %r' % (old_line, old_end, new_line, new_end, line)) |
|
646 | raise Exception('diff processing broken when old %s<>%s or new %s<>%s line %r' % (old_line, old_end, new_line, new_end, line)) | |
644 |
|
647 | |||
645 | return chunks, added, deleted |
|
648 | return chunks, added, deleted | |
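For reference, the per-line dicts appended above have this shape; all values below are made-up examples, while the keys and action names are the ones used in the code.

    # Illustrative only: the shape of one line dict produced by _parse_lines() above.
    example_line = {
        'old_lineno': 12,      # '' when the line exists only on the new side
        'new_lineno': 12,      # '' when the line exists only on the old side
        'action': 'unmod',     # 'add', 'del', 'unmod' or 'context'
        'line': 'some file content',
    }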
646 |
|
649 | |||
647 | # Used for inline highlighter word split, must match the substitutions in _escaper |
|
650 | # Used for inline highlighter word split, must match the substitutions in _escaper | |
648 | _token_re = re.compile(r'()(&|<|>|<u>\t</u>|<u class="cr"></u>| <i></i>|\W+?)') |
|
651 | _token_re = re.compile(r'()(&|<|>|<u>\t</u>|<u class="cr"></u>| <i></i>|\W+?)') | |
649 |
|
652 | |||
650 |
|
653 | |||
651 | def _highlight_inline_diff(old, new): |
|
654 | def _highlight_inline_diff(old, new): | |
652 | """ |
|
655 | """ | |
653 | Highlight simple add/remove in two lines given as info dicts. They are |
|
656 | Highlight simple add/remove in two lines given as info dicts. They are | |
654 | modified in place and given markup with <del>/<ins>. |
|
657 | modified in place and given markup with <del>/<ins>. | |
655 | """ |
|
658 | """ | |
656 | assert old['action'] == 'del' |
|
659 | assert old['action'] == 'del' | |
657 | assert new['action'] == 'add' |
|
660 | assert new['action'] == 'add' | |
658 |
|
661 | |||
659 | oldwords = _token_re.split(old['line']) |
|
662 | oldwords = _token_re.split(old['line']) | |
660 | newwords = _token_re.split(new['line']) |
|
663 | newwords = _token_re.split(new['line']) | |
661 | sequence = difflib.SequenceMatcher(None, oldwords, newwords) |
|
664 | sequence = difflib.SequenceMatcher(None, oldwords, newwords) | |
662 |
|
665 | |||
663 | oldfragments, newfragments = [], [] |
|
666 | oldfragments, newfragments = [], [] | |
664 | for tag, i1, i2, j1, j2 in sequence.get_opcodes(): |
|
667 | for tag, i1, i2, j1, j2 in sequence.get_opcodes(): | |
665 | oldfrag = ''.join(oldwords[i1:i2]) |
|
668 | oldfrag = ''.join(oldwords[i1:i2]) | |
666 | newfrag = ''.join(newwords[j1:j2]) |
|
669 | newfrag = ''.join(newwords[j1:j2]) | |
667 | if tag != 'equal': |
|
670 | if tag != 'equal': | |
668 | if oldfrag: |
|
671 | if oldfrag: | |
669 | oldfrag = '<del>%s</del>' % oldfrag |
|
672 | oldfrag = '<del>%s</del>' % oldfrag | |
670 | if newfrag: |
|
673 | if newfrag: | |
671 | newfrag = '<ins>%s</ins>' % newfrag |
|
674 | newfrag = '<ins>%s</ins>' % newfrag | |
672 | oldfragments.append(oldfrag) |
|
675 | oldfragments.append(oldfrag) | |
673 | newfragments.append(newfrag) |
|
676 | newfragments.append(newfrag) | |
674 |
|
677 | |||
675 | old['line'] = "".join(oldfragments) |
|
678 | old['line'] = "".join(oldfragments) | |
676 | new['line'] = "".join(newfragments) |
|
679 | new['line'] = "".join(newfragments) |
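The word-level highlighting above is plain difflib; the following self-contained sketch runs the same opcode walk on two hand-split word lists (the real code splits with _token_re and then wraps the non-equal fragments in <del>/<ins>).

    # Self-contained sketch of the difflib opcode walk behind _highlight_inline_diff() above.
    import difflib

    old_words = ['result', ' = ', 'compute', '(', 'a', ', ', 'b', ')']
    new_words = ['result', ' = ', 'compute', '(', 'a', ', ', 'b', ', ', 'c', ')']

    for tag, i1, i2, j1, j2 in difflib.SequenceMatcher(None, old_words, new_words).get_opcodes():
        if tag != 'equal':
            print('%s: %r -> %r' % (tag, ''.join(old_words[i1:i2]), ''.join(new_words[j1:j2])))
    # -> insert: '' -> ', c'   (the added fragment is what would get the <ins> markup)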
@@ -1,1325 +1,1325 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | # This program is free software: you can redistribute it and/or modify |
|
2 | # This program is free software: you can redistribute it and/or modify | |
3 | # it under the terms of the GNU General Public License as published by |
|
3 | # it under the terms of the GNU General Public License as published by | |
4 | # the Free Software Foundation, either version 3 of the License, or |
|
4 | # the Free Software Foundation, either version 3 of the License, or | |
5 | # (at your option) any later version. |
|
5 | # (at your option) any later version. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU General Public License |
|
12 | # You should have received a copy of the GNU General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | """ |
|
14 | """ | |
15 | Helper functions |
|
15 | Helper functions | |
16 |
|
16 | |||
17 | Consists of functions to typically be used within templates, but also |
|
17 | Consists of functions to typically be used within templates, but also | |
18 | available to Controllers. This module is available to both as 'h'. |
|
18 | available to Controllers. This module is available to both as 'h'. | |
19 | """ |
|
19 | """ | |
20 | import hashlib |
|
20 | import hashlib | |
21 | import json |
|
21 | import json | |
22 | import logging |
|
22 | import logging | |
23 | import random |
|
23 | import random | |
24 | import re |
|
24 | import re | |
25 | import textwrap |
|
25 | import textwrap | |
26 | import urlparse |
|
26 | import urlparse | |
27 |
|
27 | |||
28 | from beaker.cache import cache_region |
|
28 | from beaker.cache import cache_region | |
29 | from pygments import highlight as code_highlight |
|
29 | from pygments import highlight as code_highlight | |
30 | from pygments.formatters.html import HtmlFormatter |
|
30 | from pygments.formatters.html import HtmlFormatter | |
31 | from tg.i18n import ugettext as _ |
|
31 | from tg.i18n import ugettext as _ | |
32 | from webhelpers2.html import HTML, escape, literal |
|
32 | from webhelpers2.html import HTML, escape, literal | |
33 | from webhelpers2.html.tags import NotGiven, Option, Options, _input, _make_safe_id_component, checkbox, end_form |
|
33 | from webhelpers2.html.tags import NotGiven, Option, Options, _input, _make_safe_id_component, checkbox, end_form | |
34 | from webhelpers2.html.tags import form as insecure_form |
|
34 | from webhelpers2.html.tags import form as insecure_form | |
35 | from webhelpers2.html.tags import hidden, link_to, password, radio |
|
35 | from webhelpers2.html.tags import hidden, link_to, password, radio | |
36 | from webhelpers2.html.tags import select as webhelpers2_select |
|
36 | from webhelpers2.html.tags import select as webhelpers2_select | |
37 | from webhelpers2.html.tags import submit, text, textarea |
|
37 | from webhelpers2.html.tags import submit, text, textarea | |
38 | from webhelpers2.number import format_byte_size |
|
38 | from webhelpers2.number import format_byte_size | |
39 | from webhelpers2.text import chop_at, truncate, wrap_paragraphs |
|
39 | from webhelpers2.text import chop_at, truncate, wrap_paragraphs | |
40 |
|
40 | |||
41 | from kallithea.config.routing import url |
|
41 | from kallithea.config.routing import url | |
42 | from kallithea.lib.annotate import annotate_highlight |
|
42 | from kallithea.lib.annotate import annotate_highlight | |
43 | #============================================================================== |
|
43 | #============================================================================== | |
44 | # PERMS |
|
44 | # PERMS | |
45 | #============================================================================== |
|
45 | #============================================================================== | |
46 | from kallithea.lib.auth import HasPermissionAny, HasRepoGroupPermissionLevel, HasRepoPermissionLevel |
|
46 | from kallithea.lib.auth import HasPermissionAny, HasRepoGroupPermissionLevel, HasRepoPermissionLevel | |
47 | from kallithea.lib.markup_renderer import url_re |
|
47 | from kallithea.lib.markup_renderer import url_re | |
48 | from kallithea.lib.pygmentsutils import get_custom_lexer |
|
48 | from kallithea.lib.pygmentsutils import get_custom_lexer | |
49 | from kallithea.lib.utils2 import MENTIONS_REGEX, AttributeDict |
|
49 | from kallithea.lib.utils2 import MENTIONS_REGEX, AttributeDict | |
50 | from kallithea.lib.utils2 import age as _age |
|
50 | from kallithea.lib.utils2 import age as _age | |
51 | from kallithea.lib.utils2 import credentials_filter, safe_int, safe_str, safe_unicode, str2bool, time_to_datetime |
|
51 | from kallithea.lib.utils2 import credentials_filter, safe_int, safe_str, safe_unicode, str2bool, time_to_datetime | |
52 | from kallithea.lib.vcs.backends.base import BaseChangeset, EmptyChangeset |
|
52 | from kallithea.lib.vcs.backends.base import BaseChangeset, EmptyChangeset | |
53 | from kallithea.lib.vcs.exceptions import ChangesetDoesNotExistError |
|
53 | from kallithea.lib.vcs.exceptions import ChangesetDoesNotExistError | |
54 | #============================================================================== |
|
54 | #============================================================================== | |
55 | # SCM FILTERS available via h. |
|
55 | # SCM FILTERS available via h. | |
56 | #============================================================================== |
|
56 | #============================================================================== | |
57 | from kallithea.lib.vcs.utils import author_email, author_name |
|
57 | from kallithea.lib.vcs.utils import author_email, author_name | |
58 |
|
58 | |||
59 |
|
59 | |||
60 | log = logging.getLogger(__name__) |
|
60 | log = logging.getLogger(__name__) | |
61 |
|
61 | |||
62 |
|
62 | |||
63 | def canonical_url(*args, **kargs): |
|
63 | def canonical_url(*args, **kargs): | |
64 | '''Like url(x, qualified=True), but returns url that not only is qualified |
|
64 | '''Like url(x, qualified=True), but returns url that not only is qualified | |
65 | but also canonical, as configured in canonical_url''' |
|
65 | but also canonical, as configured in canonical_url''' | |
66 | from kallithea import CONFIG |
|
66 | from kallithea import CONFIG | |
67 | try: |
|
67 | try: | |
68 | parts = CONFIG.get('canonical_url', '').split('://', 1) |
|
68 | parts = CONFIG.get('canonical_url', '').split('://', 1) | |
69 | kargs['host'] = parts[1] |
|
69 | kargs['host'] = parts[1] | |
70 | kargs['protocol'] = parts[0] |
|
70 | kargs['protocol'] = parts[0] | |
71 | except IndexError: |
|
71 | except IndexError: | |
72 | kargs['qualified'] = True |
|
72 | kargs['qualified'] = True | |
73 | return url(*args, **kargs) |
|
73 | return url(*args, **kargs) | |
74 |
|
74 | |||
75 |
|
75 | |||
76 | def canonical_hostname(): |
|
76 | def canonical_hostname(): | |
77 | '''Return canonical hostname of system''' |
|
77 | '''Return canonical hostname of system''' | |
78 | from kallithea import CONFIG |
|
78 | from kallithea import CONFIG | |
79 | try: |
|
79 | try: | |
80 | parts = CONFIG.get('canonical_url', '').split('://', 1) |
|
80 | parts = CONFIG.get('canonical_url', '').split('://', 1) | |
81 | return parts[1].split('/', 1)[0] |
|
81 | return parts[1].split('/', 1)[0] | |
82 | except IndexError: |
|
82 | except IndexError: | |
83 | parts = url('home', qualified=True).split('://', 1) |
|
83 | parts = url('home', qualified=True).split('://', 1) | |
84 | return parts[1].split('/', 1)[0] |
|
84 | return parts[1].split('/', 1)[0] | |
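Both helpers above boil down to two string splits on the configured canonical_url value; a self-contained illustration with a made-up config value:

    # Plain string handling, mirroring canonical_url()/canonical_hostname() above.
    canonical = 'https://kallithea.example.com/prefix'   # made-up CONFIG['canonical_url'] value

    protocol, rest = canonical.split('://', 1)
    hostname = rest.split('/', 1)[0]
    print(protocol)    # -> https
    print(hostname)    # -> kallithea.example.com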
85 |
|
85 | |||
86 |
|
86 | |||
87 | def html_escape(s): |
|
87 | def html_escape(s): | |
88 | """Return string with all html escaped. |
|
88 | """Return string with all html escaped. | |
89 | This is also safe for javascript in html but not necessarily correct. |
|
89 | This is also safe for javascript in html but not necessarily correct. | |
90 | """ |
|
90 | """ | |
91 | return (s |
|
91 | return (s | |
92 | .replace('&', '&') |
|
92 | .replace('&', '&') | |
93 | .replace(">", ">") |
|
93 | .replace(">", ">") | |
94 | .replace("<", "<") |
|
94 | .replace("<", "<") | |
95 | .replace('"', """) |
|
95 | .replace('"', """) | |
96 | .replace("'", "'") # Note: this is HTML5 not HTML4 and might not work in mails |
|
96 | .replace("'", "'") # Note: this is HTML5 not HTML4 and might not work in mails | |
97 | ) |
|
97 | ) | |
98 |
|
98 | |||
99 | def js(value): |
|
99 | def js(value): | |
100 | """Convert Python value to the corresponding JavaScript representation. |
|
100 | """Convert Python value to the corresponding JavaScript representation. | |
101 |
|
101 | |||
102 | This is necessary to safely insert arbitrary values into HTML <script> |
|
102 | This is necessary to safely insert arbitrary values into HTML <script> | |
103 | sections e.g. using Mako template expression substitution. |
|
103 | sections e.g. using Mako template expression substitution. | |
104 |
|
104 | |||
105 | Note: Rather than using this function, it's preferable to avoid the |
|
105 | Note: Rather than using this function, it's preferable to avoid the | |
106 | insertion of values into HTML <script> sections altogether. Instead, |
|
106 | insertion of values into HTML <script> sections altogether. Instead, | |
107 | data should (to the extent possible) be passed to JavaScript using |
|
107 | data should (to the extent possible) be passed to JavaScript using | |
108 | data attributes or AJAX calls, eliminating the need for JS specific |
|
108 | data attributes or AJAX calls, eliminating the need for JS specific | |
109 | escaping. |
|
109 | escaping. | |
110 |
|
110 | |||
111 | Note: This is not safe for use in attributes (e.g. onclick), because |
|
111 | Note: This is not safe for use in attributes (e.g. onclick), because | |
112 | quotes are not escaped. |
|
112 | quotes are not escaped. | |
113 |
|
113 | |||
114 | Because the rules for parsing <script> varies between XHTML (where |
|
114 | Because the rules for parsing <script> varies between XHTML (where | |
115 | normal rules apply for any special characters) and HTML (where |
|
115 | normal rules apply for any special characters) and HTML (where | |
116 | entities are not interpreted, but the literal string "</script>" |
|
116 | entities are not interpreted, but the literal string "</script>" | |
117 | is forbidden), the function ensures that the result never contains |
|
117 | is forbidden), the function ensures that the result never contains | |
118 | '&', '<' and '>', thus making it safe in both those contexts (but |
|
118 | '&', '<' and '>', thus making it safe in both those contexts (but | |
119 | not in attributes). |
|
119 | not in attributes). | |
120 | """ |
|
120 | """ | |
121 | return literal( |
|
121 | return literal( | |
122 | ('(' + json.dumps(value) + ')') |
|
122 | ('(' + json.dumps(value) + ')') | |
123 | # In JSON, the following can only appear in string literals. |
|
123 | # In JSON, the following can only appear in string literals. | |
124 | .replace('&', r'\x26') |
|
124 | .replace('&', r'\x26') | |
125 | .replace('<', r'\x3c') |
|
125 | .replace('<', r'\x3c') | |
126 | .replace('>', r'\x3e') |
|
126 | .replace('>', r'\x3e') | |
127 | ) |
|
127 | ) | |
128 |
|
128 | |||
129 |
|
129 | |||
130 | def jshtml(val): |
|
130 | def jshtml(val): | |
131 | """HTML escapes a string value, then converts the resulting string |
|
131 | """HTML escapes a string value, then converts the resulting string | |
132 | to its corresponding JavaScript representation (see `js`). |
|
132 | to its corresponding JavaScript representation (see `js`). | |
133 |
|
133 | |||
134 | This is used when a plain-text string (possibly containing special |
|
134 | This is used when a plain-text string (possibly containing special | |
135 | HTML characters) will be used by a script in an HTML context (e.g. |
|
135 | HTML characters) will be used by a script in an HTML context (e.g. | |
136 | element.innerHTML or jQuery's 'html' method). |
|
136 | element.innerHTML or jQuery's 'html' method). | |
137 |
|
137 | |||
138 | If in doubt, err on the side of using `jshtml` over `js`, since it's |
|
138 | If in doubt, err on the side of using `jshtml` over `js`, since it's | |
139 | better to escape too much than too little. |
|
139 | better to escape too much than too little. | |
140 | """ |
|
140 | """ | |
141 | return js(escape(val)) |
|
141 | return js(escape(val)) | |
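A self-contained sketch of the escaping js() performs, using only the standard json module; the literal() wrapper from webhelpers2 is left out, so the sketch returns a plain string:

    # Sketch of the js() transformation above, without the webhelpers2 literal() wrapper.
    import json

    def js_sketch(value):
        return (('(' + json.dumps(value) + ')')
                .replace('&', r'\x26')
                .replace('<', r'\x3c')
                .replace('>', r'\x3e'))

    print(js_sketch('</script><b>&'))
    # -> ("\x3c/script\x3e\x3cb\x3e\x26")  -- safe to embed inside a <script> block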
142 |
|
142 | |||
143 |
|
143 | |||
144 | def shorter(s, size=20, firstline=False, postfix='...'): |
|
144 | def shorter(s, size=20, firstline=False, postfix='...'): | |
145 | """Truncate s to size, including the postfix string if truncating. |
|
145 | """Truncate s to size, including the postfix string if truncating. | |
146 | If firstline, truncate at newline. |
|
146 | If firstline, truncate at newline. | |
147 | """ |
|
147 | """ | |
148 | if firstline: |
|
148 | if firstline: | |
149 | s = s.split('\n', 1)[0].rstrip() |
|
149 | s = s.split('\n', 1)[0].rstrip() | |
150 | if len(s) > size: |
|
150 | if len(s) > size: | |
151 | return s[:size - len(postfix)] + postfix |
|
151 | return s[:size - len(postfix)] + postfix | |
152 | return s |
|
152 | return s | |
153 |
|
153 | |||
154 |
|
154 | |||
155 | def reset(name, value, id=NotGiven, **attrs): |
|
155 | def reset(name, value, id=NotGiven, **attrs): | |
156 | """Create a reset button, similar to webhelpers2.html.tags.submit .""" |
|
156 | """Create a reset button, similar to webhelpers2.html.tags.submit .""" | |
157 | return _input("reset", name, value, id, attrs) |
|
157 | return _input("reset", name, value, id, attrs) | |
158 |
|
158 | |||
159 |
|
159 | |||
160 | def select(name, selected_values, options, id=NotGiven, **attrs): |
|
160 | def select(name, selected_values, options, id=NotGiven, **attrs): | |
161 | """Convenient wrapper of webhelpers2 to let it accept options as a tuple list""" |
|
161 | """Convenient wrapper of webhelpers2 to let it accept options as a tuple list""" | |
162 | if isinstance(options, list): |
|
162 | if isinstance(options, list): | |
163 | option_list = options |
|
163 | option_list = options | |
164 | # Handle old value,label lists ... where value also can be value,label lists |
|
164 | # Handle old value,label lists ... where value also can be value,label lists | |
165 | options = Options() |
|
165 | options = Options() | |
166 | for x in option_list: |
|
166 | for x in option_list: | |
167 | if isinstance(x, tuple) and len(x) == 2: |
|
167 | if isinstance(x, tuple) and len(x) == 2: | |
168 | value, label = x |
|
168 | value, label = x | |
169 | elif isinstance(x, basestring): |
|
169 | elif isinstance(x, basestring): | |
170 | value = label = x |
|
170 | value = label = x | |
171 | else: |
|
171 | else: | |
172 | log.error('invalid select option %r', x) |
|
172 | log.error('invalid select option %r', x) | |
173 | raise |
|
173 | raise | |
174 | if isinstance(value, list): |
|
174 | if isinstance(value, list): | |
175 | og = options.add_optgroup(label) |
|
175 | og = options.add_optgroup(label) | |
176 | for x in value: |
|
176 | for x in value: | |
177 | if isinstance(x, tuple) and len(x) == 2: |
|
177 | if isinstance(x, tuple) and len(x) == 2: | |
178 | group_value, group_label = x |
|
178 | group_value, group_label = x | |
179 | elif isinstance(x, basestring): |
|
179 | elif isinstance(x, basestring): | |
180 | group_value = group_label = x |
|
180 | group_value = group_label = x | |
181 | else: |
|
181 | else: | |
182 | log.error('invalid select option %r', x) |
|
182 | log.error('invalid select option %r', x) | |
183 | raise |
|
183 | raise | |
184 | og.add_option(group_label, group_value) |
|
184 | og.add_option(group_label, group_value) | |
185 | else: |
|
185 | else: | |
186 | options.add_option(label, value) |
|
186 | options.add_option(label, value) | |
187 | return webhelpers2_select(name, selected_values, options, id=id, **attrs) |
|
187 | return webhelpers2_select(name, selected_values, options, id=id, **attrs) | |
188 |
|
188 | |||
189 |
|
189 | |||
190 | safeid = _make_safe_id_component |
|
190 | safeid = _make_safe_id_component | |
191 |
|
191 | |||
192 |
|
192 | |||
193 | def FID(raw_id, path): |
|
193 | def FID(raw_id, path): | |
194 | """ |
|
194 | """ | |
195 | Creates a unique ID for filenode based on its hash of path and revision |
|
195 | Creates a unique ID for filenode based on its hash of path and revision | |
196 | it's safe to use in urls |
|
196 | it's safe to use in urls | |
197 |
|
197 | |||
198 | :param raw_id: |
|
198 | :param raw_id: | |
199 | :param path: |
|
199 | :param path: | |
200 | """ |
|
200 | """ | |
201 |
|
201 | |||
202 | return 'C-%s-%s' % (short_id(raw_id), hashlib.md5(safe_str(path)).hexdigest()[:12]) |
|
202 | return 'C-%s-%s' % (short_id(raw_id), hashlib.md5(safe_str(path)).hexdigest()[:12]) | |
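A rough illustration of the ID format built above; short_id() and safe_str() are Kallithea helpers not shown here, so the sketch approximates them with a 12-character slice and a UTF-8 encode, and the revision hash and path are made up.

    # Approximation of FID() above; short_id()/safe_str() are stood in for, values are made up.
    import hashlib

    raw_id = '9d0c3a1bbd2f4e6a8c7b5d4e3f2a1b0c9d8e7f6a'
    path = u'docs/usage/general.rst'

    fid = 'C-%s-%s' % (raw_id[:12], hashlib.md5(path.encode('utf-8')).hexdigest()[:12])
    print(fid)    # -> C-9d0c3a1bbd2f-<first 12 hex chars of the path's md5>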
203 |
|
203 | |||
204 |
|
204 | |||
205 | class _FilesBreadCrumbs(object): |
|
205 | class _FilesBreadCrumbs(object): | |
206 |
|
206 | |||
207 | def __call__(self, repo_name, rev, paths): |
|
207 | def __call__(self, repo_name, rev, paths): | |
208 | if isinstance(paths, str): |
|
208 | if isinstance(paths, str): | |
209 | paths = safe_unicode(paths) |
|
209 | paths = safe_unicode(paths) | |
210 | url_l = [link_to(repo_name, url('files_home', |
|
210 | url_l = [link_to(repo_name, url('files_home', | |
211 | repo_name=repo_name, |
|
211 | repo_name=repo_name, | |
212 | revision=rev, f_path=''), |
|
212 | revision=rev, f_path=''), | |
213 | class_='ypjax-link')] |
|
213 | class_='ypjax-link')] | |
214 | paths_l = paths.split('/') |
|
214 | paths_l = paths.split('/') | |
215 | for cnt, p in enumerate(paths_l): |
|
215 | for cnt, p in enumerate(paths_l): | |
216 | if p != '': |
|
216 | if p != '': | |
217 | url_l.append(link_to(p, |
|
217 | url_l.append(link_to(p, | |
218 | url('files_home', |
|
218 | url('files_home', | |
219 | repo_name=repo_name, |
|
219 | repo_name=repo_name, | |
220 | revision=rev, |
|
220 | revision=rev, | |
221 | f_path='/'.join(paths_l[:cnt + 1]) |
|
221 | f_path='/'.join(paths_l[:cnt + 1]) | |
222 | ), |
|
222 | ), | |
223 | class_='ypjax-link' |
|
223 | class_='ypjax-link' | |
224 | ) |
|
224 | ) | |
225 | ) |
|
225 | ) | |
226 |
|
226 | |||
227 | return literal('/'.join(url_l)) |
|
227 | return literal('/'.join(url_l)) | |
228 |
|
228 | |||
229 |
|
229 | |||
230 | files_breadcrumbs = _FilesBreadCrumbs() |
|
230 | files_breadcrumbs = _FilesBreadCrumbs() | |
231 |
|
231 | |||
232 |
|
232 | |||
233 | class CodeHtmlFormatter(HtmlFormatter): |
|
233 | class CodeHtmlFormatter(HtmlFormatter): | |
234 | """ |
|
234 | """ | |
235 | My code Html Formatter for source codes |
|
235 | My code Html Formatter for source codes | |
236 | """ |
|
236 | """ | |
237 |
|
237 | |||
238 | def wrap(self, source, outfile): |
|
238 | def wrap(self, source, outfile): | |
239 | return self._wrap_div(self._wrap_pre(self._wrap_code(source))) |
|
239 | return self._wrap_div(self._wrap_pre(self._wrap_code(source))) | |
240 |
|
240 | |||
241 | def _wrap_code(self, source): |
|
241 | def _wrap_code(self, source): | |
242 | for cnt, it in enumerate(source): |
|
242 | for cnt, it in enumerate(source): | |
243 | i, t = it |
|
243 | i, t = it | |
244 | t = '<span id="L%s">%s</span>' % (cnt + 1, t) |
|
244 | t = '<span id="L%s">%s</span>' % (cnt + 1, t) | |
245 | yield i, t |
|
245 | yield i, t | |
246 |
|
246 | |||
247 | def _wrap_tablelinenos(self, inner): |
|
247 | def _wrap_tablelinenos(self, inner): | |
248 | inner_lines = [] |
|
248 | inner_lines = [] | |
249 | lncount = 0 |
|
249 | lncount = 0 | |
250 | for t, line in inner: |
|
250 | for t, line in inner: | |
251 | if t: |
|
251 | if t: | |
252 | lncount += 1 |
|
252 | lncount += 1 | |
253 | inner_lines.append(line) |
|
253 | inner_lines.append(line) | |
254 |
|
254 | |||
255 | fl = self.linenostart |
|
255 | fl = self.linenostart | |
256 | mw = len(str(lncount + fl - 1)) |
|
256 | mw = len(str(lncount + fl - 1)) | |
257 | sp = self.linenospecial |
|
257 | sp = self.linenospecial | |
258 | st = self.linenostep |
|
258 | st = self.linenostep | |
259 | la = self.lineanchors |
|
259 | la = self.lineanchors | |
260 | aln = self.anchorlinenos |
|
260 | aln = self.anchorlinenos | |
261 | nocls = self.noclasses |
|
261 | nocls = self.noclasses | |
262 | if sp: |
|
262 | if sp: | |
263 | lines = [] |
|
263 | lines = [] | |
264 |
|
264 | |||
265 | for i in range(fl, fl + lncount): |
|
265 | for i in range(fl, fl + lncount): | |
266 | if i % st == 0: |
|
266 | if i % st == 0: | |
267 | if i % sp == 0: |
|
267 | if i % sp == 0: | |
268 | if aln: |
|
268 | if aln: | |
269 | lines.append('<a href="#%s%d" class="special">%*d</a>' % |
|
269 | lines.append('<a href="#%s%d" class="special">%*d</a>' % | |
270 | (la, i, mw, i)) |
|
270 | (la, i, mw, i)) | |
271 | else: |
|
271 | else: | |
272 | lines.append('<span class="special">%*d</span>' % (mw, i)) |
|
272 | lines.append('<span class="special">%*d</span>' % (mw, i)) | |
273 | else: |
|
273 | else: | |
274 | if aln: |
|
274 | if aln: | |
275 | lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i)) |
|
275 | lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i)) | |
276 | else: |
|
276 | else: | |
277 | lines.append('%*d' % (mw, i)) |
|
277 | lines.append('%*d' % (mw, i)) | |
278 | else: |
|
278 | else: | |
279 | lines.append('') |
|
279 | lines.append('') | |
280 | ls = '\n'.join(lines) |
|
280 | ls = '\n'.join(lines) | |
281 | else: |
|
281 | else: | |
282 | lines = [] |
|
282 | lines = [] | |
283 | for i in range(fl, fl + lncount): |
|
283 | for i in range(fl, fl + lncount): | |
284 | if i % st == 0: |
|
284 | if i % st == 0: | |
285 | if aln: |
|
285 | if aln: | |
286 | lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i)) |
|
286 | lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i)) | |
287 | else: |
|
287 | else: | |
288 | lines.append('%*d' % (mw, i)) |
|
288 | lines.append('%*d' % (mw, i)) | |
289 | else: |
|
289 | else: | |
290 | lines.append('') |
|
290 | lines.append('') | |
291 | ls = '\n'.join(lines) |
|
291 | ls = '\n'.join(lines) | |
292 |
|
292 | |||
293 | # in case you wonder about the seemingly redundant <div> here: since the |
|
293 | # in case you wonder about the seemingly redundant <div> here: since the | |
294 | # content in the other cell also is wrapped in a div, some browsers in |
|
294 | # content in the other cell also is wrapped in a div, some browsers in | |
295 | # some configurations seem to mess up the formatting... |
|
295 | # some configurations seem to mess up the formatting... | |
296 | if nocls: |
|
296 | if nocls: | |
297 | yield 0, ('<table class="%stable">' % self.cssclass + |
|
297 | yield 0, ('<table class="%stable">' % self.cssclass + | |
298 | '<tr><td><div class="linenodiv">' |
|
298 | '<tr><td><div class="linenodiv">' | |
299 | '<pre>' + ls + '</pre></div></td>' |
|
299 | '<pre>' + ls + '</pre></div></td>' | |
300 | '<td id="hlcode" class="code">') |
|
300 | '<td id="hlcode" class="code">') | |
301 | else: |
|
301 | else: | |
302 | yield 0, ('<table class="%stable">' % self.cssclass + |
|
302 | yield 0, ('<table class="%stable">' % self.cssclass + | |
303 | '<tr><td class="linenos"><div class="linenodiv">' |
|
303 | '<tr><td class="linenos"><div class="linenodiv">' | |
304 | '<pre>' + ls + '</pre></div></td>' |
|
304 | '<pre>' + ls + '</pre></div></td>' | |
305 | '<td id="hlcode" class="code">') |
|
305 | '<td id="hlcode" class="code">') | |
306 | yield 0, ''.join(inner_lines) |
|
306 | yield 0, ''.join(inner_lines) | |
307 | yield 0, '</td></tr></table>' |
|
307 | yield 0, '</td></tr></table>' | |
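_wrap_tablelinenos emits a two-cell table: a <pre> gutter of (optionally anchored) line numbers and the code cell with id="hlcode". The gutter logic reduces to the sketch below, with linenostart/linenostep/anchorlinenos passed as plain arguments:

    def lineno_gutter(count, start=1, step=1, anchors=False, prefix='L'):
        # numbers are right-padded to the widest value; lines skipped by
        # `step` stay blank so the gutter keeps its height
        width = len(str(start + count - 1))
        rows = []
        for i in range(start, start + count):
            if i % step == 0:
                num = '%*d' % (width, i)
                rows.append('<a href="#%s%d">%s</a>' % (prefix, i, num) if anchors else num)
            else:
                rows.append('')
        return '\n'.join(rows)

    print(lineno_gutter(5, anchors=True))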
308 |
|
308 | |||
309 |
|
309 | |||
310 | _whitespace_re = re.compile(r'(\t)|( )(?=\n|</div>)') |
|
310 | _whitespace_re = re.compile(r'(\t)|( )(?=\n|</div>)') | |
311 |
|
311 | |||
312 |
|
312 | |||
313 | def _markup_whitespace(m): |
|
313 | def _markup_whitespace(m): | |
314 | groups = m.groups() |
|
314 | groups = m.groups() | |
315 | if groups[0]: |
|
315 | if groups[0]: | |
316 | return '<u>\t</u>' |
|
316 | return '<u>\t</u>' | |
317 | if groups[1]: |
|
317 | if groups[1]: | |
318 | return ' <i></i>' |
|
318 | return ' <i></i>' | |
319 |
|
319 | |||
320 |
|
320 | |||
321 | def markup_whitespace(s): |
|
321 | def markup_whitespace(s): | |
322 | return _whitespace_re.sub(_markup_whitespace, s) |
|
322 | return _whitespace_re.sub(_markup_whitespace, s) | |
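markup_whitespace makes otherwise invisible whitespace reviewable: tabs become <u>...</u> and a space directly before a line end gets an <i></i> marker. A standalone version of the same substitution:

    import re

    _ws = re.compile(r'(\t)|( )(?=\n|</div>)')

    def markup_ws_sketch(s):
        return _ws.sub(lambda m: '<u>\t</u>' if m.group(1) else ' <i></i>', s)

    print(repr(markup_ws_sketch('a\tb \n')))   # prints 'a<u>\t</u>b <i></i>\n'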
323 |
|
323 | |||
324 |
|
324 | |||
325 | def pygmentize(filenode, **kwargs): |
|
325 | def pygmentize(filenode, **kwargs): | |
326 | """ |
|
326 | """ | |
327 | pygmentize function using pygments |
|
327 | pygmentize function using pygments | |
328 |
|
328 | |||
329 | :param filenode: |
|
329 | :param filenode: | |
330 | """ |
|
330 | """ | |
331 | lexer = get_custom_lexer(filenode.extension) or filenode.lexer |
|
331 | lexer = get_custom_lexer(filenode.extension) or filenode.lexer | |
332 | return literal(markup_whitespace( |
|
332 | return literal(markup_whitespace( | |
333 | code_highlight(filenode.content, lexer, CodeHtmlFormatter(**kwargs)))) |
|
333 | code_highlight(safe_unicode(filenode.content), lexer, CodeHtmlFormatter(**kwargs)))) | |
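pygmentize is thin glue: pick a lexer (custom extension mapping first, then the node's own), highlight through Pygments with the table formatter above, and post-process with markup_whitespace. The underlying Pygments round trip, without any Kallithea helpers, looks roughly like:

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter

    # the plain HtmlFormatter here plays the role of CodeHtmlFormatter above
    html = highlight('def f():\n    return 1\n', PythonLexer(), HtmlFormatter(linenos='table'))
    print(html[:120])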
334 |
|
334 | |||
335 |
|
335 | |||
336 | def pygmentize_annotation(repo_name, filenode, **kwargs): |
|
336 | def pygmentize_annotation(repo_name, filenode, **kwargs): | |
337 | """ |
|
337 | """ | |
338 | pygmentize function for annotation |
|
338 | pygmentize function for annotation | |
339 |
|
339 | |||
340 | :param filenode: |
|
340 | :param filenode: | |
341 | """ |
|
341 | """ | |
342 |
|
342 | |||
343 | color_dict = {} |
|
343 | color_dict = {} | |
344 |
|
344 | |||
345 | def gen_color(n=10000): |
|
345 | def gen_color(n=10000): | |
346 | """generator for getting n of evenly distributed colors using |
|
346 | """generator for getting n of evenly distributed colors using | |
347 | hsv color and golden ratio. It always returns the same order of colors
|
347 | hsv color and golden ratio. It always returns the same order of colors | 
348 |
|
348 | |||
349 | :returns: RGB tuple |
|
349 | :returns: RGB tuple | |
350 | """ |
|
350 | """ | |
351 |
|
351 | |||
352 | def hsv_to_rgb(h, s, v): |
|
352 | def hsv_to_rgb(h, s, v): | |
353 | if s == 0.0: |
|
353 | if s == 0.0: | |
354 | return v, v, v |
|
354 | return v, v, v | |
355 | i = int(h * 6.0) # XXX assume int() truncates! |
|
355 | i = int(h * 6.0) # XXX assume int() truncates! | |
356 | f = (h * 6.0) - i |
|
356 | f = (h * 6.0) - i | |
357 | p = v * (1.0 - s) |
|
357 | p = v * (1.0 - s) | |
358 | q = v * (1.0 - s * f) |
|
358 | q = v * (1.0 - s * f) | |
359 | t = v * (1.0 - s * (1.0 - f)) |
|
359 | t = v * (1.0 - s * (1.0 - f)) | |
360 | i = i % 6 |
|
360 | i = i % 6 | |
361 | if i == 0: |
|
361 | if i == 0: | |
362 | return v, t, p |
|
362 | return v, t, p | |
363 | if i == 1: |
|
363 | if i == 1: | |
364 | return q, v, p |
|
364 | return q, v, p | |
365 | if i == 2: |
|
365 | if i == 2: | |
366 | return p, v, t |
|
366 | return p, v, t | |
367 | if i == 3: |
|
367 | if i == 3: | |
368 | return p, q, v |
|
368 | return p, q, v | |
369 | if i == 4: |
|
369 | if i == 4: | |
370 | return t, p, v |
|
370 | return t, p, v | |
371 | if i == 5: |
|
371 | if i == 5: | |
372 | return v, p, q |
|
372 | return v, p, q | |
373 |
|
373 | |||
374 | golden_ratio = 0.618033988749895 |
|
374 | golden_ratio = 0.618033988749895 | |
375 | h = 0.22717784590367374 |
|
375 | h = 0.22717784590367374 | |
376 |
|
376 | |||
377 | for _unused in xrange(n): |
|
377 | for _unused in xrange(n): | |
378 | h += golden_ratio |
|
378 | h += golden_ratio | |
379 | h %= 1 |
|
379 | h %= 1 | |
380 | HSV_tuple = [h, 0.95, 0.95] |
|
380 | HSV_tuple = [h, 0.95, 0.95] | |
381 | RGB_tuple = hsv_to_rgb(*HSV_tuple) |
|
381 | RGB_tuple = hsv_to_rgb(*HSV_tuple) | |
382 | yield [str(int(x * 256)) for x in RGB_tuple] |
|
382 | yield [str(int(x * 256)) for x in RGB_tuple] | |
383 |
|
383 | |||
384 | cgenerator = gen_color() |
|
384 | cgenerator = gen_color() | |
385 |
|
385 | |||
386 | def get_color_string(cs): |
|
386 | def get_color_string(cs): | |
387 | if cs in color_dict: |
|
387 | if cs in color_dict: | |
388 | col = color_dict[cs] |
|
388 | col = color_dict[cs] | |
389 | else: |
|
389 | else: | |
390 | col = color_dict[cs] = cgenerator.next() |
|
390 | col = color_dict[cs] = cgenerator.next() | |
391 | return "color: rgb(%s)! important;" % (', '.join(col)) |
|
391 | return "color: rgb(%s)! important;" % (', '.join(col)) | |
392 |
|
392 | |||
393 | def url_func(repo_name): |
|
393 | def url_func(repo_name): | |
394 |
|
394 | |||
395 | def _url_func(changeset): |
|
395 | def _url_func(changeset): | |
396 | author = escape(changeset.author) |
|
396 | author = escape(changeset.author) | |
397 | date = changeset.date |
|
397 | date = changeset.date | |
398 | message = escape(changeset.message) |
|
398 | message = escape(changeset.message) | |
399 | tooltip_html = ("<b>Author:</b> %s<br/>" |
|
399 | tooltip_html = ("<b>Author:</b> %s<br/>" | |
400 | "<b>Date:</b> %s</b><br/>" |
|
400 | "<b>Date:</b> %s</b><br/>" | |
401 | "<b>Message:</b> %s") % (author, date, message) |
|
401 | "<b>Message:</b> %s") % (author, date, message) | |
402 |
|
402 | |||
403 | lnk_format = show_id(changeset) |
|
403 | lnk_format = show_id(changeset) | |
404 | uri = link_to( |
|
404 | uri = link_to( | |
405 | lnk_format, |
|
405 | lnk_format, | |
406 | url('changeset_home', repo_name=repo_name, |
|
406 | url('changeset_home', repo_name=repo_name, | |
407 | revision=changeset.raw_id), |
|
407 | revision=changeset.raw_id), | |
408 | style=get_color_string(changeset.raw_id), |
|
408 | style=get_color_string(changeset.raw_id), | |
409 | **{'data-toggle': 'popover', |
|
409 | **{'data-toggle': 'popover', | |
410 | 'data-content': tooltip_html} |
|
410 | 'data-content': tooltip_html} | |
411 | ) |
|
411 | ) | |
412 |
|
412 | |||
413 | uri += '\n' |
|
413 | uri += '\n' | |
414 | return uri |
|
414 | return uri | |
415 | return _url_func |
|
415 | return _url_func | |
416 |
|
416 | |||
417 | return literal(markup_whitespace(annotate_highlight(filenode, url_func(repo_name), **kwargs))) |
|
417 | return literal(markup_whitespace(annotate_highlight(filenode, url_func(repo_name), **kwargs))) | |
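gen_color spreads annotation colors by stepping the hue around the circle by the golden-ratio conjugate, so nearby changesets get visibly different colors while the sequence stays deterministic. The stdlib colorsys module gives the same HSV-to-RGB conversion the nested helper implements by hand:

    import colorsys

    def gen_color_sketch(n, h=0.22717784590367374, step=0.618033988749895):
        for _ in range(n):
            h = (h + step) % 1.0
            r, g, b = colorsys.hsv_to_rgb(h, 0.95, 0.95)
            yield tuple(int(x * 256) for x in (r, g, b))

    print(list(gen_color_sketch(3)))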
418 |
|
418 | |||
419 |
|
419 | |||
420 | class _Message(object): |
|
420 | class _Message(object): | |
421 | """A message returned by ``pop_flash_messages()``. |
|
421 | """A message returned by ``pop_flash_messages()``. | |
422 |
|
422 | |||
423 | Converting the message to a string returns the message text. Instances |
|
423 | Converting the message to a string returns the message text. Instances | |
424 | also have the following attributes: |
|
424 | also have the following attributes: | |
425 |
|
425 | |||
426 | * ``message``: the message text. |
|
426 | * ``message``: the message text. | |
427 | * ``category``: the category specified when the message was created. |
|
427 | * ``category``: the category specified when the message was created. | |
428 | """ |
|
428 | """ | |
429 |
|
429 | |||
430 | def __init__(self, category, message): |
|
430 | def __init__(self, category, message): | |
431 | self.category = category |
|
431 | self.category = category | |
432 | self.message = message |
|
432 | self.message = message | |
433 |
|
433 | |||
434 | def __str__(self): |
|
434 | def __str__(self): | |
435 | return self.message |
|
435 | return self.message | |
436 |
|
436 | |||
437 | __unicode__ = __str__ |
|
437 | __unicode__ = __str__ | |
438 |
|
438 | |||
439 | def __html__(self): |
|
439 | def __html__(self): | |
440 | return escape(safe_unicode(self.message)) |
|
440 | return escape(safe_unicode(self.message)) | |
441 |
|
441 | |||
442 |
|
442 | |||
443 | def _session_flash_messages(append=None, clear=False): |
|
443 | def _session_flash_messages(append=None, clear=False): | |
444 | """Manage a message queue in tg.session: return the current message queue |
|
444 | """Manage a message queue in tg.session: return the current message queue | |
445 | after appending the given message, and possibly clearing the queue.""" |
|
445 | after appending the given message, and possibly clearing the queue.""" | |
446 | key = 'flash' |
|
446 | key = 'flash' | |
447 | from tg import session |
|
447 | from tg import session | |
448 | if key in session: |
|
448 | if key in session: | |
449 | flash_messages = session[key] |
|
449 | flash_messages = session[key] | |
450 | else: |
|
450 | else: | |
451 | if append is None: # common fast path - also used for clearing empty queue |
|
451 | if append is None: # common fast path - also used for clearing empty queue | |
452 | return [] # don't bother saving |
|
452 | return [] # don't bother saving | |
453 | flash_messages = [] |
|
453 | flash_messages = [] | |
454 | session[key] = flash_messages |
|
454 | session[key] = flash_messages | |
455 | if append is not None and append not in flash_messages: |
|
455 | if append is not None and append not in flash_messages: | |
456 | flash_messages.append(append) |
|
456 | flash_messages.append(append) | |
457 | if clear: |
|
457 | if clear: | |
458 | session.pop(key, None) |
|
458 | session.pop(key, None) | |
459 | session.save() |
|
459 | session.save() | |
460 | return flash_messages |
|
460 | return flash_messages | |
461 |
|
461 | |||
462 |
|
462 | |||
463 | def flash(message, category=None, logf=None): |
|
463 | def flash(message, category=None, logf=None): | |
464 | """ |
|
464 | """ | |
465 | Show a message to the user _and_ log it through the specified function |
|
465 | Show a message to the user _and_ log it through the specified function | |
466 |
|
466 | |||
467 | category: notice (default), warning, error, success |
|
467 | category: notice (default), warning, error, success | |
468 | logf: a custom log function - such as log.debug |
|
468 | logf: a custom log function - such as log.debug | |
469 |
|
469 | |||
470 | logf defaults to log.info, unless category equals 'success', in which |
|
470 | logf defaults to log.info, unless category equals 'success', in which | |
471 | case logf defaults to log.debug. |
|
471 | case logf defaults to log.debug. | |
472 | """ |
|
472 | """ | |
473 | if logf is None: |
|
473 | if logf is None: | |
474 | logf = log.info |
|
474 | logf = log.info | |
475 | if category == 'success': |
|
475 | if category == 'success': | |
476 | logf = log.debug |
|
476 | logf = log.debug | |
477 |
|
477 | |||
478 | logf('Flash %s: %s', category, message) |
|
478 | logf('Flash %s: %s', category, message) | |
479 |
|
479 | |||
480 | _session_flash_messages(append=(category, message)) |
|
480 | _session_flash_messages(append=(category, message)) | |
481 |
|
481 | |||
482 |
|
482 | |||
483 | def pop_flash_messages(): |
|
483 | def pop_flash_messages(): | |
484 | """Return all accumulated messages and delete them from the session. |
|
484 | """Return all accumulated messages and delete them from the session. | |
485 |
|
485 | |||
486 | The return value is a list of ``Message`` objects. |
|
486 | The return value is a list of ``Message`` objects. | |
487 | """ |
|
487 | """ | |
488 | return [_Message(*m) for m in _session_flash_messages(clear=True)] |
|
488 | return [_Message(*m) for m in _session_flash_messages(clear=True)] | |
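flash() pushes (category, message) tuples onto a session-backed queue, skipping exact duplicates, and pop_flash_messages() drains the queue into _Message objects for the template. The queue logic, with the TurboGears session replaced by a plain dict for the sketch:

    session = {}   # stand-in for tg.session

    def flash_sketch(message, category='notice'):
        queue = session.setdefault('flash', [])
        if (category, message) not in queue:   # avoid repeating identical messages
            queue.append((category, message))

    def pop_flash_sketch():
        return session.pop('flash', [])

    flash_sketch('Repository created', 'success')
    flash_sketch('Repository created', 'success')   # duplicate, ignored
    print(pop_flash_sketch())   # [('success', 'Repository created')]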
489 |
|
489 | |||
490 |
|
490 | |||
491 | age = lambda x, y=False: _age(x, y) |
|
491 | age = lambda x, y=False: _age(x, y) | |
492 | capitalize = lambda x: x.capitalize() |
|
492 | capitalize = lambda x: x.capitalize() | |
493 | email = author_email |
|
493 | email = author_email | |
494 | short_id = lambda x: x[:12] |
|
494 | short_id = lambda x: x[:12] | |
495 | hide_credentials = lambda x: ''.join(credentials_filter(x)) |
|
495 | hide_credentials = lambda x: ''.join(credentials_filter(x)) | |
496 |
|
496 | |||
497 |
|
497 | |||
498 | def show_id(cs): |
|
498 | def show_id(cs): | |
499 | """ |
|
499 | """ | |
500 | Configurable function that shows ID |
|
500 | Configurable function that shows ID | |
501 | by default it's r123:fffeeefffeee |
|
501 | by default it's r123:fffeeefffeee | |
502 |
|
502 | |||
503 | :param cs: changeset instance |
|
503 | :param cs: changeset instance | |
504 | """ |
|
504 | """ | |
505 | from kallithea import CONFIG |
|
505 | from kallithea import CONFIG | |
506 | def_len = safe_int(CONFIG.get('show_sha_length', 12)) |
|
506 | def_len = safe_int(CONFIG.get('show_sha_length', 12)) | |
507 | show_rev = str2bool(CONFIG.get('show_revision_number', False)) |
|
507 | show_rev = str2bool(CONFIG.get('show_revision_number', False)) | |
508 |
|
508 | |||
509 | raw_id = cs.raw_id[:def_len] |
|
509 | raw_id = cs.raw_id[:def_len] | |
510 | if show_rev: |
|
510 | if show_rev: | |
511 | return 'r%s:%s' % (cs.revision, raw_id) |
|
511 | return 'r%s:%s' % (cs.revision, raw_id) | |
512 | else: |
|
512 | else: | |
513 | return raw_id |
|
513 | return raw_id | |
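show_id only depends on two settings: show_sha_length truncates the raw hash and show_revision_number decides whether the numeric revision is prefixed. The same logic with the config passed as arguments instead of read from kallithea.CONFIG:

    def show_id_sketch(revision, raw_id, sha_len=12, show_rev=False):
        short = raw_id[:sha_len]
        return 'r%s:%s' % (revision, short) if show_rev else short

    print(show_id_sketch(123, 'fffeeefffeee00112233'))                  # fffeeefffeee
    print(show_id_sketch(123, 'fffeeefffeee00112233', show_rev=True))   # r123:fffeeefffeee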
514 |
|
514 | |||
515 |
|
515 | |||
516 | def fmt_date(date): |
|
516 | def fmt_date(date): | |
517 | if date: |
|
517 | if date: | |
518 | return date.strftime("%Y-%m-%d %H:%M:%S") |
|
518 | return date.strftime("%Y-%m-%d %H:%M:%S") | |
519 | return "" |
|
519 | return "" | |
520 |
|
520 | |||
521 |
|
521 | |||
522 | def is_git(repository): |
|
522 | def is_git(repository): | |
523 | if hasattr(repository, 'alias'): |
|
523 | if hasattr(repository, 'alias'): | |
524 | _type = repository.alias |
|
524 | _type = repository.alias | |
525 | elif hasattr(repository, 'repo_type'): |
|
525 | elif hasattr(repository, 'repo_type'): | |
526 | _type = repository.repo_type |
|
526 | _type = repository.repo_type | |
527 | else: |
|
527 | else: | |
528 | _type = repository |
|
528 | _type = repository | |
529 | return _type == 'git' |
|
529 | return _type == 'git' | |
530 |
|
530 | |||
531 |
|
531 | |||
532 | def is_hg(repository): |
|
532 | def is_hg(repository): | |
533 | if hasattr(repository, 'alias'): |
|
533 | if hasattr(repository, 'alias'): | |
534 | _type = repository.alias |
|
534 | _type = repository.alias | |
535 | elif hasattr(repository, 'repo_type'): |
|
535 | elif hasattr(repository, 'repo_type'): | |
536 | _type = repository.repo_type |
|
536 | _type = repository.repo_type | |
537 | else: |
|
537 | else: | |
538 | _type = repository |
|
538 | _type = repository | |
539 | return _type == 'hg' |
|
539 | return _type == 'hg' | |
540 |
|
540 | |||
541 |
|
541 | |||
542 | @cache_region('long_term', 'user_attr_or_none') |
|
542 | @cache_region('long_term', 'user_attr_or_none') | |
543 | def user_attr_or_none(author, show_attr): |
|
543 | def user_attr_or_none(author, show_attr): | |
544 | """Try to match email part of VCS committer string with a local user and return show_attr |
|
544 | """Try to match email part of VCS committer string with a local user and return show_attr | |
545 | - or return None if user not found""" |
|
545 | - or return None if user not found""" | |
546 | email = author_email(author) |
|
546 | email = author_email(author) | |
547 | if email: |
|
547 | if email: | |
548 | from kallithea.model.db import User |
|
548 | from kallithea.model.db import User | |
549 | user = User.get_by_email(email, cache=True) # cache will only use sql_cache_short |
|
549 | user = User.get_by_email(email, cache=True) # cache will only use sql_cache_short | |
550 | if user is not None: |
|
550 | if user is not None: | |
551 | return getattr(user, show_attr) |
|
551 | return getattr(user, show_attr) | |
552 | return None |
|
552 | return None | |
553 |
|
553 | |||
554 |
|
554 | |||
555 | def email_or_none(author): |
|
555 | def email_or_none(author): | |
556 | """Try to match email part of VCS committer string with a local user. |
|
556 | """Try to match email part of VCS committer string with a local user. | |
557 | Return primary email of user, email part of the specified author name, or None.""" |
|
557 | Return primary email of user, email part of the specified author name, or None.""" | |
558 | if not author: |
|
558 | if not author: | |
559 | return None |
|
559 | return None | |
560 | email = user_attr_or_none(author, 'email') |
|
560 | email = user_attr_or_none(author, 'email') | |
561 | if email is not None: |
|
561 | if email is not None: | |
562 | return email # always use user's main email address - not necessarily the one used to find user |
|
562 | return email # always use user's main email address - not necessarily the one used to find user | |
563 |
|
563 | |||
564 | # extract email from the commit string |
|
564 | # extract email from the commit string | |
565 | email = author_email(author) |
|
565 | email = author_email(author) | |
566 | if email: |
|
566 | if email: | |
567 | return email |
|
567 | return email | |
568 |
|
568 | |||
569 | # No valid email, not a valid user in the system, none! |
|
569 | # No valid email, not a valid user in the system, none! | |
570 | return None |
|
570 | return None | |
571 |
|
571 | |||
572 |
|
572 | |||
573 | def person(author, show_attr="username"): |
|
573 | def person(author, show_attr="username"): | |
574 | """Find the user identified by 'author', return one of the users attributes, |
|
574 | """Find the user identified by 'author', return one of the users attributes, | |
575 | default to the username attribute, None if there is no user""" |
|
575 | default to the username attribute, None if there is no user""" | |
576 | from kallithea.model.db import User |
|
576 | from kallithea.model.db import User | |
577 | # if author is already an instance use it for extraction |
|
577 | # if author is already an instance use it for extraction | |
578 | if isinstance(author, User): |
|
578 | if isinstance(author, User): | |
579 | return getattr(author, show_attr) |
|
579 | return getattr(author, show_attr) | |
580 |
|
580 | |||
581 | value = user_attr_or_none(author, show_attr) |
|
581 | value = user_attr_or_none(author, show_attr) | |
582 | if value is not None: |
|
582 | if value is not None: | |
583 | return value |
|
583 | return value | |
584 |
|
584 | |||
585 | # Still nothing? Just pass back the author name if any, else the email |
|
585 | # Still nothing? Just pass back the author name if any, else the email | |
586 | return author_name(author) or email(author) |
|
586 | return author_name(author) or email(author) | |
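person() resolves an author string in stages: a User instance is used directly, then the email part is matched against registered users, and finally the plain name or email from the commit string is returned. The fallback order, with a dict standing in for the user table:

    import re

    USERS = {'ada@example.com': 'ada'}    # hypothetical registered users

    def person_sketch(author):
        m = re.search(r'<([^>]+)>', author)
        email = m.group(1) if m else None
        if email and email in USERS:           # known local user wins
            return USERS[email]
        name = author.split('<')[0].strip()
        return name or email                   # otherwise fall back to the commit string

    print(person_sketch('Ada <ada@example.com>'))   # ada
    print(person_sketch('Guest <x@example.org>'))   # Guest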
587 |
|
587 | |||
588 |
|
588 | |||
589 | def person_by_id(id_, show_attr="username"): |
|
589 | def person_by_id(id_, show_attr="username"): | |
590 | from kallithea.model.db import User |
|
590 | from kallithea.model.db import User | |
591 | # attr to return from fetched user |
|
591 | # attr to return from fetched user | |
592 | person_getter = lambda usr: getattr(usr, show_attr) |
|
592 | person_getter = lambda usr: getattr(usr, show_attr) | |
593 |
|
593 | |||
594 | # maybe it's an ID ? |
|
594 | # maybe it's an ID ? | |
595 | if str(id_).isdigit() or isinstance(id_, int): |
|
595 | if str(id_).isdigit() or isinstance(id_, int): | |
596 | id_ = int(id_) |
|
596 | id_ = int(id_) | |
597 | user = User.get(id_) |
|
597 | user = User.get(id_) | |
598 | if user is not None: |
|
598 | if user is not None: | |
599 | return person_getter(user) |
|
599 | return person_getter(user) | |
600 | return id_ |
|
600 | return id_ | |
601 |
|
601 | |||
602 |
|
602 | |||
603 | def boolicon(value): |
|
603 | def boolicon(value): | |
604 | """Returns boolean value of a value, represented as small html image of true/false |
|
604 | """Returns boolean value of a value, represented as small html image of true/false | |
605 | icons |
|
605 | icons | |
606 |
|
606 | |||
607 | :param value: value |
|
607 | :param value: value | |
608 | """ |
|
608 | """ | |
609 |
|
609 | |||
610 | if value: |
|
610 | if value: | |
611 | return HTML.tag('i', class_="icon-ok") |
|
611 | return HTML.tag('i', class_="icon-ok") | |
612 | else: |
|
612 | else: | |
613 | return HTML.tag('i', class_="icon-minus-circled") |
|
613 | return HTML.tag('i', class_="icon-minus-circled") | |
614 |
|
614 | |||
615 |
|
615 | |||
616 | def action_parser(user_log, feed=False, parse_cs=False): |
|
616 | def action_parser(user_log, feed=False, parse_cs=False): | |
617 | """ |
|
617 | """ | |
618 | This helper will action_map the specified string action into translated |
|
618 | This helper will action_map the specified string action into translated | |
619 | fancy names with icons and links |
|
619 | fancy names with icons and links | |
620 |
|
620 | |||
621 | :param user_log: user log instance |
|
621 | :param user_log: user log instance | |
622 | :param feed: use output for feeds (no html and fancy icons) |
|
622 | :param feed: use output for feeds (no html and fancy icons) | |
623 | :param parse_cs: parse Changesets into VCS instances |
|
623 | :param parse_cs: parse Changesets into VCS instances | |
624 | """ |
|
624 | """ | |
625 |
|
625 | |||
626 | action = user_log.action |
|
626 | action = user_log.action | |
627 | action_params = ' ' |
|
627 | action_params = ' ' | |
628 |
|
628 | |||
629 | x = action.split(':') |
|
629 | x = action.split(':') | |
630 |
|
630 | |||
631 | if len(x) > 1: |
|
631 | if len(x) > 1: | |
632 | action, action_params = x |
|
632 | action, action_params = x | |
633 |
|
633 | |||
634 | def get_cs_links(): |
|
634 | def get_cs_links(): | |
635 | revs_limit = 3 # display this amount always |
|
635 | revs_limit = 3 # display this amount always | |
636 | revs_top_limit = 50 # show up to this amount of changesets hidden |
|
636 | revs_top_limit = 50 # show up to this amount of changesets hidden | |
637 | revs_ids = action_params.split(',') |
|
637 | revs_ids = action_params.split(',') | |
638 | deleted = user_log.repository is None |
|
638 | deleted = user_log.repository is None | |
639 | if deleted: |
|
639 | if deleted: | |
640 | return ','.join(revs_ids) |
|
640 | return ','.join(revs_ids) | |
641 |
|
641 | |||
642 | repo_name = user_log.repository.repo_name |
|
642 | repo_name = user_log.repository.repo_name | |
643 |
|
643 | |||
644 | def lnk(rev, repo_name): |
|
644 | def lnk(rev, repo_name): | |
645 | lazy_cs = False |
|
645 | lazy_cs = False | |
646 | title_ = None |
|
646 | title_ = None | |
647 | url_ = '#' |
|
647 | url_ = '#' | |
648 | if isinstance(rev, BaseChangeset) or isinstance(rev, AttributeDict): |
|
648 | if isinstance(rev, BaseChangeset) or isinstance(rev, AttributeDict): | |
649 | if rev.op and rev.ref_name: |
|
649 | if rev.op and rev.ref_name: | |
650 | if rev.op == 'delete_branch': |
|
650 | if rev.op == 'delete_branch': | |
651 | lbl = _('Deleted branch: %s') % rev.ref_name |
|
651 | lbl = _('Deleted branch: %s') % rev.ref_name | |
652 | elif rev.op == 'tag': |
|
652 | elif rev.op == 'tag': | |
653 | lbl = _('Created tag: %s') % rev.ref_name |
|
653 | lbl = _('Created tag: %s') % rev.ref_name | |
654 | else: |
|
654 | else: | |
655 | lbl = 'Unknown operation %s' % rev.op |
|
655 | lbl = 'Unknown operation %s' % rev.op | |
656 | else: |
|
656 | else: | |
657 | lazy_cs = True |
|
657 | lazy_cs = True | |
658 | lbl = rev.short_id[:8] |
|
658 | lbl = rev.short_id[:8] | |
659 | url_ = url('changeset_home', repo_name=repo_name, |
|
659 | url_ = url('changeset_home', repo_name=repo_name, | |
660 | revision=rev.raw_id) |
|
660 | revision=rev.raw_id) | |
661 | else: |
|
661 | else: | |
662 | # changeset cannot be found - it might have been stripped or removed |
|
662 | # changeset cannot be found - it might have been stripped or removed | |
663 | lbl = rev[:12] |
|
663 | lbl = rev[:12] | |
664 | title_ = _('Changeset %s not found') % lbl |
|
664 | title_ = _('Changeset %s not found') % lbl | |
665 | if parse_cs: |
|
665 | if parse_cs: | |
666 | return link_to(lbl, url_, title=title_, **{'data-toggle': 'tooltip'}) |
|
666 | return link_to(lbl, url_, title=title_, **{'data-toggle': 'tooltip'}) | |
667 | return link_to(lbl, url_, class_='lazy-cs' if lazy_cs else '', |
|
667 | return link_to(lbl, url_, class_='lazy-cs' if lazy_cs else '', | |
668 | **{'data-raw_id': rev.raw_id, 'data-repo_name': repo_name}) |
|
668 | **{'data-raw_id': rev.raw_id, 'data-repo_name': repo_name}) | |
669 |
|
669 | |||
670 | def _get_op(rev_txt): |
|
670 | def _get_op(rev_txt): | |
671 | _op = None |
|
671 | _op = None | |
672 | _name = rev_txt |
|
672 | _name = rev_txt | |
673 | if len(rev_txt.split('=>')) == 2: |
|
673 | if len(rev_txt.split('=>')) == 2: | |
674 | _op, _name = rev_txt.split('=>') |
|
674 | _op, _name = rev_txt.split('=>') | |
675 | return _op, _name |
|
675 | return _op, _name | |
676 |
|
676 | |||
677 | revs = [] |
|
677 | revs = [] | |
678 | if len([v for v in revs_ids if v != '']) > 0: |
|
678 | if len([v for v in revs_ids if v != '']) > 0: | |
679 | repo = None |
|
679 | repo = None | |
680 | for rev in revs_ids[:revs_top_limit]: |
|
680 | for rev in revs_ids[:revs_top_limit]: | |
681 | _op, _name = _get_op(rev) |
|
681 | _op, _name = _get_op(rev) | |
682 |
|
682 | |||
683 | # we want parsed changesets, or new log store format is bad |
|
683 | # we want parsed changesets, or new log store format is bad | |
684 | if parse_cs: |
|
684 | if parse_cs: | |
685 | try: |
|
685 | try: | |
686 | if repo is None: |
|
686 | if repo is None: | |
687 | repo = user_log.repository.scm_instance |
|
687 | repo = user_log.repository.scm_instance | |
688 | _rev = repo.get_changeset(rev) |
|
688 | _rev = repo.get_changeset(rev) | |
689 | revs.append(_rev) |
|
689 | revs.append(_rev) | |
690 | except ChangesetDoesNotExistError: |
|
690 | except ChangesetDoesNotExistError: | |
691 | log.error('cannot find revision %s in this repo', rev) |
|
691 | log.error('cannot find revision %s in this repo', rev) | |
692 | revs.append(rev) |
|
692 | revs.append(rev) | |
693 | else: |
|
693 | else: | |
694 | _rev = AttributeDict({ |
|
694 | _rev = AttributeDict({ | |
695 | 'short_id': rev[:12], |
|
695 | 'short_id': rev[:12], | |
696 | 'raw_id': rev, |
|
696 | 'raw_id': rev, | |
697 | 'message': '', |
|
697 | 'message': '', | |
698 | 'op': _op, |
|
698 | 'op': _op, | |
699 | 'ref_name': _name |
|
699 | 'ref_name': _name | |
700 | }) |
|
700 | }) | |
701 | revs.append(_rev) |
|
701 | revs.append(_rev) | |
702 | cs_links = [" " + ', '.join( |
|
702 | cs_links = [" " + ', '.join( | |
703 | [lnk(rev, repo_name) for rev in revs[:revs_limit]] |
|
703 | [lnk(rev, repo_name) for rev in revs[:revs_limit]] | |
704 | )] |
|
704 | )] | |
705 | _op1, _name1 = _get_op(revs_ids[0]) |
|
705 | _op1, _name1 = _get_op(revs_ids[0]) | |
706 | _op2, _name2 = _get_op(revs_ids[-1]) |
|
706 | _op2, _name2 = _get_op(revs_ids[-1]) | |
707 |
|
707 | |||
708 | _rev = '%s...%s' % (_name1, _name2) |
|
708 | _rev = '%s...%s' % (_name1, _name2) | |
709 |
|
709 | |||
710 | compare_view = ( |
|
710 | compare_view = ( | |
711 | ' <div class="compare_view" data-toggle="tooltip" title="%s">' |
|
711 | ' <div class="compare_view" data-toggle="tooltip" title="%s">' | |
712 | '<a href="%s">%s</a> </div>' % ( |
|
712 | '<a href="%s">%s</a> </div>' % ( | |
713 | _('Show all combined changesets %s->%s') % ( |
|
713 | _('Show all combined changesets %s->%s') % ( | |
714 | revs_ids[0][:12], revs_ids[-1][:12] |
|
714 | revs_ids[0][:12], revs_ids[-1][:12] | |
715 | ), |
|
715 | ), | |
716 | url('changeset_home', repo_name=repo_name, |
|
716 | url('changeset_home', repo_name=repo_name, | |
717 | revision=_rev |
|
717 | revision=_rev | |
718 | ), |
|
718 | ), | |
719 | _('Compare view') |
|
719 | _('Compare view') | |
720 | ) |
|
720 | ) | |
721 | ) |
|
721 | ) | |
722 |
|
722 | |||
723 | # if we have exactly one more than normally displayed |
|
723 | # if we have exactly one more than normally displayed | |
724 | # just display it, takes less space than displaying |
|
724 | # just display it, takes less space than displaying | |
725 | # "and 1 more revisions" |
|
725 | # "and 1 more revisions" | |
726 | if len(revs_ids) == revs_limit + 1: |
|
726 | if len(revs_ids) == revs_limit + 1: | |
727 | cs_links.append(", " + lnk(revs[revs_limit], repo_name)) |
|
727 | cs_links.append(", " + lnk(revs[revs_limit], repo_name)) | |
728 |
|
728 | |||
729 | # hidden-by-default ones |
|
729 | # hidden-by-default ones | |
730 | if len(revs_ids) > revs_limit + 1: |
|
730 | if len(revs_ids) > revs_limit + 1: | |
731 | uniq_id = revs_ids[0] |
|
731 | uniq_id = revs_ids[0] | |
732 | html_tmpl = ( |
|
732 | html_tmpl = ( | |
733 | '<span> %s <a class="show_more" id="_%s" ' |
|
733 | '<span> %s <a class="show_more" id="_%s" ' | |
734 | 'href="#more">%s</a> %s</span>' |
|
734 | 'href="#more">%s</a> %s</span>' | |
735 | ) |
|
735 | ) | |
736 | if not feed: |
|
736 | if not feed: | |
737 | cs_links.append(html_tmpl % ( |
|
737 | cs_links.append(html_tmpl % ( | |
738 | _('and'), |
|
738 | _('and'), | |
739 | uniq_id, _('%s more') % (len(revs_ids) - revs_limit), |
|
739 | uniq_id, _('%s more') % (len(revs_ids) - revs_limit), | |
740 | _('revisions') |
|
740 | _('revisions') | |
741 | ) |
|
741 | ) | |
742 | ) |
|
742 | ) | |
743 |
|
743 | |||
744 | if not feed: |
|
744 | if not feed: | |
745 | html_tmpl = '<span id="%s" style="display:none">, %s </span>' |
|
745 | html_tmpl = '<span id="%s" style="display:none">, %s </span>' | |
746 | else: |
|
746 | else: | |
747 | html_tmpl = '<span id="%s"> %s </span>' |
|
747 | html_tmpl = '<span id="%s"> %s </span>' | |
748 |
|
748 | |||
749 | morelinks = ', '.join( |
|
749 | morelinks = ', '.join( | |
750 | [lnk(rev, repo_name) for rev in revs[revs_limit:]] |
|
750 | [lnk(rev, repo_name) for rev in revs[revs_limit:]] | |
751 | ) |
|
751 | ) | |
752 |
|
752 | |||
753 | if len(revs_ids) > revs_top_limit: |
|
753 | if len(revs_ids) > revs_top_limit: | |
754 | morelinks += ', ...' |
|
754 | morelinks += ', ...' | |
755 |
|
755 | |||
756 | cs_links.append(html_tmpl % (uniq_id, morelinks)) |
|
756 | cs_links.append(html_tmpl % (uniq_id, morelinks)) | |
757 | if len(revs) > 1: |
|
757 | if len(revs) > 1: | |
758 | cs_links.append(compare_view) |
|
758 | cs_links.append(compare_view) | |
759 | return ''.join(cs_links) |
|
759 | return ''.join(cs_links) | |
760 |
|
760 | |||
761 | def get_fork_name(): |
|
761 | def get_fork_name(): | |
762 | repo_name = action_params |
|
762 | repo_name = action_params | |
763 | url_ = url('summary_home', repo_name=repo_name) |
|
763 | url_ = url('summary_home', repo_name=repo_name) | |
764 | return _('Fork name %s') % link_to(action_params, url_) |
|
764 | return _('Fork name %s') % link_to(action_params, url_) | |
765 |
|
765 | |||
766 | def get_user_name(): |
|
766 | def get_user_name(): | |
767 | user_name = action_params |
|
767 | user_name = action_params | |
768 | return user_name |
|
768 | return user_name | |
769 |
|
769 | |||
770 | def get_users_group(): |
|
770 | def get_users_group(): | |
771 | group_name = action_params |
|
771 | group_name = action_params | |
772 | return group_name |
|
772 | return group_name | |
773 |
|
773 | |||
774 | def get_pull_request(): |
|
774 | def get_pull_request(): | |
775 | from kallithea.model.db import PullRequest |
|
775 | from kallithea.model.db import PullRequest | |
776 | pull_request_id = action_params |
|
776 | pull_request_id = action_params | |
777 | nice_id = PullRequest.make_nice_id(pull_request_id) |
|
777 | nice_id = PullRequest.make_nice_id(pull_request_id) | |
778 |
|
778 | |||
779 | deleted = user_log.repository is None |
|
779 | deleted = user_log.repository is None | |
780 | if deleted: |
|
780 | if deleted: | |
781 | repo_name = user_log.repository_name |
|
781 | repo_name = user_log.repository_name | |
782 | else: |
|
782 | else: | |
783 | repo_name = user_log.repository.repo_name |
|
783 | repo_name = user_log.repository.repo_name | |
784 |
|
784 | |||
785 | return link_to(_('Pull request %s') % nice_id, |
|
785 | return link_to(_('Pull request %s') % nice_id, | |
786 | url('pullrequest_show', repo_name=repo_name, |
|
786 | url('pullrequest_show', repo_name=repo_name, | |
787 | pull_request_id=pull_request_id)) |
|
787 | pull_request_id=pull_request_id)) | |
788 |
|
788 | |||
789 | def get_archive_name(): |
|
789 | def get_archive_name(): | |
790 | archive_name = action_params |
|
790 | archive_name = action_params | |
791 | return archive_name |
|
791 | return archive_name | |
792 |
|
792 | |||
793 | # action : translated str, callback(extractor), icon |
|
793 | # action : translated str, callback(extractor), icon | |
794 | action_map = { |
|
794 | action_map = { | |
795 | 'user_deleted_repo': (_('[deleted] repository'), |
|
795 | 'user_deleted_repo': (_('[deleted] repository'), | |
796 | None, 'icon-trashcan'), |
|
796 | None, 'icon-trashcan'), | |
797 | 'user_created_repo': (_('[created] repository'), |
|
797 | 'user_created_repo': (_('[created] repository'), | |
798 | None, 'icon-plus'), |
|
798 | None, 'icon-plus'), | |
799 | 'user_created_fork': (_('[created] repository as fork'), |
|
799 | 'user_created_fork': (_('[created] repository as fork'), | |
800 | None, 'icon-fork'), |
|
800 | None, 'icon-fork'), | |
801 | 'user_forked_repo': (_('[forked] repository'), |
|
801 | 'user_forked_repo': (_('[forked] repository'), | |
802 | get_fork_name, 'icon-fork'), |
|
802 | get_fork_name, 'icon-fork'), | |
803 | 'user_updated_repo': (_('[updated] repository'), |
|
803 | 'user_updated_repo': (_('[updated] repository'), | |
804 | None, 'icon-pencil'), |
|
804 | None, 'icon-pencil'), | |
805 | 'user_downloaded_archive': (_('[downloaded] archive from repository'), |
|
805 | 'user_downloaded_archive': (_('[downloaded] archive from repository'), | |
806 | get_archive_name, 'icon-download-cloud'), |
|
806 | get_archive_name, 'icon-download-cloud'), | |
807 | 'admin_deleted_repo': (_('[delete] repository'), |
|
807 | 'admin_deleted_repo': (_('[delete] repository'), | |
808 | None, 'icon-trashcan'), |
|
808 | None, 'icon-trashcan'), | |
809 | 'admin_created_repo': (_('[created] repository'), |
|
809 | 'admin_created_repo': (_('[created] repository'), | |
810 | None, 'icon-plus'), |
|
810 | None, 'icon-plus'), | |
811 | 'admin_forked_repo': (_('[forked] repository'), |
|
811 | 'admin_forked_repo': (_('[forked] repository'), | |
812 | None, 'icon-fork'), |
|
812 | None, 'icon-fork'), | |
813 | 'admin_updated_repo': (_('[updated] repository'), |
|
813 | 'admin_updated_repo': (_('[updated] repository'), | |
814 | None, 'icon-pencil'), |
|
814 | None, 'icon-pencil'), | |
815 | 'admin_created_user': (_('[created] user'), |
|
815 | 'admin_created_user': (_('[created] user'), | |
816 | get_user_name, 'icon-user'), |
|
816 | get_user_name, 'icon-user'), | |
817 | 'admin_updated_user': (_('[updated] user'), |
|
817 | 'admin_updated_user': (_('[updated] user'), | |
818 | get_user_name, 'icon-user'), |
|
818 | get_user_name, 'icon-user'), | |
819 | 'admin_created_users_group': (_('[created] user group'), |
|
819 | 'admin_created_users_group': (_('[created] user group'), | |
820 | get_users_group, 'icon-pencil'), |
|
820 | get_users_group, 'icon-pencil'), | |
821 | 'admin_updated_users_group': (_('[updated] user group'), |
|
821 | 'admin_updated_users_group': (_('[updated] user group'), | |
822 | get_users_group, 'icon-pencil'), |
|
822 | get_users_group, 'icon-pencil'), | |
823 | 'user_commented_revision': (_('[commented] on revision in repository'), |
|
823 | 'user_commented_revision': (_('[commented] on revision in repository'), | |
824 | get_cs_links, 'icon-comment'), |
|
824 | get_cs_links, 'icon-comment'), | |
825 | 'user_commented_pull_request': (_('[commented] on pull request for'), |
|
825 | 'user_commented_pull_request': (_('[commented] on pull request for'), | |
826 | get_pull_request, 'icon-comment'), |
|
826 | get_pull_request, 'icon-comment'), | |
827 | 'user_closed_pull_request': (_('[closed] pull request for'), |
|
827 | 'user_closed_pull_request': (_('[closed] pull request for'), | |
828 | get_pull_request, 'icon-ok'), |
|
828 | get_pull_request, 'icon-ok'), | |
829 | 'push': (_('[pushed] into'), |
|
829 | 'push': (_('[pushed] into'), | |
830 | get_cs_links, 'icon-move-up'), |
|
830 | get_cs_links, 'icon-move-up'), | |
831 | 'push_local': (_('[committed via Kallithea] into repository'), |
|
831 | 'push_local': (_('[committed via Kallithea] into repository'), | |
832 | get_cs_links, 'icon-pencil'), |
|
832 | get_cs_links, 'icon-pencil'), | |
833 | 'push_remote': (_('[pulled from remote] into repository'), |
|
833 | 'push_remote': (_('[pulled from remote] into repository'), | |
834 | get_cs_links, 'icon-move-up'), |
|
834 | get_cs_links, 'icon-move-up'), | |
835 | 'pull': (_('[pulled] from'), |
|
835 | 'pull': (_('[pulled] from'), | |
836 | None, 'icon-move-down'), |
|
836 | None, 'icon-move-down'), | |
837 | 'started_following_repo': (_('[started following] repository'), |
|
837 | 'started_following_repo': (_('[started following] repository'), | |
838 | None, 'icon-heart'), |
|
838 | None, 'icon-heart'), | |
839 | 'stopped_following_repo': (_('[stopped following] repository'), |
|
839 | 'stopped_following_repo': (_('[stopped following] repository'), | |
840 | None, 'icon-heart-empty'), |
|
840 | None, 'icon-heart-empty'), | |
841 | } |
|
841 | } | |
842 |
|
842 | |||
843 | action_str = action_map.get(action, action) |
|
843 | action_str = action_map.get(action, action) | |
844 | if feed: |
|
844 | if feed: | |
845 | action = action_str[0].replace('[', '').replace(']', '') |
|
845 | action = action_str[0].replace('[', '').replace(']', '') | |
846 | else: |
|
846 | else: | |
847 | action = action_str[0] \ |
|
847 | action = action_str[0] \ | |
848 | .replace('[', '<b>') \ |
|
848 | .replace('[', '<b>') \ | |
849 | .replace(']', '</b>') |
|
849 | .replace(']', '</b>') | |
850 |
|
850 | |||
851 | action_params_func = lambda: "" |
|
851 | action_params_func = lambda: "" | |
852 |
|
852 | |||
853 | if callable(action_str[1]): |
|
853 | if callable(action_str[1]): | |
854 | action_params_func = action_str[1] |
|
854 | action_params_func = action_str[1] | |
855 |
|
855 | |||
856 | def action_parser_icon(): |
|
856 | def action_parser_icon(): | |
857 | action = user_log.action |
|
857 | action = user_log.action | |
858 | action_params = None |
|
858 | action_params = None | |
859 | x = action.split(':') |
|
859 | x = action.split(':') | |
860 |
|
860 | |||
861 | if len(x) > 1: |
|
861 | if len(x) > 1: | |
862 | action, action_params = x |
|
862 | action, action_params = x | |
863 |
|
863 | |||
864 | ico = action_map.get(action, ['', '', ''])[2] |
|
864 | ico = action_map.get(action, ['', '', ''])[2] | |
865 | html = """<i class="%s"></i>""" % ico |
|
865 | html = """<i class="%s"></i>""" % ico | |
866 | return literal(html) |
|
866 | return literal(html) | |
867 |
|
867 | |||
868 | # returned callbacks we need to call to get |
|
868 | # returned callbacks we need to call to get | |
869 | return [lambda: literal(action), action_params_func, action_parser_icon] |
|
869 | return [lambda: literal(action), action_params_func, action_parser_icon] | |
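action_parser deliberately returns three callables rather than strings, so templates can render the action text, the parameters (changeset links, fork name, pull request link, ...) and the icon lazily and independently. Consuming the triple looks roughly like this, with a hand-written stand-in for the returned callbacks:

    # hypothetical stand-in for the (action, params, icon) callbacks returned above
    parsed = (lambda: '<b>pushed</b> into',
              lambda: ' abcdef123456, 0123abcd4567',
              lambda: '<i class="icon-move-up"></i>')

    action_text, action_params, action_icon = parsed
    print(action_icon() + ' ' + action_text() + action_params())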
870 |
|
870 | |||
871 |
|
871 | |||
872 | #============================================================================== |
|
872 | #============================================================================== | |
873 | # GRAVATAR URL |
|
873 | # GRAVATAR URL | |
874 | #============================================================================== |
|
874 | #============================================================================== | |
875 | def gravatar_div(email_address, cls='', size=30, **div_attributes): |
|
875 | def gravatar_div(email_address, cls='', size=30, **div_attributes): | |
876 | """Return an html literal with a span around a gravatar if they are enabled. |
|
876 | """Return an html literal with a span around a gravatar if they are enabled. | |
877 | Extra keyword parameters starting with 'div_' will get the prefix removed |
|
877 | Extra keyword parameters starting with 'div_' will get the prefix removed | |
878 | and '_' changed to '-' and be used as attributes on the div. The default |
|
878 | and '_' changed to '-' and be used as attributes on the div. The default | |
879 | class is 'gravatar'. |
|
879 | class is 'gravatar'. | |
880 | """ |
|
880 | """ | |
881 | from tg import tmpl_context as c |
|
881 | from tg import tmpl_context as c | |
882 | if not c.visual.use_gravatar: |
|
882 | if not c.visual.use_gravatar: | |
883 | return '' |
|
883 | return '' | |
884 | if 'div_class' not in div_attributes: |
|
884 | if 'div_class' not in div_attributes: | |
885 | div_attributes['div_class'] = "gravatar" |
|
885 | div_attributes['div_class'] = "gravatar" | |
886 | attributes = [] |
|
886 | attributes = [] | |
887 | for k, v in sorted(div_attributes.items()): |
|
887 | for k, v in sorted(div_attributes.items()): | |
888 | assert k.startswith('div_'), k |
|
888 | assert k.startswith('div_'), k | |
889 | attributes.append(' %s="%s"' % (k[4:].replace('_', '-'), escape(v))) |
|
889 | attributes.append(' %s="%s"' % (k[4:].replace('_', '-'), escape(v))) | |
890 | return literal("""<span%s>%s</span>""" % |
|
890 | return literal("""<span%s>%s</span>""" % | |
891 | (''.join(attributes), |
|
891 | (''.join(attributes), | |
892 | gravatar(email_address, cls=cls, size=size))) |
|
892 | gravatar(email_address, cls=cls, size=size))) | |
893 |
|
893 | |||
894 |
|
894 | |||
895 | def gravatar(email_address, cls='', size=30): |
|
895 | def gravatar(email_address, cls='', size=30): | |
896 | """return html element of the gravatar |
|
896 | """return html element of the gravatar | |
897 |
|
897 | |||
898 | This method will return an <img> with the resolution double the size (for |
|
898 | This method will return an <img> with the resolution double the size (for | |
899 | retina screens) of the image. If the url returned from gravatar_url is |
|
899 | retina screens) of the image. If the url returned from gravatar_url is | |
900 | empty then we fallback to using an icon. |
|
900 | empty then we fallback to using an icon. | |
901 |
|
901 | |||
902 | """ |
|
902 | """ | |
903 | from tg import tmpl_context as c |
|
903 | from tg import tmpl_context as c | |
904 | if not c.visual.use_gravatar: |
|
904 | if not c.visual.use_gravatar: | |
905 | return '' |
|
905 | return '' | |
906 |
|
906 | |||
907 | src = gravatar_url(email_address, size * 2) |
|
907 | src = gravatar_url(email_address, size * 2) | |
908 |
|
908 | |||
909 | if src: |
|
909 | if src: | |
910 | # here it makes sense to use style="width: ..." (instead of, say, a |
|
910 | # here it makes sense to use style="width: ..." (instead of, say, a | |
911 | # stylesheet) because we are using this to generate a high-res (retina) size
|
911 | # stylesheet) because we are using this to generate a high-res (retina) size | 
912 | html = ('<i class="icon-gravatar {cls}"' |
|
912 | html = ('<i class="icon-gravatar {cls}"' | |
913 | ' style="font-size: {size}px;background-size: {size}px;background-image: url(\'{src}\')"' |
|
913 | ' style="font-size: {size}px;background-size: {size}px;background-image: url(\'{src}\')"' | |
914 | '></i>').format(cls=cls, size=size, src=src) |
|
914 | '></i>').format(cls=cls, size=size, src=src) | |
915 |
|
915 | |||
916 | else: |
|
916 | else: | |
917 | # if src is empty then there was no gravatar, so we use a font icon |
|
917 | # if src is empty then there was no gravatar, so we use a font icon | |
918 | html = ("""<i class="icon-user {cls}" style="font-size: {size}px;"></i>""" |
|
918 | html = ("""<i class="icon-user {cls}" style="font-size: {size}px;"></i>""" | |
919 | .format(cls=cls, size=size, src=src)) |
|
919 | .format(cls=cls, size=size, src=src)) | |
920 |
|
920 | |||
921 | return literal(html) |
|
921 | return literal(html) | |
922 |
|
922 | |||
923 |
|
923 | |||
924 | def gravatar_url(email_address, size=30, default=''): |
|
924 | def gravatar_url(email_address, size=30, default=''): | |
925 | # doh, we need to re-import those to mock it later |
|
925 | # doh, we need to re-import those to mock it later | |
926 | from kallithea.config.routing import url |
|
926 | from kallithea.config.routing import url | |
927 | from kallithea.model.db import User |
|
927 | from kallithea.model.db import User | |
928 | from tg import tmpl_context as c |
|
928 | from tg import tmpl_context as c | |
929 | if not c.visual.use_gravatar: |
|
929 | if not c.visual.use_gravatar: | |
930 | return "" |
|
930 | return "" | |
931 |
|
931 | |||
932 | _def = 'anonymous@kallithea-scm.org' # default gravatar |
|
932 | _def = 'anonymous@kallithea-scm.org' # default gravatar | |
933 | email_address = email_address or _def |
|
933 | email_address = email_address or _def | |
934 |
|
934 | |||
935 | if email_address == _def: |
|
935 | if email_address == _def: | |
936 | return default |
|
936 | return default | |
937 |
|
937 | |||
938 | parsed_url = urlparse.urlparse(url.current(qualified=True)) |
|
938 | parsed_url = urlparse.urlparse(url.current(qualified=True)) | |
939 | url = (c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL) \ |
|
939 | url = (c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL) \ | |
940 | .replace('{email}', email_address) \ |
|
940 | .replace('{email}', email_address) \ | |
941 | .replace('{md5email}', hashlib.md5(safe_str(email_address).lower()).hexdigest()) \ |
|
941 | .replace('{md5email}', hashlib.md5(safe_str(email_address).lower()).hexdigest()) \ | |
942 | .replace('{netloc}', parsed_url.netloc) \ |
|
942 | .replace('{netloc}', parsed_url.netloc) \ | |
943 | .replace('{scheme}', parsed_url.scheme) \ |
|
943 | .replace('{scheme}', parsed_url.scheme) \ | |
944 | .replace('{size}', str(size)) |
|
944 | .replace('{size}', str(size)) | |
945 | return url |
|
945 | return url | |
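gravatar_url fills a URL template with {email}, {md5email}, {size}, {scheme} and {netloc} placeholders; the md5 of the lower-cased address is what external avatar services key on. A sketch against a typical Gravatar-style template (the template string here is an illustration, not necessarily User.DEFAULT_GRAVATAR_URL):

    import hashlib

    TEMPLATE = 'https://secure.gravatar.com/avatar/{md5email}?d=identicon&s={size}'

    def gravatar_url_sketch(email, size=30):
        md5email = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
        return TEMPLATE.replace('{md5email}', md5email).replace('{size}', str(size))

    print(gravatar_url_sketch('ada@example.com', 60))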
946 |
|
946 | |||
947 |
|
947 | |||
948 | def changed_tooltip(nodes): |
|
948 | def changed_tooltip(nodes): | |
949 | """ |
|
949 | """ | |
950 | Generates a html string for changed nodes in changeset page. |
|
950 | Generates a html string for changed nodes in changeset page. | |
951 | It limits the output to 30 entries |
|
951 | It limits the output to 30 entries | |
952 |
|
952 | |||
953 | :param nodes: LazyNodesGenerator |
|
953 | :param nodes: LazyNodesGenerator | |
954 | """ |
|
954 | """ | |
955 | if nodes: |
|
955 | if nodes: | |
956 | pref = ': <br/> ' |
|
956 | pref = ': <br/> ' | |
957 | suf = '' |
|
957 | suf = '' | |
958 | if len(nodes) > 30: |
|
958 | if len(nodes) > 30: | |
959 | suf = '<br/>' + _(' and %s more') % (len(nodes) - 30) |
|
959 | suf = '<br/>' + _(' and %s more') % (len(nodes) - 30) | |
960 | return literal(pref + '<br/> '.join([safe_unicode(x.path) |
|
960 | return literal(pref + '<br/> '.join([safe_unicode(x.path) | |
961 | for x in nodes[:30]]) + suf) |
|
961 | for x in nodes[:30]]) + suf) | |
962 | else: |
|
962 | else: | |
963 | return ': ' + _('No files') |
|
963 | return ': ' + _('No files') | |
964 |
|
964 | |||
965 |
|
965 | |||
966 | def fancy_file_stats(stats): |
|
966 | def fancy_file_stats(stats): | |
967 | """ |
|
967 | """ | |
968 | Displays a fancy two colored bar for number of added/deleted |
|
968 | Displays a fancy two colored bar for number of added/deleted | |
969 | lines of code on file |
|
969 | lines of code on file | |
970 |
|
970 | |||
971 | :param stats: two element list of added/deleted lines of code |
|
971 | :param stats: two element list of added/deleted lines of code | |
972 | """ |
|
972 | """ | |
973 | from kallithea.lib.diffs import NEW_FILENODE, DEL_FILENODE, \ |
|
973 | from kallithea.lib.diffs import NEW_FILENODE, DEL_FILENODE, \ | |
974 | MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE |
|
974 | MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE | |
975 |
|
975 | |||
976 | a, d = stats['added'], stats['deleted'] |
|
976 | a, d = stats['added'], stats['deleted'] | |
977 | width = 100 |
|
977 | width = 100 | |
978 |
|
978 | |||
979 | if stats['binary']: |
|
979 | if stats['binary']: | |
980 | # binary mode |
|
980 | # binary mode | |
981 | lbl = '' |
|
981 | lbl = '' | |
982 | bin_op = 1 |
|
982 | bin_op = 1 | |
983 |
|
983 | |||
984 | if BIN_FILENODE in stats['ops']: |
|
984 | if BIN_FILENODE in stats['ops']: | |
985 | lbl = 'bin+' |
|
985 | lbl = 'bin+' | |
986 |
|
986 | |||
987 | if NEW_FILENODE in stats['ops']: |
|
987 | if NEW_FILENODE in stats['ops']: | |
988 | lbl += _('new file') |
|
988 | lbl += _('new file') | |
989 | bin_op = NEW_FILENODE |
|
989 | bin_op = NEW_FILENODE | |
990 | elif MOD_FILENODE in stats['ops']: |
|
990 | elif MOD_FILENODE in stats['ops']: | |
991 | lbl += _('mod') |
|
991 | lbl += _('mod') | |
992 | bin_op = MOD_FILENODE |
|
992 | bin_op = MOD_FILENODE | |
993 | elif DEL_FILENODE in stats['ops']: |
|
993 | elif DEL_FILENODE in stats['ops']: | |
994 | lbl += _('del') |
|
994 | lbl += _('del') | |
995 | bin_op = DEL_FILENODE |
|
995 | bin_op = DEL_FILENODE | |
996 | elif RENAMED_FILENODE in stats['ops']: |
|
996 | elif RENAMED_FILENODE in stats['ops']: | |
997 | lbl += _('rename') |
|
997 | lbl += _('rename') | |
998 | bin_op = RENAMED_FILENODE |
|
998 | bin_op = RENAMED_FILENODE | |
999 |
|
999 | |||
1000 | # chmod can go with other operations |
|
1000 | # chmod can go with other operations | |
1001 | if CHMOD_FILENODE in stats['ops']: |
|
1001 | if CHMOD_FILENODE in stats['ops']: | |
1002 | _org_lbl = _('chmod') |
|
1002 | _org_lbl = _('chmod') | |
1003 | lbl += _org_lbl if lbl.endswith('+') else '+%s' % _org_lbl |
|
1003 | lbl += _org_lbl if lbl.endswith('+') else '+%s' % _org_lbl | |
1004 |
|
1004 | |||
1005 | #import ipdb;ipdb.set_trace() |
|
1005 | #import ipdb;ipdb.set_trace() | |
1006 | b_d = '<div class="bin bin%s progress-bar" style="width:100%%">%s</div>' % (bin_op, lbl) |
|
1006 | b_d = '<div class="bin bin%s progress-bar" style="width:100%%">%s</div>' % (bin_op, lbl) | |
1007 | b_a = '<div class="bin bin1" style="width:0%"></div>' |
|
1007 | b_a = '<div class="bin bin1" style="width:0%"></div>' | |
1008 | return literal('<div style="width:%spx" class="progress">%s%s</div>' % (width, b_a, b_d)) |
|
1008 | return literal('<div style="width:%spx" class="progress">%s%s</div>' % (width, b_a, b_d)) | |
1009 |
|
1009 | |||
1010 | t = stats['added'] + stats['deleted'] |
|
1010 | t = stats['added'] + stats['deleted'] | |
1011 | unit = float(width) / (t or 1) |
|
1011 | unit = float(width) / (t or 1) | |
1012 |
|
1012 | |||
1013 | # needs > 9% of width to be visible or 0 to be hidden |
|
1013 | # needs > 9% of width to be visible or 0 to be hidden | |
1014 | a_p = max(9, unit * a) if a > 0 else 0 |
|
1014 | a_p = max(9, unit * a) if a > 0 else 0 | |
1015 | d_p = max(9, unit * d) if d > 0 else 0 |
|
1015 | d_p = max(9, unit * d) if d > 0 else 0 | |
1016 | p_sum = a_p + d_p |
|
1016 | p_sum = a_p + d_p | |
1017 |
|
1017 | |||
1018 | if p_sum > width: |
|
1018 | if p_sum > width: | |
1019 | # adjust the percentage to be == 100% since we adjusted to 9 |
|
1019 | # adjust the percentage to be == 100% since we adjusted to 9 | |
1020 | if a_p > d_p: |
|
1020 | if a_p > d_p: | |
1021 | a_p = a_p - (p_sum - width) |
|
1021 | a_p = a_p - (p_sum - width) | |
1022 | else: |
|
1022 | else: | |
1023 | d_p = d_p - (p_sum - width) |
|
1023 | d_p = d_p - (p_sum - width) | |
1024 |
|
1024 | |||
1025 | a_v = a if a > 0 else '' |
|
1025 | a_v = a if a > 0 else '' | |
1026 | d_v = d if d > 0 else '' |
|
1026 | d_v = d if d > 0 else '' | |
1027 |
|
1027 | |||
1028 | d_a = '<div class="added progress-bar" style="width:%s%%">%s</div>' % ( |
|
1028 | d_a = '<div class="added progress-bar" style="width:%s%%">%s</div>' % ( | |
1029 | a_p, a_v |
|
1029 | a_p, a_v | |
1030 | ) |
|
1030 | ) | |
1031 | d_d = '<div class="deleted progress-bar" style="width:%s%%">%s</div>' % ( |
|
1031 | d_d = '<div class="deleted progress-bar" style="width:%s%%">%s</div>' % ( | |
1032 | d_p, d_v |
|
1032 | d_p, d_v | |
1033 | ) |
|
1033 | ) | |
1034 | return literal('<div class="progress" style="width:%spx">%s%s</div>' % (width, d_a, d_d)) |
|
1034 | return literal('<div class="progress" style="width:%spx">%s%s</div>' % (width, d_a, d_d)) | |
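For text diffs the bar widths come from simple proportional scaling: both counts are mapped onto a 100px bar, any non-zero side is clamped up to at least 9% so it stays visible, and the larger side is trimmed if the clamping pushes the total past 100%. The arithmetic in isolation:

    def bar_widths(added, deleted, width=100):
        unit = float(width) / ((added + deleted) or 1)
        a_p = max(9, unit * added) if added else 0
        d_p = max(9, unit * deleted) if deleted else 0
        overflow = a_p + d_p - width
        if overflow > 0:            # the 9% minimum can overshoot the bar
            if a_p > d_p:
                a_p -= overflow
            else:
                d_p -= overflow
        return a_p, d_p

    print(bar_widths(1, 99))   # (9, 91.0): one added line still gets a visible sliver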
1035 |
|
1035 | |||
1036 |
|
1036 | |||
1037 | _URLIFY_RE = re.compile(r''' |
|
1037 | _URLIFY_RE = re.compile(r''' | |
1038 | # URL markup |
|
1038 | # URL markup | |
1039 | (?P<url>%s) | |
|
1039 | (?P<url>%s) | | |
1040 | # @mention markup |
|
1040 | # @mention markup | |
1041 | (?P<mention>%s) | |
|
1041 | (?P<mention>%s) | | |
1042 | # Changeset hash markup |
|
1042 | # Changeset hash markup | |
1043 | (?<!\w|[-_]) |
|
1043 | (?<!\w|[-_]) | |
1044 | (?P<hash>[0-9a-f]{12,40}) |
|
1044 | (?P<hash>[0-9a-f]{12,40}) | |
1045 | (?!\w|[-_]) | |
|
1045 | (?!\w|[-_]) | | |
1046 | # Markup of *bold text* |
|
1046 | # Markup of *bold text* | |
1047 | (?: |
|
1047 | (?: | |
1048 | (?:^|(?<=\s)) |
|
1048 | (?:^|(?<=\s)) | |
1049 | (?P<bold> [*] (?!\s) [^*\n]* (?<!\s) [*] ) |
|
1049 | (?P<bold> [*] (?!\s) [^*\n]* (?<!\s) [*] ) | |
1050 | (?![*\w]) |
|
1050 | (?![*\w]) | |
1051 | ) | |
|
1051 | ) | | |
1052 | # "Stylize" markup |
|
1052 | # "Stylize" markup | |
1053 | \[see\ \=>\ *(?P<seen>[a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\] | |
|
1053 | \[see\ \=>\ *(?P<seen>[a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\] | | |
1054 | \[license\ \=>\ *(?P<license>[a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\] | |
|
1054 | \[license\ \=>\ *(?P<license>[a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\] | | |
1055 | \[(?P<tagtype>requires|recommends|conflicts|base)\ \=>\ *(?P<tagvalue>[a-zA-Z0-9\-\/]*)\] | |
|
1055 | \[(?P<tagtype>requires|recommends|conflicts|base)\ \=>\ *(?P<tagvalue>[a-zA-Z0-9\-\/]*)\] | | |
1056 | \[(?:lang|language)\ \=>\ *(?P<lang>[a-zA-Z\-\/\#\+]*)\] | |
|
1056 | \[(?:lang|language)\ \=>\ *(?P<lang>[a-zA-Z\-\/\#\+]*)\] | | |
1057 | \[(?P<tag>[a-z]+)\] |
|
1057 | \[(?P<tag>[a-z]+)\] | |
1058 | ''' % (url_re.pattern, MENTIONS_REGEX.pattern), |
|
1058 | ''' % (url_re.pattern, MENTIONS_REGEX.pattern), | |
1059 | re.VERBOSE | re.MULTILINE | re.IGNORECASE) |
|
1059 | re.VERBOSE | re.MULTILINE | re.IGNORECASE) | |
1060 |
|
1060 | |||
1061 |
|
1061 | |||
1062 | def urlify_text(s, repo_name=None, link_=None, truncate=None, stylize=False, truncatef=truncate): |
|
1062 | def urlify_text(s, repo_name=None, link_=None, truncate=None, stylize=False, truncatef=truncate): | |
1063 | """ |
|
1063 | """ | |
1064 | Parse the given text message and make literal HTML with markup. |
|
1064 | Parse the given text message and make literal HTML with markup. |
1065 | The text will be truncated to the specified length. |
|
1065 | The text will be truncated to the specified length. |
1066 | Hashes are turned into changeset links to the specified repository. |
|
1066 | Hashes are turned into changeset links to the specified repository. |
1067 | URLs link to what they say. |
|
1067 | URLs link to what they say. |
1068 | Issues are linked to the configured issue server. |
|
1068 | Issues are linked to the configured issue server. |
1069 | If link_ is provided, all text not already linking somewhere will link there. |
|
1069 | If link_ is provided, all text not already linking somewhere will link there. |
1070 | """ |
|
1070 | """ | |
1071 |
|
1071 | |||
1072 | def _replace(match_obj): |
|
1072 | def _replace(match_obj): | |
1073 | url = match_obj.group('url') |
|
1073 | url = match_obj.group('url') | |
1074 | if url is not None: |
|
1074 | if url is not None: | |
1075 | return '<a href="%(url)s">%(url)s</a>' % {'url': url} |
|
1075 | return '<a href="%(url)s">%(url)s</a>' % {'url': url} | |
1076 | mention = match_obj.group('mention') |
|
1076 | mention = match_obj.group('mention') | |
1077 | if mention is not None: |
|
1077 | if mention is not None: | |
1078 | return '<b>%s</b>' % mention |
|
1078 | return '<b>%s</b>' % mention | |
1079 | hash_ = match_obj.group('hash') |
|
1079 | hash_ = match_obj.group('hash') | |
1080 | if hash_ is not None and repo_name is not None: |
|
1080 | if hash_ is not None and repo_name is not None: | |
1081 | from kallithea.config.routing import url # doh, we need to re-import url to mock it later |
|
1081 | from kallithea.config.routing import url # doh, we need to re-import url to mock it later | |
1082 | return '<a class="changeset_hash" href="%(url)s">%(hash)s</a>' % { |
|
1082 | return '<a class="changeset_hash" href="%(url)s">%(hash)s</a>' % { | |
1083 | 'url': url('changeset_home', repo_name=repo_name, revision=hash_), |
|
1083 | 'url': url('changeset_home', repo_name=repo_name, revision=hash_), | |
1084 | 'hash': hash_, |
|
1084 | 'hash': hash_, | |
1085 | } |
|
1085 | } | |
1086 | bold = match_obj.group('bold') |
|
1086 | bold = match_obj.group('bold') | |
1087 | if bold is not None: |
|
1087 | if bold is not None: | |
1088 | return '<b>*%s*</b>' % _urlify(bold[1:-1]) |
|
1088 | return '<b>*%s*</b>' % _urlify(bold[1:-1]) | |
1089 | if stylize: |
|
1089 | if stylize: | |
1090 | seen = match_obj.group('seen') |
|
1090 | seen = match_obj.group('seen') | |
1091 | if seen: |
|
1091 | if seen: | |
1092 | return '<div class="label label-meta" data-tag="see">see => %s</div>' % seen |
|
1092 | return '<div class="label label-meta" data-tag="see">see => %s</div>' % seen | |
1093 | license = match_obj.group('license') |
|
1093 | license = match_obj.group('license') | |
1094 | if license: |
|
1094 | if license: | |
1095 | return '<div class="label label-meta" data-tag="license"><a href="http://www.opensource.org/licenses/%s">%s</a></div>' % (license, license) |
|
1095 | return '<div class="label label-meta" data-tag="license"><a href="http://www.opensource.org/licenses/%s">%s</a></div>' % (license, license) | |
1096 | tagtype = match_obj.group('tagtype') |
|
1096 | tagtype = match_obj.group('tagtype') | |
1097 | if tagtype: |
|
1097 | if tagtype: | |
1098 | tagvalue = match_obj.group('tagvalue') |
|
1098 | tagvalue = match_obj.group('tagvalue') | |
1099 | return '<div class="label label-meta" data-tag="%s">%s => <a href="/%s">%s</a></div>' % (tagtype, tagtype, tagvalue, tagvalue) |
|
1099 | return '<div class="label label-meta" data-tag="%s">%s => <a href="/%s">%s</a></div>' % (tagtype, tagtype, tagvalue, tagvalue) | |
1100 | lang = match_obj.group('lang') |
|
1100 | lang = match_obj.group('lang') | |
1101 | if lang: |
|
1101 | if lang: | |
1102 | return '<div class="label label-meta" data-tag="lang">%s</div>' % lang |
|
1102 | return '<div class="label label-meta" data-tag="lang">%s</div>' % lang | |
1103 | tag = match_obj.group('tag') |
|
1103 | tag = match_obj.group('tag') | |
1104 | if tag: |
|
1104 | if tag: | |
1105 | return '<div class="label label-meta" data-tag="%s">%s</div>' % (tag, tag) |
|
1105 | return '<div class="label label-meta" data-tag="%s">%s</div>' % (tag, tag) | |
1106 | return match_obj.group(0) |
|
1106 | return match_obj.group(0) | |
1107 |
|
1107 | |||
1108 | def _urlify(s): |
|
1108 | def _urlify(s): | |
1109 | """ |
|
1109 | """ | |
1110 | Extract urls from text and make html links out of them |
|
1110 | Extract urls from text and make html links out of them | |
1111 | """ |
|
1111 | """ | |
1112 | return _URLIFY_RE.sub(_replace, s) |
|
1112 | return _URLIFY_RE.sub(_replace, s) | |
1113 |
|
1113 | |||
1114 | if truncate is None: |
|
1114 | if truncate is None: | |
1115 | s = s.rstrip() |
|
1115 | s = s.rstrip() | |
1116 | else: |
|
1116 | else: | |
1117 | s = truncatef(s, truncate, whole_word=True) |
|
1117 | s = truncatef(s, truncate, whole_word=True) | |
1118 | s = html_escape(s) |
|
1118 | s = html_escape(s) | |
1119 | s = _urlify(s) |
|
1119 | s = _urlify(s) | |
1120 | if repo_name is not None: |
|
1120 | if repo_name is not None: | |
1121 | s = urlify_issues(s, repo_name) |
|
1121 | s = urlify_issues(s, repo_name) | |
1122 | if link_ is not None: |
|
1122 | if link_ is not None: | |
1123 | # make href around everything that isn't a href already |
|
1123 | # make href around everything that isn't a href already | |
1124 | s = linkify_others(s, link_) |
|
1124 | s = linkify_others(s, link_) | |
1125 | s = s.replace('\r\n', '<br/>').replace('\n', '<br/>') |
|
1125 | s = s.replace('\r\n', '<br/>').replace('\n', '<br/>') | |
1126 | # Turn HTML5 into more valid HTML4 as required by some mail readers. |
|
1126 | # Turn HTML5 into more valid HTML4 as required by some mail readers. | |
1127 | # (This is not done in one step in html_escape, because character codes like |
|
1127 | # (This is not done in one step in html_escape, because character codes like | |
1128 | # &#123; risk to be seen as an issue reference due to the presence of '#'.) |
|
1128 | # &#123; risk to be seen as an issue reference due to the presence of '#'.) |
1129 | s = s.replace("&apos;", "&#39;") |
|
1129 | s = s.replace("&apos;", "&#39;") |
1130 | return literal(s) |
|
1130 | return literal(s) | |
1131 |
|
1131 | |||
1132 |
|
1132 | |||
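
As a reading aid for urlify_text above, here is a minimal usage sketch. The repository name, the message text and the truncation length are made-up illustration values; only the helper itself and its keyword arguments come from the code above.

    from kallithea.lib import helpers as h

    message = "Fix parser crash, see deadbeefcafe0123456789 and http://example.com/notes"
    html = h.urlify_text(message, repo_name='my-repo', truncate=200)
    # 'html' is an escaped literal: the http URL is wrapped in an <a> tag, and the
    # 12-40 character hex string becomes a changeset link into 'my-repo'.
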
1133 | def linkify_others(t, l): |
|
1133 | def linkify_others(t, l): | |
1134 | """Add a default link to html with links. |
|
1134 | """Add a default link to html with links. | |
1135 | HTML doesn't allow nesting of links, so the outer link must be broken up |
|
1135 | HTML doesn't allow nesting of links, so the outer link must be broken up |
1136 | into pieces to make room for the other links. |
|
1136 | into pieces to make room for the other links. |
1137 | """ |
|
1137 | """ | |
1138 | urls = re.compile(r'(\<a.*?\<\/a\>)',) |
|
1138 | urls = re.compile(r'(\<a.*?\<\/a\>)',) | |
1139 | links = [] |
|
1139 | links = [] | |
1140 | for e in urls.split(t): |
|
1140 | for e in urls.split(t): | |
1141 | if e.strip() and not urls.match(e): |
|
1141 | if e.strip() and not urls.match(e): | |
1142 | links.append('<a class="message-link" href="%s">%s</a>' % (l, e)) |
|
1142 | links.append('<a class="message-link" href="%s">%s</a>' % (l, e)) | |
1143 | else: |
|
1143 | else: | |
1144 | links.append(e) |
|
1144 | links.append(e) | |
1145 |
|
1145 | |||
1146 | return ''.join(links) |
|
1146 | return ''.join(links) | |
1147 |
|
1147 | |||
1148 |
|
1148 | |||
1149 | # Global variable that will hold the actual urlify_issues function body. |
|
1149 | # Global variable that will hold the actual urlify_issues function body. | |
1150 | # Will be set on first use when the global configuration has been read. |
|
1150 | # Will be set on first use when the global configuration has been read. | |
1151 | _urlify_issues_f = None |
|
1151 | _urlify_issues_f = None | |
1152 |
|
1152 | |||
1153 |
|
1153 | |||
1154 | def urlify_issues(newtext, repo_name): |
|
1154 | def urlify_issues(newtext, repo_name): | |
1155 | """Urlify issue references according to .ini configuration""" |
|
1155 | """Urlify issue references according to .ini configuration""" | |
1156 | global _urlify_issues_f |
|
1156 | global _urlify_issues_f | |
1157 | if _urlify_issues_f is None: |
|
1157 | if _urlify_issues_f is None: | |
1158 | from kallithea import CONFIG |
|
1158 | from kallithea import CONFIG | |
1159 | from kallithea.model.db import URL_SEP |
|
1159 | from kallithea.model.db import URL_SEP | |
1160 | assert CONFIG['sqlalchemy.url'] # make sure config has been loaded |
|
1160 | assert CONFIG['sqlalchemy.url'] # make sure config has been loaded | |
1161 |
|
1161 | |||
1162 | # Build chain of urlify functions, starting with not doing any transformation |
|
1162 | # Build chain of urlify functions, starting with not doing any transformation | |
1163 | tmp_urlify_issues_f = lambda s: s |
|
1163 | tmp_urlify_issues_f = lambda s: s | |
1164 |
|
1164 | |||
1165 | issue_pat_re = re.compile(r'issue_pat(.*)') |
|
1165 | issue_pat_re = re.compile(r'issue_pat(.*)') | |
1166 | for k in CONFIG: |
|
1166 | for k in CONFIG: | |
1167 | # Find all issue_pat* settings that also have corresponding server_link and prefix configuration |
|
1167 | # Find all issue_pat* settings that also have corresponding server_link and prefix configuration | |
1168 | m = issue_pat_re.match(k) |
|
1168 | m = issue_pat_re.match(k) | |
1169 | if m is None: |
|
1169 | if m is None: | |
1170 | continue |
|
1170 | continue | |
1171 | suffix = m.group(1) |
|
1171 | suffix = m.group(1) | |
1172 | issue_pat = CONFIG.get(k) |
|
1172 | issue_pat = CONFIG.get(k) | |
1173 | issue_server_link = CONFIG.get('issue_server_link%s' % suffix) |
|
1173 | issue_server_link = CONFIG.get('issue_server_link%s' % suffix) | |
1174 | issue_sub = CONFIG.get('issue_sub%s' % suffix) |
|
1174 | issue_sub = CONFIG.get('issue_sub%s' % suffix) | |
1175 | if not issue_pat or not issue_server_link or issue_sub is None: # issue_sub can be empty but should be present |
|
1175 | if not issue_pat or not issue_server_link or issue_sub is None: # issue_sub can be empty but should be present | |
1176 | log.error('skipping incomplete issue pattern %r: %r -> %r %r', suffix, issue_pat, issue_server_link, issue_sub) |
|
1176 | log.error('skipping incomplete issue pattern %r: %r -> %r %r', suffix, issue_pat, issue_server_link, issue_sub) | |
1177 | continue |
|
1177 | continue | |
1178 |
|
1178 | |||
1179 | # Wrap tmp_urlify_issues_f with substitution of this pattern, while making sure all loop variables (and compiled regexpes) are bound |
|
1179 | # Wrap tmp_urlify_issues_f with substitution of this pattern, while making sure all loop variables (and compiled regexpes) are bound | |
1180 | try: |
|
1180 | try: | |
1181 | issue_re = re.compile(issue_pat) |
|
1181 | issue_re = re.compile(issue_pat) | |
1182 | except re.error as e: |
|
1182 | except re.error as e: | |
1183 | log.error('skipping invalid issue pattern %r: %r -> %r %r. Error: %s', suffix, issue_pat, issue_server_link, issue_sub, str(e)) |
|
1183 | log.error('skipping invalid issue pattern %r: %r -> %r %r. Error: %s', suffix, issue_pat, issue_server_link, issue_sub, str(e)) | |
1184 | continue |
|
1184 | continue | |
1185 |
|
1185 | |||
1186 | log.debug('issue pattern %r: %r -> %r %r', suffix, issue_pat, issue_server_link, issue_sub) |
|
1186 | log.debug('issue pattern %r: %r -> %r %r', suffix, issue_pat, issue_server_link, issue_sub) | |
1187 |
|
1187 | |||
1188 | def issues_replace(match_obj, |
|
1188 | def issues_replace(match_obj, | |
1189 | issue_server_link=issue_server_link, issue_sub=issue_sub): |
|
1189 | issue_server_link=issue_server_link, issue_sub=issue_sub): | |
1190 | try: |
|
1190 | try: | |
1191 | issue_url = match_obj.expand(issue_server_link) |
|
1191 | issue_url = match_obj.expand(issue_server_link) | |
1192 | except (IndexError, re.error) as e: |
|
1192 | except (IndexError, re.error) as e: | |
1193 | log.error('invalid issue_url setting %r -> %r %r. Error: %s', issue_pat, issue_server_link, issue_sub, str(e)) |
|
1193 | log.error('invalid issue_url setting %r -> %r %r. Error: %s', issue_pat, issue_server_link, issue_sub, str(e)) | |
1194 | issue_url = issue_server_link |
|
1194 | issue_url = issue_server_link | |
1195 | issue_url = issue_url.replace('{repo}', repo_name) |
|
1195 | issue_url = issue_url.replace('{repo}', repo_name) | |
1196 | issue_url = issue_url.replace('{repo_name}', repo_name.split(URL_SEP)[-1]) |
|
1196 | issue_url = issue_url.replace('{repo_name}', repo_name.split(URL_SEP)[-1]) | |
1197 | # if issue_sub is empty use the matched issue reference verbatim |
|
1197 | # if issue_sub is empty use the matched issue reference verbatim | |
1198 | if not issue_sub: |
|
1198 | if not issue_sub: | |
1199 | issue_text = match_obj.group() |
|
1199 | issue_text = match_obj.group() | |
1200 | else: |
|
1200 | else: | |
1201 | try: |
|
1201 | try: | |
1202 | issue_text = match_obj.expand(issue_sub) |
|
1202 | issue_text = match_obj.expand(issue_sub) | |
1203 | except (IndexError, re.error) as e: |
|
1203 | except (IndexError, re.error) as e: | |
1204 | log.error('invalid issue_sub setting %r -> %r %r. Error: %s', issue_pat, issue_server_link, issue_sub, str(e)) |
|
1204 | log.error('invalid issue_sub setting %r -> %r %r. Error: %s', issue_pat, issue_server_link, issue_sub, str(e)) | |
1205 | issue_text = match_obj.group() |
|
1205 | issue_text = match_obj.group() | |
1206 |
|
1206 | |||
1207 | return ( |
|
1207 | return ( | |
1208 | '<a class="issue-tracker-link" href="%(url)s">' |
|
1208 | '<a class="issue-tracker-link" href="%(url)s">' | |
1209 | '%(text)s' |
|
1209 | '%(text)s' | |
1210 | '</a>' |
|
1210 | '</a>' | |
1211 | ) % { |
|
1211 | ) % { | |
1212 | 'url': issue_url, |
|
1212 | 'url': issue_url, | |
1213 | 'text': issue_text, |
|
1213 | 'text': issue_text, | |
1214 | } |
|
1214 | } | |
1215 | tmp_urlify_issues_f = (lambda s, |
|
1215 | tmp_urlify_issues_f = (lambda s, | |
1216 | issue_re=issue_re, issues_replace=issues_replace, chain_f=tmp_urlify_issues_f: |
|
1216 | issue_re=issue_re, issues_replace=issues_replace, chain_f=tmp_urlify_issues_f: | |
1217 | issue_re.sub(issues_replace, chain_f(s))) |
|
1217 | issue_re.sub(issues_replace, chain_f(s))) | |
1218 |
|
1218 | |||
1219 | # Set tmp function globally - atomically |
|
1219 | # Set tmp function globally - atomically | |
1220 | _urlify_issues_f = tmp_urlify_issues_f |
|
1220 | _urlify_issues_f = tmp_urlify_issues_f | |
1221 |
|
1221 | |||
1222 | return _urlify_issues_f(newtext) |
|
1222 | return _urlify_issues_f(newtext) | |
1223 |
|
1223 | |||
1224 |
|
1224 | |||
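
To make the configuration-driven chain in urlify_issues easier to follow, this is roughly what a pair of issue tracker settings could look like once loaded into CONFIG. The tracker URLs and the '_wiki' suffix are invented for illustration; only the key names (issue_pat*, issue_server_link*, issue_sub*) and the {repo}/{repo_name} placeholders are taken from the code above.

    # hypothetical .ini values as they would appear in the loaded CONFIG mapping
    CONFIG['issue_pat'] = r'#(\d+)'
    CONFIG['issue_server_link'] = r'https://issues.example.com/{repo}/issue/\1'
    CONFIG['issue_sub'] = ''  # empty: keep the matched text (e.g. "#123") verbatim

    CONFIG['issue_pat_wiki'] = r'wiki-(\d+)'
    CONFIG['issue_server_link_wiki'] = r'https://wiki.example.com/\1'
    CONFIG['issue_sub_wiki'] = r'WIKI-\1'  # rewrite the link text via match expansion
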
1225 | def render_w_mentions(source, repo_name=None): |
|
1225 | def render_w_mentions(source, repo_name=None): | |
1226 | """ |
|
1226 | """ | |
1227 | Render plain text with revision hashes and issue references urlified |
|
1227 | Render plain text with revision hashes and issue references urlified | |
1228 | and with @mention highlighting. |
|
1228 | and with @mention highlighting. | |
1229 | """ |
|
1229 | """ | |
1230 | s = safe_unicode(source) |
|
1230 | s = safe_unicode(source) | |
1231 | s = urlify_text(s, repo_name=repo_name) |
|
1231 | s = urlify_text(s, repo_name=repo_name) | |
1232 | return literal('<div class="formatted-fixed">%s</div>' % s) |
|
1232 | return literal('<div class="formatted-fixed">%s</div>' % s) | |
1233 |
|
1233 | |||
1234 |
|
1234 | |||
1235 | def short_ref(ref_type, ref_name): |
|
1235 | def short_ref(ref_type, ref_name): | |
1236 | if ref_type == 'rev': |
|
1236 | if ref_type == 'rev': | |
1237 | return short_id(ref_name) |
|
1237 | return short_id(ref_name) | |
1238 | return ref_name |
|
1238 | return ref_name | |
1239 |
|
1239 | |||
1240 |
|
1240 | |||
1241 | def link_to_ref(repo_name, ref_type, ref_name, rev=None): |
|
1241 | def link_to_ref(repo_name, ref_type, ref_name, rev=None): | |
1242 | """ |
|
1242 | """ | |
1243 | Return full markup for a href to changeset_home for a changeset. |
|
1243 | Return full markup for a href to changeset_home for a changeset. | |
1244 | If ref_type is branch it will link to changelog. |
|
1244 | If ref_type is branch it will link to changelog. | |
1245 | ref_name is shortened if ref_type is 'rev'. |
|
1245 | ref_name is shortened if ref_type is 'rev'. | |
1246 | if rev is specified show it too, explicitly linking to that revision. |
|
1246 | if rev is specified show it too, explicitly linking to that revision. | |
1247 | """ |
|
1247 | """ | |
1248 | txt = short_ref(ref_type, ref_name) |
|
1248 | txt = short_ref(ref_type, ref_name) | |
1249 | if ref_type == 'branch': |
|
1249 | if ref_type == 'branch': | |
1250 | u = url('changelog_home', repo_name=repo_name, branch=ref_name) |
|
1250 | u = url('changelog_home', repo_name=repo_name, branch=ref_name) | |
1251 | else: |
|
1251 | else: | |
1252 | u = url('changeset_home', repo_name=repo_name, revision=ref_name) |
|
1252 | u = url('changeset_home', repo_name=repo_name, revision=ref_name) | |
1253 | l = link_to(repo_name + '#' + txt, u) |
|
1253 | l = link_to(repo_name + '#' + txt, u) | |
1254 | if rev and ref_type != 'rev': |
|
1254 | if rev and ref_type != 'rev': | |
1255 | l = literal('%s (%s)' % (l, link_to(short_id(rev), url('changeset_home', repo_name=repo_name, revision=rev)))) |
|
1255 | l = literal('%s (%s)' % (l, link_to(short_id(rev), url('changeset_home', repo_name=repo_name, revision=rev)))) | |
1256 | return l |
|
1256 | return l | |
1257 |
|
1257 | |||
1258 |
|
1258 | |||
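
For illustration, a few hedged examples of what link_to_ref above produces; the repository name and hashes are placeholders.

    h.link_to_ref('my-repo', 'branch', 'default')
    # -> "my-repo#default", linking to the changelog filtered to branch 'default'
    h.link_to_ref('my-repo', 'rev', '0123456789abcdef0123456789abcdef01234567')
    # -> "my-repo#<short id>", linking to changeset_home for that revision
    h.link_to_ref('my-repo', 'tag', 'v1.0', rev='0123456789abcdef0123456789abcdef01234567')
    # -> the tag link followed by "(<short id>)" linking to the exact changeset
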
1259 | def changeset_status(repo, revision): |
|
1259 | def changeset_status(repo, revision): | |
1260 | from kallithea.model.changeset_status import ChangesetStatusModel |
|
1260 | from kallithea.model.changeset_status import ChangesetStatusModel | |
1261 | return ChangesetStatusModel().get_status(repo, revision) |
|
1261 | return ChangesetStatusModel().get_status(repo, revision) | |
1262 |
|
1262 | |||
1263 |
|
1263 | |||
1264 | def changeset_status_lbl(changeset_status): |
|
1264 | def changeset_status_lbl(changeset_status): | |
1265 | from kallithea.model.db import ChangesetStatus |
|
1265 | from kallithea.model.db import ChangesetStatus | |
1266 | return ChangesetStatus.get_status_lbl(changeset_status) |
|
1266 | return ChangesetStatus.get_status_lbl(changeset_status) | |
1267 |
|
1267 | |||
1268 |
|
1268 | |||
1269 | def get_permission_name(key): |
|
1269 | def get_permission_name(key): | |
1270 | from kallithea.model.db import Permission |
|
1270 | from kallithea.model.db import Permission | |
1271 | return dict(Permission.PERMS).get(key) |
|
1271 | return dict(Permission.PERMS).get(key) | |
1272 |
|
1272 | |||
1273 |
|
1273 | |||
1274 | def journal_filter_help(): |
|
1274 | def journal_filter_help(): | |
1275 | return _(textwrap.dedent(''' |
|
1275 | return _(textwrap.dedent(''' | |
1276 | Example filter terms: |
|
1276 | Example filter terms: | |
1277 | repository:vcs |
|
1277 | repository:vcs | |
1278 | username:developer |
|
1278 | username:developer | |
1279 | action:*push* |
|
1279 | action:*push* | |
1280 | ip:127.0.0.1 |
|
1280 | ip:127.0.0.1 | |
1281 | date:20120101 |
|
1281 | date:20120101 | |
1282 | date:[20120101100000 TO 20120102] |
|
1282 | date:[20120101100000 TO 20120102] | |
1283 |
|
1283 | |||
1284 | Generate wildcards using '*' character: |
|
1284 | Generate wildcards using '*' character: | |
1285 | "repository:vcs*" - search everything starting with 'vcs' |
|
1285 | "repository:vcs*" - search everything starting with 'vcs' | |
1286 | "repository:*vcs*" - search for repository containing 'vcs' |
|
1286 | "repository:*vcs*" - search for repository containing 'vcs' | |
1287 |
|
1287 | |||
1288 | Optional AND / OR operators in queries |
|
1288 | Optional AND / OR operators in queries | |
1289 | "repository:vcs OR repository:test" |
|
1289 | "repository:vcs OR repository:test" | |
1290 | "username:test AND repository:test*" |
|
1290 | "username:test AND repository:test*" | |
1291 | ''')) |
|
1291 | ''')) | |
1292 |
|
1292 | |||
1293 |
|
1293 | |||
1294 | def not_mapped_error(repo_name): |
|
1294 | def not_mapped_error(repo_name): | |
1295 | flash(_('%s repository is not mapped to db perhaps' |
|
1295 | flash(_('%s repository is not mapped to db perhaps' | |
1296 | ' it was created or renamed from the filesystem' |
|
1296 | ' it was created or renamed from the filesystem' | |
1297 | ' please run the application again' |
|
1297 | ' please run the application again' | |
1298 | ' in order to rescan repositories') % repo_name, category='error') |
|
1298 | ' in order to rescan repositories') % repo_name, category='error') | |
1299 |
|
1299 | |||
1300 |
|
1300 | |||
1301 | def ip_range(ip_addr): |
|
1301 | def ip_range(ip_addr): | |
1302 | from kallithea.model.db import UserIpMap |
|
1302 | from kallithea.model.db import UserIpMap | |
1303 | s, e = UserIpMap._get_ip_range(ip_addr) |
|
1303 | s, e = UserIpMap._get_ip_range(ip_addr) | |
1304 | return '%s - %s' % (s, e) |
|
1304 | return '%s - %s' % (s, e) | |
1305 |
|
1305 | |||
1306 |
|
1306 | |||
1307 | session_csrf_secret_name = "_session_csrf_secret_token" |
|
1307 | session_csrf_secret_name = "_session_csrf_secret_token" | |
1308 |
|
1308 | |||
1309 | def session_csrf_secret_token(): |
|
1309 | def session_csrf_secret_token(): | |
1310 | """Return (and create) the current session's CSRF protection token.""" |
|
1310 | """Return (and create) the current session's CSRF protection token.""" | |
1311 | from tg import session |
|
1311 | from tg import session | |
1312 | if not session_csrf_secret_name in session: |
|
1312 | if not session_csrf_secret_name in session: | |
1313 | session[session_csrf_secret_name] = str(random.getrandbits(128)) |
|
1313 | session[session_csrf_secret_name] = str(random.getrandbits(128)) | |
1314 | session.save() |
|
1314 | session.save() | |
1315 | return session[session_csrf_secret_name] |
|
1315 | return session[session_csrf_secret_name] | |
1316 |
|
1316 | |||
1317 | def form(url, method="post", **attrs): |
|
1317 | def form(url, method="post", **attrs): | |
1318 | """Like webhelpers.html.tags.form , but automatically adding |
|
1318 | """Like webhelpers.html.tags.form , but automatically adding | |
1319 | session_csrf_secret_token for POST. The secret is thus never leaked in GET |
|
1319 | session_csrf_secret_token for POST. The secret is thus never leaked in GET | |
1320 | URLs. |
|
1320 | URLs. | |
1321 | """ |
|
1321 | """ | |
1322 | form = insecure_form(url, method, **attrs) |
|
1322 | form = insecure_form(url, method, **attrs) | |
1323 | if method.lower() == 'get': |
|
1323 | if method.lower() == 'get': | |
1324 | return form |
|
1324 | return form | |
1325 | return form + HTML.div(hidden(session_csrf_secret_name, session_csrf_secret_token()), style="display: none;") |
|
1325 | return form + HTML.div(hidden(session_csrf_secret_name, session_csrf_secret_token()), style="display: none;") |
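
Rounding off the CSRF helpers at the end of this file, a small hypothetical usage sketch; the form target path is made up.

    from kallithea.lib import helpers as h

    markup = h.form('/my/settings', method='post')  # placeholder path
    # For POST, the markup is the opening <form> tag followed by a hidden div with an
    # input named '_session_csrf_secret_token' carrying the per-session secret.
    # method='get' forms are returned as-is, so the secret never ends up in a URL.
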
@@ -1,459 +1,460 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | # This program is free software: you can redistribute it and/or modify |
|
2 | # This program is free software: you can redistribute it and/or modify | |
3 | # it under the terms of the GNU General Public License as published by |
|
3 | # it under the terms of the GNU General Public License as published by | |
4 | # the Free Software Foundation, either version 3 of the License, or |
|
4 | # the Free Software Foundation, either version 3 of the License, or | |
5 | # (at your option) any later version. |
|
5 | # (at your option) any later version. | |
6 | # |
|
6 | # | |
7 | # This program is distributed in the hope that it will be useful, |
|
7 | # This program is distributed in the hope that it will be useful, | |
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
8 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
9 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | # GNU General Public License for more details. |
|
10 | # GNU General Public License for more details. | |
11 | # |
|
11 | # | |
12 | # You should have received a copy of the GNU General Public License |
|
12 | # You should have received a copy of the GNU General Public License | |
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
13 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | |
14 | """ |
|
14 | """ | |
15 | kallithea.lib.indexers.daemon |
|
15 | kallithea.lib.indexers.daemon | |
16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
|
16 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | |
17 |
|
17 | |||
18 | A daemon that builds and updates the whoosh full text search indexes |
|
18 | A daemon that builds and updates the whoosh full text search indexes | |
19 |
|
19 | |||
20 | This file was forked by the Kallithea project in July 2014. |
|
20 | This file was forked by the Kallithea project in July 2014. | |
21 | Original author and date, and relevant copyright and licensing information is below: |
|
21 | Original author and date, and relevant copyright and licensing information is below: | |
22 | :created_on: Jan 26, 2010 |
|
22 | :created_on: Jan 26, 2010 | |
23 | :author: marcink |
|
23 | :author: marcink | |
24 | :copyright: (c) 2013 RhodeCode GmbH, and others. |
|
24 | :copyright: (c) 2013 RhodeCode GmbH, and others. | |
25 | :license: GPLv3, see LICENSE.md for more details. |
|
25 | :license: GPLv3, see LICENSE.md for more details. | |
26 | """ |
|
26 | """ | |
27 |
|
27 | |||
28 |
|
28 | |||
29 | import logging |
|
29 | import logging | |
30 | import os |
|
30 | import os | |
31 | import sys |
|
31 | import sys | |
32 | import traceback |
|
32 | import traceback | |
33 | from os.path import dirname |
|
33 | from os.path import dirname | |
34 | from shutil import rmtree |
|
34 | from shutil import rmtree | |
35 | from time import mktime |
|
35 | from time import mktime | |
36 |
|
36 | |||
37 | from whoosh.index import create_in, exists_in, open_dir |
|
37 | from whoosh.index import create_in, exists_in, open_dir | |
38 | from whoosh.qparser import QueryParser |
|
38 | from whoosh.qparser import QueryParser | |
39 |
|
39 | |||
40 | from kallithea.config.conf import INDEX_EXTENSIONS, INDEX_FILENAMES |
|
40 | from kallithea.config.conf import INDEX_EXTENSIONS, INDEX_FILENAMES | |
41 | from kallithea.lib.indexers import CHGSET_IDX_NAME, CHGSETS_SCHEMA, IDX_NAME, SCHEMA |
|
41 | from kallithea.lib.indexers import CHGSET_IDX_NAME, CHGSETS_SCHEMA, IDX_NAME, SCHEMA | |
42 | from kallithea.lib.utils2 import safe_str, safe_unicode |
|
42 | from kallithea.lib.utils2 import safe_str, safe_unicode | |
43 | from kallithea.lib.vcs.exceptions import ChangesetError, NodeDoesNotExistError, RepositoryError |
|
43 | from kallithea.lib.vcs.exceptions import ChangesetError, NodeDoesNotExistError, RepositoryError | |
44 | from kallithea.model.db import Repository |
|
44 | from kallithea.model.db import Repository | |
45 | from kallithea.model.scm import ScmModel |
|
45 | from kallithea.model.scm import ScmModel | |
46 |
|
46 | |||
47 |
|
47 | |||
48 | # Add location of top level folder to sys.path |
|
48 | # Add location of top level folder to sys.path | |
49 | project_path = dirname(dirname(dirname(dirname(os.path.realpath(__file__))))) |
|
49 | project_path = dirname(dirname(dirname(dirname(os.path.realpath(__file__))))) | |
50 | sys.path.append(project_path) |
|
50 | sys.path.append(project_path) | |
51 |
|
51 | |||
52 |
|
52 | |||
53 |
|
53 | |||
54 |
|
54 | |||
55 | log = logging.getLogger('whoosh_indexer') |
|
55 | log = logging.getLogger('whoosh_indexer') | |
56 |
|
56 | |||
57 |
|
57 | |||
58 | class WhooshIndexingDaemon(object): |
|
58 | class WhooshIndexingDaemon(object): | |
59 | """ |
|
59 | """ | |
60 | Daemon for atomic indexing jobs |
|
60 | Daemon for atomic indexing jobs | |
61 | """ |
|
61 | """ | |
62 |
|
62 | |||
63 | def __init__(self, indexname=IDX_NAME, index_location=None, |
|
63 | def __init__(self, indexname=IDX_NAME, index_location=None, | |
64 | repo_location=None, repo_list=None, |
|
64 | repo_location=None, repo_list=None, | |
65 | repo_update_list=None): |
|
65 | repo_update_list=None): | |
66 | self.indexname = indexname |
|
66 | self.indexname = indexname | |
67 |
|
67 | |||
68 | self.index_location = index_location |
|
68 | self.index_location = index_location | |
69 | if not index_location: |
|
69 | if not index_location: | |
70 | raise Exception('You have to provide index location') |
|
70 | raise Exception('You have to provide index location') | |
71 |
|
71 | |||
72 | self.repo_location = repo_location |
|
72 | self.repo_location = repo_location | |
73 | if not repo_location: |
|
73 | if not repo_location: | |
74 | raise Exception('You have to provide repositories location') |
|
74 | raise Exception('You have to provide repositories location') | |
75 |
|
75 | |||
76 | self.repo_paths = ScmModel().repo_scan(self.repo_location) |
|
76 | self.repo_paths = ScmModel().repo_scan(self.repo_location) | |
77 |
|
77 | |||
78 | # filter repo list |
|
78 | # filter repo list | |
79 | if repo_list: |
|
79 | if repo_list: | |
80 | # Fix non-ascii repo names to unicode |
|
80 | # Fix non-ascii repo names to unicode | |
81 | repo_list = set(safe_unicode(repo_name) for repo_name in repo_list) |
|
81 | repo_list = set(safe_unicode(repo_name) for repo_name in repo_list) | |
82 | self.filtered_repo_paths = {} |
|
82 | self.filtered_repo_paths = {} | |
83 | for repo_name, repo in self.repo_paths.items(): |
|
83 | for repo_name, repo in self.repo_paths.items(): | |
84 | if repo_name in repo_list: |
|
84 | if repo_name in repo_list: | |
85 | self.filtered_repo_paths[repo_name] = repo |
|
85 | self.filtered_repo_paths[repo_name] = repo | |
86 |
|
86 | |||
87 | self.repo_paths = self.filtered_repo_paths |
|
87 | self.repo_paths = self.filtered_repo_paths | |
88 |
|
88 | |||
89 | # filter update repo list |
|
89 | # filter update repo list | |
90 | self.filtered_repo_update_paths = {} |
|
90 | self.filtered_repo_update_paths = {} | |
91 | if repo_update_list: |
|
91 | if repo_update_list: | |
92 | self.filtered_repo_update_paths = {} |
|
92 | self.filtered_repo_update_paths = {} | |
93 | for repo_name, repo in self.repo_paths.items(): |
|
93 | for repo_name, repo in self.repo_paths.items(): | |
94 | if repo_name in repo_update_list: |
|
94 | if repo_name in repo_update_list: | |
95 | self.filtered_repo_update_paths[repo_name] = repo |
|
95 | self.filtered_repo_update_paths[repo_name] = repo | |
96 | self.repo_paths = self.filtered_repo_update_paths |
|
96 | self.repo_paths = self.filtered_repo_update_paths | |
97 |
|
97 | |||
98 | self.initial = True |
|
98 | self.initial = True | |
99 | if not os.path.isdir(self.index_location): |
|
99 | if not os.path.isdir(self.index_location): | |
100 | os.makedirs(self.index_location) |
|
100 | os.makedirs(self.index_location) | |
101 | log.info('Cannot run incremental index since it does not ' |
|
101 | log.info('Cannot run incremental index since it does not ' | |
102 | 'yet exist - running full build') |
|
102 | 'yet exist - running full build') | |
103 | elif not exists_in(self.index_location, IDX_NAME): |
|
103 | elif not exists_in(self.index_location, IDX_NAME): | |
104 | log.info('Running full index build, as the file content ' |
|
104 | log.info('Running full index build, as the file content ' | |
105 | 'index does not exist') |
|
105 | 'index does not exist') | |
106 | elif not exists_in(self.index_location, CHGSET_IDX_NAME): |
|
106 | elif not exists_in(self.index_location, CHGSET_IDX_NAME): | |
107 | log.info('Running full index build, as the changeset ' |
|
107 | log.info('Running full index build, as the changeset ' | |
108 | 'index does not exist') |
|
108 | 'index does not exist') | |
109 | else: |
|
109 | else: | |
110 | self.initial = False |
|
110 | self.initial = False | |
111 |
|
111 | |||
112 | def _get_index_revision(self, repo): |
|
112 | def _get_index_revision(self, repo): | |
113 | db_repo = Repository.get_by_repo_name(safe_unicode(repo.name)) |
|
113 | db_repo = Repository.get_by_repo_name(safe_unicode(repo.name)) | |
114 | landing_rev = 'tip' |
|
114 | landing_rev = 'tip' | |
115 | if db_repo: |
|
115 | if db_repo: | |
116 | _rev_type, _rev = db_repo.landing_rev |
|
116 | _rev_type, _rev = db_repo.landing_rev | |
117 | landing_rev = _rev |
|
117 | landing_rev = _rev | |
118 | return landing_rev |
|
118 | return landing_rev | |
119 |
|
119 | |||
120 | def _get_index_changeset(self, repo, index_rev=None): |
|
120 | def _get_index_changeset(self, repo, index_rev=None): | |
121 | if not index_rev: |
|
121 | if not index_rev: | |
122 | index_rev = self._get_index_revision(repo) |
|
122 | index_rev = self._get_index_revision(repo) | |
123 | cs = repo.get_changeset(index_rev) |
|
123 | cs = repo.get_changeset(index_rev) | |
124 | return cs |
|
124 | return cs | |
125 |
|
125 | |||
126 | def get_paths(self, repo): |
|
126 | def get_paths(self, repo): | |
127 | """ |
|
127 | """ | |
128 | recursively walk the root dir and return a set of all paths in that dir |
|
128 | recursively walk the root dir and return a set of all paths in that dir |
129 | based on repository walk function |
|
129 | based on repository walk function | |
130 | """ |
|
130 | """ | |
131 | index_paths_ = set() |
|
131 | index_paths_ = set() | |
132 | try: |
|
132 | try: | |
133 | cs = self._get_index_changeset(repo) |
|
133 | cs = self._get_index_changeset(repo) | |
134 | for _topnode, _dirs, files in cs.walk('/'): |
|
134 | for _topnode, _dirs, files in cs.walk('/'): | |
135 | for f in files: |
|
135 | for f in files: | |
136 | index_paths_.add(os.path.join(safe_str(repo.path), safe_str(f.path))) |
|
136 | index_paths_.add(os.path.join(safe_str(repo.path), safe_str(f.path))) | |
137 |
|
137 | |||
138 | except RepositoryError: |
|
138 | except RepositoryError: | |
139 | log.debug(traceback.format_exc()) |
|
139 | log.debug(traceback.format_exc()) | |
140 | pass |
|
140 | pass | |
141 | return index_paths_ |
|
141 | return index_paths_ | |
142 |
|
142 | |||
143 | def get_node(self, repo, path, index_rev=None): |
|
143 | def get_node(self, repo, path, index_rev=None): | |
144 | """ |
|
144 | """ | |
145 | gets a filenode based on the given full path. It operates on strings for |
|
145 | gets a filenode based on the given full path. It operates on strings for |
146 | hg/git compatibility. |
|
146 | hg/git compatibility. |
147 |
|
147 | |||
148 | :param repo: scm repo instance |
|
148 | :param repo: scm repo instance | |
149 | :param path: full path including root location |
|
149 | :param path: full path including root location | |
150 | :return: FileNode |
|
150 | :return: FileNode | |
151 | """ |
|
151 | """ | |
152 | # FIXME: paths should be normalized ... or even better: don't include repo.path |
|
152 | # FIXME: paths should be normalized ... or even better: don't include repo.path | |
153 | path = safe_str(path) |
|
153 | path = safe_str(path) | |
154 | repo_path = safe_str(repo.path) |
|
154 | repo_path = safe_str(repo.path) | |
155 | assert path.startswith(repo_path) |
|
155 | assert path.startswith(repo_path) | |
156 | assert path[len(repo_path)] in (os.path.sep, os.path.altsep) |
|
156 | assert path[len(repo_path)] in (os.path.sep, os.path.altsep) | |
157 | node_path = path[len(repo_path) + 1:] |
|
157 | node_path = path[len(repo_path) + 1:] | |
158 | cs = self._get_index_changeset(repo, index_rev=index_rev) |
|
158 | cs = self._get_index_changeset(repo, index_rev=index_rev) | |
159 | node = cs.get_node(node_path) |
|
159 | node = cs.get_node(node_path) | |
160 | return node |
|
160 | return node | |
161 |
|
161 | |||
162 | def is_indexable_node(self, node): |
|
162 | def is_indexable_node(self, node): | |
163 | """ |
|
163 | """ | |
164 | Just index the content of chosen files, skipping binary files |
|
164 | Just index the content of chosen files, skipping binary files | |
165 | """ |
|
165 | """ | |
166 | return (node.extension in INDEX_EXTENSIONS or node.name in INDEX_FILENAMES) and \ |
|
166 | return (node.extension in INDEX_EXTENSIONS or node.name in INDEX_FILENAMES) and \ | |
167 | not node.is_binary |
|
167 | not node.is_binary | |
168 |
|
168 | |||
169 | def get_node_mtime(self, node): |
|
169 | def get_node_mtime(self, node): | |
170 | return mktime(node.last_changeset.date.timetuple()) |
|
170 | return mktime(node.last_changeset.date.timetuple()) | |
171 |
|
171 | |||
172 | def add_doc(self, writer, path, repo, repo_name, index_rev=None): |
|
172 | def add_doc(self, writer, path, repo, repo_name, index_rev=None): | |
173 | """ |
|
173 | """ | |
174 | Add a doc to the writer; this function itself fetches the data from |
|
174 | Add a doc to the writer; this function itself fetches the data from |
175 | the instance of the vcs backend |
|
175 | the instance of the vcs backend |
176 | """ |
|
176 | """ | |
177 | try: |
|
177 | try: | |
178 | node = self.get_node(repo, path, index_rev) |
|
178 | node = self.get_node(repo, path, index_rev) | |
179 | except (ChangesetError, NodeDoesNotExistError): |
|
179 | except (ChangesetError, NodeDoesNotExistError): | |
180 | log.debug(" >> %s - not found in %s %s", path, repo, index_rev) |
|
180 | log.debug(" >> %s - not found in %s %s", path, repo, index_rev) | |
181 | return 0, 0 |
|
181 | return 0, 0 | |
182 |
|
182 | |||
183 | indexed = indexed_w_content = 0 |
|
183 | indexed = indexed_w_content = 0 | |
184 | if self.is_indexable_node(node): |
|
184 | if self.is_indexable_node(node): | |
185 |
|
|
185 | bytes_content = node.content | |
186 |
if |
|
186 | if b'\0' in bytes_content: | |
187 | log.warning(' >> %s - no text content', path) |
|
187 | log.warning(' >> %s - no text content', path) | |
188 | u_content = u'' |
|
188 | u_content = u'' | |
189 | else: |
|
189 | else: | |
190 | log.debug(' >> %s', path) |
|
190 | log.debug(' >> %s', path) | |
|
191 | u_content = safe_unicode(bytes_content) | |||
191 | indexed_w_content += 1 |
|
192 | indexed_w_content += 1 | |
192 |
|
193 | |||
193 | else: |
|
194 | else: | |
194 | log.debug(' >> %s - not indexable', path) |
|
195 | log.debug(' >> %s - not indexable', path) | |
195 | # just index the file name without its content |
|
196 | # just index the file name without its content |
196 | u_content = u'' |
|
197 | u_content = u'' | |
197 | indexed += 1 |
|
198 | indexed += 1 | |
198 |
|
199 | |||
199 | p = safe_unicode(path) |
|
200 | p = safe_unicode(path) | |
200 | writer.add_document( |
|
201 | writer.add_document( | |
201 | fileid=p, |
|
202 | fileid=p, | |
202 | owner=unicode(repo.contact), |
|
203 | owner=unicode(repo.contact), | |
203 | repository_rawname=safe_unicode(repo_name), |
|
204 | repository_rawname=safe_unicode(repo_name), | |
204 | repository=safe_unicode(repo_name), |
|
205 | repository=safe_unicode(repo_name), | |
205 | path=p, |
|
206 | path=p, | |
206 | content=u_content, |
|
207 | content=u_content, | |
207 | modtime=self.get_node_mtime(node), |
|
208 | modtime=self.get_node_mtime(node), | |
208 | extension=node.extension |
|
209 | extension=node.extension | |
209 | ) |
|
210 | ) | |
210 | return indexed, indexed_w_content |
|
211 | return indexed, indexed_w_content | |
211 |
|
212 | |||
212 | def index_changesets(self, writer, repo_name, repo, start_rev=None): |
|
213 | def index_changesets(self, writer, repo_name, repo, start_rev=None): | |
213 | """ |
|
214 | """ | |
214 | Add all changeset in the vcs repo starting at start_rev |
|
215 | Add all changeset in the vcs repo starting at start_rev | |
215 | to the index writer |
|
216 | to the index writer | |
216 |
|
217 | |||
217 | :param writer: the whoosh index writer to add to |
|
218 | :param writer: the whoosh index writer to add to | |
218 | :param repo_name: name of the repository from whence the |
|
219 | :param repo_name: name of the repository from whence the | |
219 | changeset originates including the repository group |
|
220 | changeset originates including the repository group | |
220 | :param repo: the vcs repository instance to index changesets for, |
|
221 | :param repo: the vcs repository instance to index changesets for, | |
221 | the presumption is the repo has changesets to index |
|
222 | the presumption is the repo has changesets to index | |
222 | :param start_rev=None: the full sha id to start indexing from |
|
223 | :param start_rev=None: the full sha id to start indexing from | |
223 | if start_rev is None then index from the first changeset in |
|
224 | if start_rev is None then index from the first changeset in | |
224 | the repo |
|
225 | the repo | |
225 | """ |
|
226 | """ | |
226 |
|
227 | |||
227 | if start_rev is None: |
|
228 | if start_rev is None: | |
228 | start_rev = repo[0].raw_id |
|
229 | start_rev = repo[0].raw_id | |
229 |
|
230 | |||
230 | log.debug('Indexing changesets in %s, starting at rev %s', |
|
231 | log.debug('Indexing changesets in %s, starting at rev %s', | |
231 | repo_name, start_rev) |
|
232 | repo_name, start_rev) | |
232 |
|
233 | |||
233 | indexed = 0 |
|
234 | indexed = 0 | |
234 | cs_iter = repo.get_changesets(start=start_rev) |
|
235 | cs_iter = repo.get_changesets(start=start_rev) | |
235 | total = len(cs_iter) |
|
236 | total = len(cs_iter) | |
236 | for cs in cs_iter: |
|
237 | for cs in cs_iter: | |
237 | indexed += 1 |
|
238 | indexed += 1 | |
238 | log.debug(' >> %s %s/%s', cs, indexed, total) |
|
239 | log.debug(' >> %s %s/%s', cs, indexed, total) | |
239 | writer.add_document( |
|
240 | writer.add_document( | |
240 | raw_id=unicode(cs.raw_id), |
|
241 | raw_id=unicode(cs.raw_id), | |
241 | owner=unicode(repo.contact), |
|
242 | owner=unicode(repo.contact), | |
242 | date=cs._timestamp, |
|
243 | date=cs._timestamp, | |
243 | repository_rawname=safe_unicode(repo_name), |
|
244 | repository_rawname=safe_unicode(repo_name), | |
244 | repository=safe_unicode(repo_name), |
|
245 | repository=safe_unicode(repo_name), | |
245 | author=cs.author, |
|
246 | author=cs.author, | |
246 | message=cs.message, |
|
247 | message=cs.message, | |
247 | last=cs.last, |
|
248 | last=cs.last, | |
248 | added=u' '.join([safe_unicode(node.path) for node in cs.added]).lower(), |
|
249 | added=u' '.join([safe_unicode(node.path) for node in cs.added]).lower(), | |
249 | removed=u' '.join([safe_unicode(node.path) for node in cs.removed]).lower(), |
|
250 | removed=u' '.join([safe_unicode(node.path) for node in cs.removed]).lower(), | |
250 | changed=u' '.join([safe_unicode(node.path) for node in cs.changed]).lower(), |
|
251 | changed=u' '.join([safe_unicode(node.path) for node in cs.changed]).lower(), | |
251 | parents=u' '.join([cs.raw_id for cs in cs.parents]), |
|
252 | parents=u' '.join([cs.raw_id for cs in cs.parents]), | |
252 | ) |
|
253 | ) | |
253 |
|
254 | |||
254 | return indexed |
|
255 | return indexed | |
255 |
|
256 | |||
256 | def index_files(self, file_idx_writer, repo_name, repo): |
|
257 | def index_files(self, file_idx_writer, repo_name, repo): | |
257 | """ |
|
258 | """ | |
258 | Index files for given repo_name |
|
259 | Index files for given repo_name | |
259 |
|
260 | |||
260 | :param file_idx_writer: the whoosh index writer to add to |
|
261 | :param file_idx_writer: the whoosh index writer to add to | |
261 | :param repo_name: name of the repository we're indexing |
|
262 | :param repo_name: name of the repository we're indexing | |
262 | :param repo: instance of vcs repo |
|
263 | :param repo: instance of vcs repo | |
263 | """ |
|
264 | """ | |
264 | i_cnt = iwc_cnt = 0 |
|
265 | i_cnt = iwc_cnt = 0 | |
265 | log.debug('Building file index for %s @revision:%s', repo_name, |
|
266 | log.debug('Building file index for %s @revision:%s', repo_name, | |
266 | self._get_index_revision(repo)) |
|
267 | self._get_index_revision(repo)) | |
267 | index_rev = self._get_index_revision(repo) |
|
268 | index_rev = self._get_index_revision(repo) | |
268 | for idx_path in self.get_paths(repo): |
|
269 | for idx_path in self.get_paths(repo): | |
269 | i, iwc = self.add_doc(file_idx_writer, idx_path, repo, repo_name, index_rev) |
|
270 | i, iwc = self.add_doc(file_idx_writer, idx_path, repo, repo_name, index_rev) | |
270 | i_cnt += i |
|
271 | i_cnt += i | |
271 | iwc_cnt += iwc |
|
272 | iwc_cnt += iwc | |
272 |
|
273 | |||
273 | log.debug('added %s files %s with content for repo %s', |
|
274 | log.debug('added %s files %s with content for repo %s', | |
274 | i_cnt + iwc_cnt, iwc_cnt, repo.path) |
|
275 | i_cnt + iwc_cnt, iwc_cnt, repo.path) | |
275 | return i_cnt, iwc_cnt |
|
276 | return i_cnt, iwc_cnt | |
276 |
|
277 | |||
277 | def update_changeset_index(self): |
|
278 | def update_changeset_index(self): | |
278 | idx = open_dir(self.index_location, indexname=CHGSET_IDX_NAME) |
|
279 | idx = open_dir(self.index_location, indexname=CHGSET_IDX_NAME) | |
279 |
|
280 | |||
280 | with idx.searcher() as searcher: |
|
281 | with idx.searcher() as searcher: | |
281 | writer = idx.writer() |
|
282 | writer = idx.writer() | |
282 | writer_is_dirty = False |
|
283 | writer_is_dirty = False | |
283 | try: |
|
284 | try: | |
284 | indexed_total = 0 |
|
285 | indexed_total = 0 | |
285 | repo_name = None |
|
286 | repo_name = None | |
286 | for repo_name, repo in sorted(self.repo_paths.items()): |
|
287 | for repo_name, repo in sorted(self.repo_paths.items()): | |
287 | log.debug('Updating changeset index for repo %s', repo_name) |
|
288 | log.debug('Updating changeset index for repo %s', repo_name) | |
288 | # skip indexing if there aren't any revs in the repo |
|
289 | # skip indexing if there aren't any revs in the repo | |
289 | num_of_revs = len(repo) |
|
290 | num_of_revs = len(repo) | |
290 | if num_of_revs < 1: |
|
291 | if num_of_revs < 1: | |
291 | continue |
|
292 | continue | |
292 |
|
293 | |||
293 | qp = QueryParser('repository', schema=CHGSETS_SCHEMA) |
|
294 | qp = QueryParser('repository', schema=CHGSETS_SCHEMA) | |
294 | q = qp.parse(u"last:t AND %s" % repo_name) |
|
295 | q = qp.parse(u"last:t AND %s" % repo_name) | |
295 |
|
296 | |||
296 | results = searcher.search(q) |
|
297 | results = searcher.search(q) | |
297 |
|
298 | |||
298 | # default to scanning the entire repo |
|
299 | # default to scanning the entire repo | |
299 | last_rev = 0 |
|
300 | last_rev = 0 | |
300 | start_id = None |
|
301 | start_id = None | |
301 |
|
302 | |||
302 | if len(results) > 0: |
|
303 | if len(results) > 0: | |
303 | # assuming that there is only one result, if not this |
|
304 | # assuming that there is only one result, if not this | |
304 | # may require a full re-index. |
|
305 | # may require a full re-index. | |
305 | start_id = results[0]['raw_id'] |
|
306 | start_id = results[0]['raw_id'] | |
306 | last_rev = repo.get_changeset(revision=start_id).revision |
|
307 | last_rev = repo.get_changeset(revision=start_id).revision | |
307 |
|
308 | |||
308 | # there are new changesets to index or a new repo to index |
|
309 | # there are new changesets to index or a new repo to index | |
309 | if last_rev == 0 or num_of_revs > last_rev + 1: |
|
310 | if last_rev == 0 or num_of_revs > last_rev + 1: | |
310 | # delete the docs in the index for the previous |
|
311 | # delete the docs in the index for the previous | |
311 | # last changeset(s) |
|
312 | # last changeset(s) | |
312 | for hit in results: |
|
313 | for hit in results: | |
313 | q = qp.parse(u"last:t AND %s AND raw_id:%s" % |
|
314 | q = qp.parse(u"last:t AND %s AND raw_id:%s" % | |
314 | (repo_name, hit['raw_id'])) |
|
315 | (repo_name, hit['raw_id'])) | |
315 | writer.delete_by_query(q) |
|
316 | writer.delete_by_query(q) | |
316 |
|
317 | |||
317 | # index from the previous last changeset + all new ones |
|
318 | # index from the previous last changeset + all new ones | |
318 | indexed_total += self.index_changesets(writer, |
|
319 | indexed_total += self.index_changesets(writer, | |
319 | repo_name, repo, start_id) |
|
320 | repo_name, repo, start_id) | |
320 | writer_is_dirty = True |
|
321 | writer_is_dirty = True | |
321 | log.debug('indexed %s changesets for repo %s', |
|
322 | log.debug('indexed %s changesets for repo %s', | |
322 | indexed_total, repo_name |
|
323 | indexed_total, repo_name | |
323 | ) |
|
324 | ) | |
324 | finally: |
|
325 | finally: | |
325 | if writer_is_dirty: |
|
326 | if writer_is_dirty: | |
326 | log.debug('>> COMMITTING CHANGES TO CHANGESET INDEX <<') |
|
327 | log.debug('>> COMMITTING CHANGES TO CHANGESET INDEX <<') |
327 | writer.commit(merge=True) |
|
328 | writer.commit(merge=True) | |
328 | log.debug('>>> FINISHED REBUILDING CHANGESET INDEX <<<') |
|
329 | log.debug('>>> FINISHED REBUILDING CHANGESET INDEX <<<') | |
329 | else: |
|
330 | else: | |
330 | log.debug('>> NOTHING TO COMMIT TO CHANGESET INDEX <<') |
|
331 | log.debug('>> NOTHING TO COMMIT TO CHANGESET INDEX <<') |
331 |
|
332 | |||
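
Once update_changeset_index has committed, the resulting index can be queried directly with whoosh. A rough sketch with a placeholder index path; the default field and the stored raw_id mirror the documents written by index_changesets above.

    from whoosh.index import open_dir
    from whoosh.qparser import QueryParser

    from kallithea.lib.indexers import CHGSET_IDX_NAME, CHGSETS_SCHEMA

    idx = open_dir('/path/to/data/index', indexname=CHGSET_IDX_NAME)  # placeholder path
    with idx.searcher() as searcher:
        qp = QueryParser('message', schema=CHGSETS_SCHEMA)
        for hit in searcher.search(qp.parse(u'fix AND repository:my-repo'), limit=10):
            print hit['raw_id']
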
332 | def update_file_index(self): |
|
333 | def update_file_index(self): | |
333 | log.debug((u'STARTING INCREMENTAL INDEXING UPDATE FOR EXTENSIONS %s ' |
|
334 | log.debug((u'STARTING INCREMENTAL INDEXING UPDATE FOR EXTENSIONS %s ' | |
334 | 'AND REPOS %s') % (INDEX_EXTENSIONS, self.repo_paths.keys())) |
|
335 | 'AND REPOS %s') % (INDEX_EXTENSIONS, self.repo_paths.keys())) | |
335 |
|
336 | |||
336 | idx = open_dir(self.index_location, indexname=self.indexname) |
|
337 | idx = open_dir(self.index_location, indexname=self.indexname) | |
337 | # The set of all paths in the index |
|
338 | # The set of all paths in the index | |
338 | indexed_paths = set() |
|
339 | indexed_paths = set() | |
339 | # The set of all paths we need to re-index |
|
340 | # The set of all paths we need to re-index | |
340 | to_index = set() |
|
341 | to_index = set() | |
341 |
|
342 | |||
342 | writer = idx.writer() |
|
343 | writer = idx.writer() | |
343 | writer_is_dirty = False |
|
344 | writer_is_dirty = False | |
344 | try: |
|
345 | try: | |
345 | with idx.reader() as reader: |
|
346 | with idx.reader() as reader: | |
346 |
|
347 | |||
347 | # Loop over the stored fields in the index |
|
348 | # Loop over the stored fields in the index | |
348 | for fields in reader.all_stored_fields(): |
|
349 | for fields in reader.all_stored_fields(): | |
349 | indexed_path = fields['path'] |
|
350 | indexed_path = fields['path'] | |
350 | indexed_repo_path = fields['repository'] |
|
351 | indexed_repo_path = fields['repository'] | |
351 | indexed_paths.add(indexed_path) |
|
352 | indexed_paths.add(indexed_path) | |
352 |
|
353 | |||
353 | if indexed_repo_path not in self.filtered_repo_update_paths: |
|
354 | if indexed_repo_path not in self.filtered_repo_update_paths: | |
354 | continue |
|
355 | continue | |
355 |
|
356 | |||
356 | repo = self.repo_paths[indexed_repo_path] |
|
357 | repo = self.repo_paths[indexed_repo_path] | |
357 |
|
358 | |||
358 | try: |
|
359 | try: | |
359 | node = self.get_node(repo, indexed_path) |
|
360 | node = self.get_node(repo, indexed_path) | |
360 | # Check if this file was changed since it was indexed |
|
361 | # Check if this file was changed since it was indexed | |
361 | indexed_time = fields['modtime'] |
|
362 | indexed_time = fields['modtime'] | |
362 | mtime = self.get_node_mtime(node) |
|
363 | mtime = self.get_node_mtime(node) | |
363 | if mtime > indexed_time: |
|
364 | if mtime > indexed_time: | |
364 | # The file has changed, delete it and add it to |
|
365 | # The file has changed, delete it and add it to | |
365 | # the list of files to reindex |
|
366 | # the list of files to reindex | |
366 | log.debug( |
|
367 | log.debug( | |
367 | 'adding to reindex list %s mtime: %s vs %s', |
|
368 | 'adding to reindex list %s mtime: %s vs %s', | |
368 | indexed_path, mtime, indexed_time |
|
369 | indexed_path, mtime, indexed_time | |
369 | ) |
|
370 | ) | |
370 | writer.delete_by_term('fileid', indexed_path) |
|
371 | writer.delete_by_term('fileid', indexed_path) | |
371 | writer_is_dirty = True |
|
372 | writer_is_dirty = True | |
372 |
|
373 | |||
373 | to_index.add(indexed_path) |
|
374 | to_index.add(indexed_path) | |
374 | except (ChangesetError, NodeDoesNotExistError): |
|
375 | except (ChangesetError, NodeDoesNotExistError): | |
375 | # This file was deleted since it was indexed |
|
376 | # This file was deleted since it was indexed | |
376 | log.debug('removing from index %s', indexed_path) |
|
377 | log.debug('removing from index %s', indexed_path) | |
377 | writer.delete_by_term('path', indexed_path) |
|
378 | writer.delete_by_term('path', indexed_path) | |
378 | writer_is_dirty = True |
|
379 | writer_is_dirty = True | |
379 |
|
380 | |||
380 | # Loop over the files in the filesystem |
|
381 | # Loop over the files in the filesystem | |
381 | # Assume we have a function that gathers the filenames of the |
|
382 | # Assume we have a function that gathers the filenames of the | |
382 | # documents to be indexed |
|
383 | # documents to be indexed | |
383 | ri_cnt_total = 0 # indexed |
|
384 | ri_cnt_total = 0 # indexed | |
384 | riwc_cnt_total = 0 # indexed with content |
|
385 | riwc_cnt_total = 0 # indexed with content | |
385 | for repo_name, repo in sorted(self.repo_paths.items()): |
|
386 | for repo_name, repo in sorted(self.repo_paths.items()): | |
386 | log.debug('Updating file index for repo %s', repo_name) |
|
387 | log.debug('Updating file index for repo %s', repo_name) | |
387 | # skip indexing if there aren't any revisions |
|
388 | # skip indexing if there aren't any revisions | |
388 | if len(repo) < 1: |
|
389 | if len(repo) < 1: | |
389 | continue |
|
390 | continue | |
390 | ri_cnt = 0 # indexed |
|
391 | ri_cnt = 0 # indexed | |
391 | riwc_cnt = 0 # indexed with content |
|
392 | riwc_cnt = 0 # indexed with content | |
392 | for path in self.get_paths(repo): |
|
393 | for path in self.get_paths(repo): | |
393 | path = safe_unicode(path) |
|
394 | path = safe_unicode(path) | |
394 | if path in to_index or path not in indexed_paths: |
|
395 | if path in to_index or path not in indexed_paths: | |
395 |
|
396 | |||
396 | # This is either a file that's changed, or a new file |
|
397 | # This is either a file that's changed, or a new file | |
397 | # that wasn't indexed before. So index it! |
|
398 | # that wasn't indexed before. So index it! | |
398 | i, iwc = self.add_doc(writer, path, repo, repo_name) |
|
399 | i, iwc = self.add_doc(writer, path, repo, repo_name) | |
399 | writer_is_dirty = True |
|
400 | writer_is_dirty = True | |
400 | ri_cnt += i |
|
401 | ri_cnt += i | |
401 | ri_cnt_total += 1 |
|
402 | ri_cnt_total += 1 | |
402 | riwc_cnt += iwc |
|
403 | riwc_cnt += iwc | |
403 | riwc_cnt_total += iwc |
|
404 | riwc_cnt_total += iwc | |
404 | log.debug('added %s files %s with content for repo %s', |
|
405 | log.debug('added %s files %s with content for repo %s', | |
405 | ri_cnt + riwc_cnt, riwc_cnt, repo.path |
|
406 | ri_cnt + riwc_cnt, riwc_cnt, repo.path | |
406 | ) |
|
407 | ) | |
407 | log.debug('indexed %s files in total and %s with content', |
|
408 | log.debug('indexed %s files in total and %s with content', | |
408 | ri_cnt_total, riwc_cnt_total |
|
409 | ri_cnt_total, riwc_cnt_total | |
409 | ) |
|
410 | ) | |
410 | finally: |
|
411 | finally: | |
411 | if writer_is_dirty: |
|
412 | if writer_is_dirty: | |
412 | log.debug('>> COMMITTING CHANGES TO FILE INDEX <<') |
|
413 | log.debug('>> COMMITTING CHANGES TO FILE INDEX <<') |
413 | writer.commit(merge=True) |
|
414 | writer.commit(merge=True) | |
414 | log.debug('>>> FINISHED REBUILDING FILE INDEX <<<') |
|
415 | log.debug('>>> FINISHED REBUILDING FILE INDEX <<<') | |
415 | else: |
|
416 | else: | |
416 | log.debug('>> NOTHING TO COMMIT TO FILE INDEX <<') |
|
417 | log.debug('>> NOTHING TO COMMIT TO FILE INDEX <<') | |
417 | writer.cancel() |
|
418 | writer.cancel() | |
418 |
|
419 | |||
419 | def build_indexes(self): |
|
420 | def build_indexes(self): | |
420 | if os.path.exists(self.index_location): |
|
421 | if os.path.exists(self.index_location): | |
421 | log.debug('removing previous index') |
|
422 | log.debug('removing previous index') | |
422 | rmtree(self.index_location) |
|
423 | rmtree(self.index_location) | |
423 |
|
424 | |||
424 | if not os.path.exists(self.index_location): |
|
425 | if not os.path.exists(self.index_location): | |
425 | os.mkdir(self.index_location) |
|
426 | os.mkdir(self.index_location) | |
426 |
|
427 | |||
427 | chgset_idx = create_in(self.index_location, CHGSETS_SCHEMA, |
|
428 | chgset_idx = create_in(self.index_location, CHGSETS_SCHEMA, | |
428 | indexname=CHGSET_IDX_NAME) |
|
429 | indexname=CHGSET_IDX_NAME) | |
429 | chgset_idx_writer = chgset_idx.writer() |
|
430 | chgset_idx_writer = chgset_idx.writer() | |
430 |
|
431 | |||
431 | file_idx = create_in(self.index_location, SCHEMA, indexname=IDX_NAME) |
|
432 | file_idx = create_in(self.index_location, SCHEMA, indexname=IDX_NAME) | |
432 | file_idx_writer = file_idx.writer() |
|
433 | file_idx_writer = file_idx.writer() | |
433 | log.debug('BUILDING INDEX FOR EXTENSIONS %s ' |
|
434 | log.debug('BUILDING INDEX FOR EXTENSIONS %s ' | |
434 | 'AND REPOS %s' % (INDEX_EXTENSIONS, self.repo_paths.keys())) |
|
435 | 'AND REPOS %s' % (INDEX_EXTENSIONS, self.repo_paths.keys())) | |
435 |
|
436 | |||
436 | for repo_name, repo in sorted(self.repo_paths.items()): |
|
437 | for repo_name, repo in sorted(self.repo_paths.items()): | |
437 | log.debug('Updating indices for repo %s', repo_name) |
|
438 | log.debug('Updating indices for repo %s', repo_name) | |
438 | # skip indexing if there aren't any revisions |
|
439 | # skip indexing if there aren't any revisions | |
439 | if len(repo) < 1: |
|
440 | if len(repo) < 1: | |
440 | continue |
|
441 | continue | |
441 |
|
442 | |||
442 | self.index_files(file_idx_writer, repo_name, repo) |
|
443 | self.index_files(file_idx_writer, repo_name, repo) | |
443 | self.index_changesets(chgset_idx_writer, repo_name, repo) |
|
444 | self.index_changesets(chgset_idx_writer, repo_name, repo) | |
444 |
|
445 | |||
445 | log.debug('>> COMMITTING CHANGES <<') |
|
446 | log.debug('>> COMMITTING CHANGES <<') | |
446 | file_idx_writer.commit(merge=True) |
|
447 | file_idx_writer.commit(merge=True) | |
447 | chgset_idx_writer.commit(merge=True) |
|
448 | chgset_idx_writer.commit(merge=True) | |
448 | log.debug('>>> FINISHED BUILDING INDEX <<<') |
|
449 | log.debug('>>> FINISHED BUILDING INDEX <<<') | |
449 |
|
450 | |||
450 | def update_indexes(self): |
|
451 | def update_indexes(self): | |
451 | self.update_file_index() |
|
452 | self.update_file_index() | |
452 | self.update_changeset_index() |
|
453 | self.update_changeset_index() | |
453 |
|
454 | |||
454 | def run(self, full_index=False): |
|
455 | def run(self, full_index=False): | |
455 | """Run daemon""" |
|
456 | """Run daemon""" | |
456 | if full_index or self.initial: |
|
457 | if full_index or self.initial: | |
457 | self.build_indexes() |
|
458 | self.build_indexes() | |
458 | else: |
|
459 | else: | |
459 | self.update_indexes() |
|
460 | self.update_indexes() |
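The update/commit flow above is Whoosh's standard incremental-indexing recipe: stale documents are removed by their unique 'path' term, new or changed files are (re)added via the writer, and the writer is committed only when something actually changed. A minimal standalone sketch of the same recipe, using a made-up schema, index directory and file list rather than Kallithea's own:

    import os

    from whoosh.fields import ID, TEXT, Schema
    from whoosh.index import create_in, exists_in, open_dir

    FILE_SCHEMA = Schema(path=ID(stored=True, unique=True), content=TEXT)

    def update_file_index(index_dir, files):
        """files: dict mapping path -> current text content of that file."""
        if not os.path.exists(index_dir):
            os.mkdir(index_dir)
        idx = open_dir(index_dir) if exists_in(index_dir) else create_in(index_dir, FILE_SCHEMA)
        writer = idx.writer()
        writer_is_dirty = False
        reader = idx.reader()
        indexed_paths = set(fields['path'] for fields in reader.all_stored_fields())
        reader.close()
        for path in indexed_paths - set(files):
            writer.delete_by_term('path', path)  # file was deleted since it was indexed
            writer_is_dirty = True
        for path, text in files.items():
            # add new documents and replace changed ones (matched on the unique 'path' field);
            # a real updater would skip files whose mtime predates the last run
            writer.update_document(path=path, content=text)
            writer_is_dirty = True
        if writer_is_dirty:
            writer.commit(merge=True)
        else:
            writer.cancel()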
@@ -1,201 +1,197 b'' | |||||
1 | import datetime |
|
1 | import datetime | |
2 | import posixpath |
|
2 | import posixpath | |
3 | import stat |
|
3 | import stat | |
4 | import time |
|
4 | import time | |
5 |
|
5 | |||
6 | from dulwich import objects |
|
6 | from dulwich import objects | |
7 |
|
7 | |||
8 | from kallithea.lib.vcs.backends.base import BaseInMemoryChangeset |
|
8 | from kallithea.lib.vcs.backends.base import BaseInMemoryChangeset | |
9 | from kallithea.lib.vcs.exceptions import RepositoryError |
|
9 | from kallithea.lib.vcs.exceptions import RepositoryError | |
10 | from kallithea.lib.vcs.utils import safe_bytes, safe_str |
|
10 | from kallithea.lib.vcs.utils import safe_bytes, safe_str | |
11 |
|
11 | |||
12 |
|
12 | |||
13 | class GitInMemoryChangeset(BaseInMemoryChangeset): |
|
13 | class GitInMemoryChangeset(BaseInMemoryChangeset): | |
14 |
|
14 | |||
15 | def commit(self, message, author, parents=None, branch=None, date=None, |
|
15 | def commit(self, message, author, parents=None, branch=None, date=None, | |
16 | **kwargs): |
|
16 | **kwargs): | |
17 | """ |
|
17 | """ | |
18 | Performs in-memory commit (doesn't check workdir in any way) and |
|
18 | Performs in-memory commit (doesn't check workdir in any way) and | |
19 | returns newly created ``Changeset``. Updates repository's |
|
19 | returns newly created ``Changeset``. Updates repository's | |
20 | ``revisions``. |
|
20 | ``revisions``. | |
21 |
|
21 | |||
22 | :param message: message of the commit |
|
22 | :param message: message of the commit | |
23 | :param author: full username, i.e. "Joe Doe <joe.doe@example.com>" |
|
23 | :param author: full username, i.e. "Joe Doe <joe.doe@example.com>" | |
24 | :param parents: single parent or sequence of parents from which commit |
|
24 | :param parents: single parent or sequence of parents from which commit | |
25 | would be derived |
|
25 | would be derived | |
26 | :param date: ``datetime.datetime`` instance. Defaults to |
|
26 | :param date: ``datetime.datetime`` instance. Defaults to | |
27 | ``datetime.datetime.now()``. |
|
27 | ``datetime.datetime.now()``. | |
28 | :param branch: branch name, as string. If none given, the backend's |
|
28 | :param branch: branch name, as string. If none given, the backend's | |
29 | default branch will be used. |
|
29 | default branch will be used. | |
30 |
|
30 | |||
31 | :raises ``CommitError``: if any error occurs while committing |
|
31 | :raises ``CommitError``: if any error occurs while committing | |
32 | """ |
|
32 | """ | |
33 | self.check_integrity(parents) |
|
33 | self.check_integrity(parents) | |
34 |
|
34 | |||
35 | from .repository import GitRepository |
|
35 | from .repository import GitRepository | |
36 | if branch is None: |
|
36 | if branch is None: | |
37 | branch = GitRepository.DEFAULT_BRANCH_NAME |
|
37 | branch = GitRepository.DEFAULT_BRANCH_NAME | |
38 |
|
38 | |||
39 | repo = self.repository._repo |
|
39 | repo = self.repository._repo | |
40 | object_store = repo.object_store |
|
40 | object_store = repo.object_store | |
41 |
|
41 | |||
42 | ENCODING = "UTF-8" # TODO: should probably be kept in sync with safe_unicode/safe_bytes and vcs/conf/settings.py DEFAULT_ENCODINGS |
|
42 | ENCODING = "UTF-8" # TODO: should probably be kept in sync with safe_unicode/safe_bytes and vcs/conf/settings.py DEFAULT_ENCODINGS | |
43 |
|
43 | |||
44 | # Create tree and populate it with blobs |
|
44 | # Create tree and populate it with blobs | |
45 | commit_tree = self.parents[0] and repo[self.parents[0]._commit.tree] or \ |
|
45 | commit_tree = self.parents[0] and repo[self.parents[0]._commit.tree] or \ | |
46 | objects.Tree() |
|
46 | objects.Tree() | |
47 | for node in self.added + self.changed: |
|
47 | for node in self.added + self.changed: | |
48 | # Compute subdirs if needed |
|
48 | # Compute subdirs if needed | |
49 | dirpath, nodename = posixpath.split(node.path) |
|
49 | dirpath, nodename = posixpath.split(node.path) | |
50 | dirnames = safe_str(dirpath).split('/') if dirpath else [] |
|
50 | dirnames = safe_str(dirpath).split('/') if dirpath else [] | |
51 | parent = commit_tree |
|
51 | parent = commit_tree | |
52 | ancestors = [('', parent)] |
|
52 | ancestors = [('', parent)] | |
53 |
|
53 | |||
54 | # Tries to dig for the deepest existing tree |
|
54 | # Tries to dig for the deepest existing tree | |
55 | while dirnames: |
|
55 | while dirnames: | |
56 | curdir = dirnames.pop(0) |
|
56 | curdir = dirnames.pop(0) | |
57 | try: |
|
57 | try: | |
58 | dir_id = parent[curdir][1] |
|
58 | dir_id = parent[curdir][1] | |
59 | except KeyError: |
|
59 | except KeyError: | |
60 | # put curdir back into dirnames and stop |
|
60 | # put curdir back into dirnames and stop | |
61 | dirnames.insert(0, curdir) |
|
61 | dirnames.insert(0, curdir) | |
62 | break |
|
62 | break | |
63 | else: |
|
63 | else: | |
64 | # If found, updates parent |
|
64 | # If found, updates parent | |
65 | parent = self.repository._repo[dir_id] |
|
65 | parent = self.repository._repo[dir_id] | |
66 | ancestors.append((curdir, parent)) |
|
66 | ancestors.append((curdir, parent)) | |
67 | # Now parent is deepest existing tree and we need to create subtrees |
|
67 | # Now parent is deepest existing tree and we need to create subtrees | |
68 | # for dirnames (in reverse order) [this only applies for nodes from added] |
|
68 | # for dirnames (in reverse order) [this only applies for nodes from added] | |
69 | new_trees = [] |
|
69 | new_trees = [] | |
70 |
|
70 | |||
71 | if not node.is_binary: |
|
71 | blob = objects.Blob.from_string(node.content) | |
72 | content = node.content.encode(ENCODING) |
|
|||
73 | else: |
|
|||
74 | content = node.content |
|
|||
75 | blob = objects.Blob.from_string(content) |
|
|||
76 |
|
72 | |||
77 | node_path = safe_bytes(node.name) |
|
73 | node_path = safe_bytes(node.name) | |
78 | if dirnames: |
|
74 | if dirnames: | |
79 | # If there are trees which should be created we need to build |
|
75 | # If there are trees which should be created we need to build | |
80 | # them now (in reverse order) |
|
76 | # them now (in reverse order) | |
81 | reversed_dirnames = list(reversed(dirnames)) |
|
77 | reversed_dirnames = list(reversed(dirnames)) | |
82 | curtree = objects.Tree() |
|
78 | curtree = objects.Tree() | |
83 | curtree[node_path] = node.mode, blob.id |
|
79 | curtree[node_path] = node.mode, blob.id | |
84 | new_trees.append(curtree) |
|
80 | new_trees.append(curtree) | |
85 | for dirname in reversed_dirnames[:-1]: |
|
81 | for dirname in reversed_dirnames[:-1]: | |
86 | newtree = objects.Tree() |
|
82 | newtree = objects.Tree() | |
87 | #newtree.add(stat.S_IFDIR, dirname, curtree.id) |
|
83 | #newtree.add(stat.S_IFDIR, dirname, curtree.id) | |
88 | newtree[dirname] = stat.S_IFDIR, curtree.id |
|
84 | newtree[dirname] = stat.S_IFDIR, curtree.id | |
89 | new_trees.append(newtree) |
|
85 | new_trees.append(newtree) | |
90 | curtree = newtree |
|
86 | curtree = newtree | |
91 | parent[reversed_dirnames[-1]] = stat.S_IFDIR, curtree.id |
|
87 | parent[reversed_dirnames[-1]] = stat.S_IFDIR, curtree.id | |
92 | else: |
|
88 | else: | |
93 | parent.add(name=node_path, mode=node.mode, hexsha=blob.id) |
|
89 | parent.add(name=node_path, mode=node.mode, hexsha=blob.id) | |
94 |
|
90 | |||
95 | new_trees.append(parent) |
|
91 | new_trees.append(parent) | |
96 | # Update ancestors |
|
92 | # Update ancestors | |
97 | for parent, tree, path in reversed([(a[1], b[1], b[0]) for a, b in |
|
93 | for parent, tree, path in reversed([(a[1], b[1], b[0]) for a, b in | |
98 | zip(ancestors, ancestors[1:])] |
|
94 | zip(ancestors, ancestors[1:])] | |
99 | ): |
|
95 | ): | |
100 | parent[path] = stat.S_IFDIR, tree.id |
|
96 | parent[path] = stat.S_IFDIR, tree.id | |
101 | object_store.add_object(tree) |
|
97 | object_store.add_object(tree) | |
102 |
|
98 | |||
103 | object_store.add_object(blob) |
|
99 | object_store.add_object(blob) | |
104 | for tree in new_trees: |
|
100 | for tree in new_trees: | |
105 | object_store.add_object(tree) |
|
101 | object_store.add_object(tree) | |
106 | for node in self.removed: |
|
102 | for node in self.removed: | |
107 | paths = node.path.split('/') |
|
103 | paths = node.path.split('/') | |
108 | tree = commit_tree |
|
104 | tree = commit_tree | |
109 | trees = [tree] |
|
105 | trees = [tree] | |
110 | # Traverse deep into the forest... |
|
106 | # Traverse deep into the forest... | |
111 | for path in paths: |
|
107 | for path in paths: | |
112 | try: |
|
108 | try: | |
113 | obj = self.repository._repo[tree[path][1]] |
|
109 | obj = self.repository._repo[tree[path][1]] | |
114 | if isinstance(obj, objects.Tree): |
|
110 | if isinstance(obj, objects.Tree): | |
115 | trees.append(obj) |
|
111 | trees.append(obj) | |
116 | tree = obj |
|
112 | tree = obj | |
117 | except KeyError: |
|
113 | except KeyError: | |
118 | break |
|
114 | break | |
119 | # Cut down the blob and all rotten trees on the way back... |
|
115 | # Cut down the blob and all rotten trees on the way back... | |
120 | for path, tree in reversed(zip(paths, trees)): |
|
116 | for path, tree in reversed(zip(paths, trees)): | |
121 | del tree[path] |
|
117 | del tree[path] | |
122 | if tree: |
|
118 | if tree: | |
123 | # This tree still has elements - don't remove it or any |
|
119 | # This tree still has elements - don't remove it or any | |
124 | # of its parents |
|
120 | # of its parents | |
125 | break |
|
121 | break | |
126 |
|
122 | |||
127 | object_store.add_object(commit_tree) |
|
123 | object_store.add_object(commit_tree) | |
128 |
|
124 | |||
129 | # Create commit |
|
125 | # Create commit | |
130 | commit = objects.Commit() |
|
126 | commit = objects.Commit() | |
131 | commit.tree = commit_tree.id |
|
127 | commit.tree = commit_tree.id | |
132 | commit.parents = [p._commit.id for p in self.parents if p] |
|
128 | commit.parents = [p._commit.id for p in self.parents if p] | |
133 | commit.author = commit.committer = safe_str(author) |
|
129 | commit.author = commit.committer = safe_str(author) | |
134 | commit.encoding = ENCODING |
|
130 | commit.encoding = ENCODING | |
135 | commit.message = safe_str(message) |
|
131 | commit.message = safe_str(message) | |
136 |
|
132 | |||
137 | # Compute date |
|
133 | # Compute date | |
138 | if date is None: |
|
134 | if date is None: | |
139 | date = time.time() |
|
135 | date = time.time() | |
140 | elif isinstance(date, datetime.datetime): |
|
136 | elif isinstance(date, datetime.datetime): | |
141 | date = time.mktime(date.timetuple()) |
|
137 | date = time.mktime(date.timetuple()) | |
142 |
|
138 | |||
143 | author_time = kwargs.pop('author_time', date) |
|
139 | author_time = kwargs.pop('author_time', date) | |
144 | commit.commit_time = int(date) |
|
140 | commit.commit_time = int(date) | |
145 | commit.author_time = int(author_time) |
|
141 | commit.author_time = int(author_time) | |
146 | tz = time.timezone |
|
142 | tz = time.timezone | |
147 | author_tz = kwargs.pop('author_timezone', tz) |
|
143 | author_tz = kwargs.pop('author_timezone', tz) | |
148 | commit.commit_timezone = tz |
|
144 | commit.commit_timezone = tz | |
149 | commit.author_timezone = author_tz |
|
145 | commit.author_timezone = author_tz | |
150 |
|
146 | |||
151 | object_store.add_object(commit) |
|
147 | object_store.add_object(commit) | |
152 |
|
148 | |||
153 | ref = 'refs/heads/%s' % branch |
|
149 | ref = 'refs/heads/%s' % branch | |
154 | repo.refs[ref] = commit.id |
|
150 | repo.refs[ref] = commit.id | |
155 |
|
151 | |||
156 | # Update vcs repository object & recreate dulwich repo |
|
152 | # Update vcs repository object & recreate dulwich repo | |
157 | self.repository.revisions.append(commit.id) |
|
153 | self.repository.revisions.append(commit.id) | |
158 | # invalidate parsed refs after commit |
|
154 | # invalidate parsed refs after commit | |
159 | self.repository._parsed_refs = self.repository._get_parsed_refs() |
|
155 | self.repository._parsed_refs = self.repository._get_parsed_refs() | |
160 | tip = self.repository.get_changeset() |
|
156 | tip = self.repository.get_changeset() | |
161 | self.reset() |
|
157 | self.reset() | |
162 | return tip |
|
158 | return tip | |
163 |
|
159 | |||
164 | def _get_missing_trees(self, path, root_tree): |
|
160 | def _get_missing_trees(self, path, root_tree): | |
165 | """ |
|
161 | """ | |
166 | Creates missing ``Tree`` objects for the given path. |
|
162 | Creates missing ``Tree`` objects for the given path. | |
167 |
|
163 | |||
168 | :param path: path given as a string. It may be a path to a file node |
|
164 | :param path: path given as a string. It may be a path to a file node | |
169 | (i.e. ``foo/bar/baz.txt``) or directory path - in that case it must |
|
165 | (i.e. ``foo/bar/baz.txt``) or directory path - in that case it must | |
170 | end with slash (i.e. ``foo/bar/``). |
|
166 | end with slash (i.e. ``foo/bar/``). | |
171 | :param root_tree: ``dulwich.objects.Tree`` object from which we start |
|
167 | :param root_tree: ``dulwich.objects.Tree`` object from which we start | |
172 | traversing (should be commit's root tree) |
|
168 | traversing (should be commit's root tree) | |
173 | """ |
|
169 | """ | |
174 | dirpath = posixpath.split(path)[0] |
|
170 | dirpath = posixpath.split(path)[0] | |
175 | dirs = dirpath.split('/') |
|
171 | dirs = dirpath.split('/') | |
176 | if not dirs or dirs == ['']: |
|
172 | if not dirs or dirs == ['']: | |
177 | return [] |
|
173 | return [] | |
178 |
|
174 | |||
179 | def get_tree_for_dir(tree, dirname): |
|
175 | def get_tree_for_dir(tree, dirname): | |
180 | for name, mode, id in tree.iteritems(): |
|
176 | for name, mode, id in tree.iteritems(): | |
181 | if name == dirname: |
|
177 | if name == dirname: | |
182 | obj = self.repository._repo[id] |
|
178 | obj = self.repository._repo[id] | |
183 | if isinstance(obj, objects.Tree): |
|
179 | if isinstance(obj, objects.Tree): | |
184 | return obj |
|
180 | return obj | |
185 | else: |
|
181 | else: | |
186 | raise RepositoryError("Cannot create directory %s " |
|
182 | raise RepositoryError("Cannot create directory %s " | |
187 | "at tree %s as path is occupied and is not a " |
|
183 | "at tree %s as path is occupied and is not a " | |
188 | "Tree" % (dirname, tree)) |
|
184 | "Tree" % (dirname, tree)) | |
189 | return None |
|
185 | return None | |
190 |
|
186 | |||
191 | trees = [] |
|
187 | trees = [] | |
192 | parent = root_tree |
|
188 | parent = root_tree | |
193 | for dirname in dirs: |
|
189 | for dirname in dirs: | |
194 | tree = get_tree_for_dir(parent, dirname) |
|
190 | tree = get_tree_for_dir(parent, dirname) | |
195 | if tree is None: |
|
191 | if tree is None: | |
196 | tree = objects.Tree() |
|
192 | tree = objects.Tree() | |
197 | parent.add(stat.S_IFDIR, dirname, tree.id) |
|
193 | parent.add(stat.S_IFDIR, dirname, tree.id) | |
198 | parent = tree |
|
194 | parent = tree | |
199 | # Always append tree |
|
195 | # Always append tree | |
200 | trees.append(tree) |
|
196 | trees.append(tree) | |
201 | return trees |
|
197 | return trees |
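For orientation, commit() is not normally called on a hand-constructed object; the backend is typically driven through the repository's in-memory changeset: stage FileNode objects, then commit them. A rough usage sketch - the repository setup, the file path and the in_memory_changeset attribute are assumptions for illustration, not something this diff shows:

    from kallithea.lib.vcs.nodes import FileNode

    def add_readme(repo):
        # 'repo' is assumed to be an existing GitRepository (or MercurialRepository).
        imc = repo.in_memory_changeset
        imc.add(FileNode('docs/README.rst', content='Hello'))
        tip = imc.commit(message=u'Add README',
                         author=u'Joe Doe <joe.doe@example.com>')
        return tip.raw_id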
@@ -1,109 +1,107 b'' | |||||
1 | import datetime |
|
1 | import datetime | |
2 |
|
2 | |||
3 | from kallithea.lib.vcs.backends.base import BaseInMemoryChangeset |
|
3 | from kallithea.lib.vcs.backends.base import BaseInMemoryChangeset | |
4 | from kallithea.lib.vcs.exceptions import RepositoryError |
|
4 | from kallithea.lib.vcs.exceptions import RepositoryError | |
5 | from kallithea.lib.vcs.utils import safe_bytes |
|
5 | from kallithea.lib.vcs.utils import safe_bytes | |
6 | from kallithea.lib.vcs.utils.hgcompat import hex, memctx, memfilectx |
|
6 | from kallithea.lib.vcs.utils.hgcompat import hex, memctx, memfilectx | |
7 |
|
7 | |||
8 |
|
8 | |||
9 | class MercurialInMemoryChangeset(BaseInMemoryChangeset): |
|
9 | class MercurialInMemoryChangeset(BaseInMemoryChangeset): | |
10 |
|
10 | |||
11 | def commit(self, message, author, parents=None, branch=None, date=None, |
|
11 | def commit(self, message, author, parents=None, branch=None, date=None, | |
12 | **kwargs): |
|
12 | **kwargs): | |
13 | """ |
|
13 | """ | |
14 | Performs in-memory commit (doesn't check workdir in any way) and |
|
14 | Performs in-memory commit (doesn't check workdir in any way) and | |
15 | returns newly created ``Changeset``. Updates repository's |
|
15 | returns newly created ``Changeset``. Updates repository's | |
16 | ``revisions``. |
|
16 | ``revisions``. | |
17 |
|
17 | |||
18 | :param message: message of the commit |
|
18 | :param message: message of the commit | |
19 | :param author: full username, i.e. "Joe Doe <joe.doe@example.com>" |
|
19 | :param author: full username, i.e. "Joe Doe <joe.doe@example.com>" | |
20 | :param parents: single parent or sequence of parents from which commit |
|
20 | :param parents: single parent or sequence of parents from which commit | |
21 | would be derived |
|
21 | would be derived | |
22 | :param date: ``datetime.datetime`` instance. Defaults to |
|
22 | :param date: ``datetime.datetime`` instance. Defaults to | |
23 | ``datetime.datetime.now()``. |
|
23 | ``datetime.datetime.now()``. | |
24 | :param branch: branch name, as string. If none given, the backend's |
|
24 | :param branch: branch name, as string. If none given, the backend's | |
25 | default branch will be used. |
|
25 | default branch will be used. | |
26 |
|
26 | |||
27 | :raises ``CommitError``: if any error occurs while committing |
|
27 | :raises ``CommitError``: if any error occurs while committing | |
28 | """ |
|
28 | """ | |
29 | self.check_integrity(parents) |
|
29 | self.check_integrity(parents) | |
30 |
|
30 | |||
31 | from .repository import MercurialRepository |
|
31 | from .repository import MercurialRepository | |
32 | if not isinstance(message, unicode) or not isinstance(author, unicode): |
|
32 | if not isinstance(message, unicode) or not isinstance(author, unicode): | |
33 | raise RepositoryError('Given message and author need to be ' |
|
33 | raise RepositoryError('Given message and author need to be ' | |
34 | '<unicode> instances, got %r & %r instead' |
|
34 | '<unicode> instances, got %r & %r instead' | |
35 | % (type(message), type(author))) |
|
35 | % (type(message), type(author))) | |
36 |
|
36 | |||
37 | if branch is None: |
|
37 | if branch is None: | |
38 | branch = MercurialRepository.DEFAULT_BRANCH_NAME |
|
38 | branch = MercurialRepository.DEFAULT_BRANCH_NAME | |
39 | kwargs['branch'] = branch |
|
39 | kwargs['branch'] = branch | |
40 |
|
40 | |||
41 | def filectxfn(_repo, memctx, path): |
|
41 | def filectxfn(_repo, memctx, path): | |
42 | """ |
|
42 | """ | |
43 | Marks given path as added/changed/removed in a given _repo. This is |
|
43 | Marks given path as added/changed/removed in a given _repo. This is | |
44 | for internal mercurial commit function. |
|
44 | for internal mercurial commit function. | |
45 | """ |
|
45 | """ | |
46 |
|
46 | |||
47 | # check if this path is removed |
|
47 | # check if this path is removed | |
48 | if path in (node.path for node in self.removed): |
|
48 | if path in (node.path for node in self.removed): | |
49 | return None |
|
49 | return None | |
50 |
|
50 | |||
51 | # check if this path is added |
|
51 | # check if this path is added | |
52 | for node in self.added: |
|
52 | for node in self.added: | |
53 | if node.path == path: |
|
53 | if node.path == path: | |
54 | return memfilectx(_repo, memctx, path=node.path, |
|
54 | return memfilectx(_repo, memctx, path=node.path, | |
55 |
data= |
|
55 | data=node.content, | |
56 | if not node.is_binary else node.content), |
|
|||
57 | islink=False, |
|
56 | islink=False, | |
58 | isexec=node.is_executable, |
|
57 | isexec=node.is_executable, | |
59 | copysource=False) |
|
58 | copysource=False) | |
60 |
|
59 | |||
61 | # or changed |
|
60 | # or changed | |
62 | for node in self.changed: |
|
61 | for node in self.changed: | |
63 | if node.path == path: |
|
62 | if node.path == path: | |
64 | return memfilectx(_repo, memctx, path=node.path, |
|
63 | return memfilectx(_repo, memctx, path=node.path, | |
65 |
data= |
|
64 | data=node.content, | |
66 | if not node.is_binary else node.content), |
|
|||
67 | islink=False, |
|
65 | islink=False, | |
68 | isexec=node.is_executable, |
|
66 | isexec=node.is_executable, | |
69 | copysource=False) |
|
67 | copysource=False) | |
70 |
|
68 | |||
71 | raise RepositoryError("Given path hasn't been marked as added, " |
|
69 | raise RepositoryError("Given path hasn't been marked as added, " | |
72 | "changed or removed (%s)" % path) |
|
70 | "changed or removed (%s)" % path) | |
73 |
|
71 | |||
74 | parents = [None, None] |
|
72 | parents = [None, None] | |
75 | for i, parent in enumerate(self.parents): |
|
73 | for i, parent in enumerate(self.parents): | |
76 | if parent is not None: |
|
74 | if parent is not None: | |
77 | parents[i] = parent._ctx.node() |
|
75 | parents[i] = parent._ctx.node() | |
78 |
|
76 | |||
79 | if date and isinstance(date, datetime.datetime): |
|
77 | if date and isinstance(date, datetime.datetime): | |
80 | date = date.strftime('%a, %d %b %Y %H:%M:%S') |
|
78 | date = date.strftime('%a, %d %b %Y %H:%M:%S') | |
81 |
|
79 | |||
82 | commit_ctx = memctx(repo=self.repository._repo, |
|
80 | commit_ctx = memctx(repo=self.repository._repo, | |
83 | parents=parents, |
|
81 | parents=parents, | |
84 | text='', |
|
82 | text='', | |
85 | files=self.get_paths(), |
|
83 | files=self.get_paths(), | |
86 | filectxfn=filectxfn, |
|
84 | filectxfn=filectxfn, | |
87 | user=author, |
|
85 | user=author, | |
88 | date=date, |
|
86 | date=date, | |
89 | extra=kwargs) |
|
87 | extra=kwargs) | |
90 |
|
88 | |||
91 | # injecting given _repo params |
|
89 | # injecting given _repo params | |
92 | commit_ctx._text = safe_bytes(message) |
|
90 | commit_ctx._text = safe_bytes(message) | |
93 | commit_ctx._user = safe_bytes(author) |
|
91 | commit_ctx._user = safe_bytes(author) | |
94 | commit_ctx._date = date |
|
92 | commit_ctx._date = date | |
95 |
|
93 | |||
96 | # TODO: Catch exceptions! |
|
94 | # TODO: Catch exceptions! | |
97 | n = self.repository._repo.commitctx(commit_ctx) |
|
95 | n = self.repository._repo.commitctx(commit_ctx) | |
98 | # Returns mercurial node |
|
96 | # Returns mercurial node | |
99 | self._commit_ctx = commit_ctx # For reference |
|
97 | self._commit_ctx = commit_ctx # For reference | |
100 | # Update vcs repository object & recreate mercurial _repo |
|
98 | # Update vcs repository object & recreate mercurial _repo | |
101 | # new_ctx = self.repository._repo[node] |
|
99 | # new_ctx = self.repository._repo[node] | |
102 | # new_tip = self.repository.get_changeset(new_ctx.hex()) |
|
100 | # new_tip = self.repository.get_changeset(new_ctx.hex()) | |
103 | new_id = hex(n) |
|
101 | new_id = hex(n) | |
104 | self.repository.revisions.append(new_id) |
|
102 | self.repository.revisions.append(new_id) | |
105 | self._repo = self.repository._get_repo(create=False) |
|
103 | self._repo = self.repository._get_repo(create=False) | |
106 | self.repository.branches = self.repository._get_branches() |
|
104 | self.repository.branches = self.repository._get_branches() | |
107 | tip = self.repository.get_changeset() |
|
105 | tip = self.repository.get_changeset() | |
108 | self.reset() |
|
106 | self.reset() | |
109 | return tip |
|
107 | return tip |
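The filectxfn callback above is the single per-file hook that Mercurial's memctx uses: for every path passed in files=, it must either return a memfilectx with the file's new data or None for a removed path. Stripped of the Mercurial types, the decision tree is just the following (a simplified illustration; added/changed/removed stand in for the staged node lists):

    def describe_path(path, added, changed, removed):
        # Removed paths are reported as None; staged paths yield their new content;
        # anything else means a path was listed that was never staged.
        if any(node.path == path for node in removed):
            return None
        for node in added + changed:
            if node.path == path:
                return node.content
        raise LookupError("path %s has not been marked as added, changed or removed" % path)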
@@ -1,622 +1,617 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | """ |
|
2 | """ | |
3 | vcs.nodes |
|
3 | vcs.nodes | |
4 | ~~~~~~~~~ |
|
4 | ~~~~~~~~~ | |
5 |
|
5 | |||
6 | Module holding everything related to vcs nodes. |
|
6 | Module holding everything related to vcs nodes. | |
7 |
|
7 | |||
8 | :created_on: Apr 8, 2010 |
|
8 | :created_on: Apr 8, 2010 | |
9 | :copyright: (c) 2010-2011 by Marcin Kuzminski, Lukasz Balcerzak. |
|
9 | :copyright: (c) 2010-2011 by Marcin Kuzminski, Lukasz Balcerzak. | |
10 | """ |
|
10 | """ | |
11 |
|
11 | |||
12 | import functools |
|
12 | import functools | |
13 | import mimetypes |
|
13 | import mimetypes | |
14 | import posixpath |
|
14 | import posixpath | |
15 | import stat |
|
15 | import stat | |
16 |
|
16 | |||
17 | from kallithea.lib.vcs.backends.base import EmptyChangeset |
|
17 | from kallithea.lib.vcs.backends.base import EmptyChangeset | |
18 | from kallithea.lib.vcs.exceptions import NodeError, RemovedFileNodeError |
|
18 | from kallithea.lib.vcs.exceptions import NodeError, RemovedFileNodeError | |
19 | from kallithea.lib.vcs.utils import safe_str, safe_unicode |
|
19 | from kallithea.lib.vcs.utils import safe_bytes, safe_str, safe_unicode | |
20 | from kallithea.lib.vcs.utils.lazy import LazyProperty |
|
20 | from kallithea.lib.vcs.utils.lazy import LazyProperty | |
21 |
|
21 | |||
22 |
|
22 | |||
23 | class NodeKind: |
|
23 | class NodeKind: | |
24 | SUBMODULE = -1 |
|
24 | SUBMODULE = -1 | |
25 | DIR = 1 |
|
25 | DIR = 1 | |
26 | FILE = 2 |
|
26 | FILE = 2 | |
27 |
|
27 | |||
28 |
|
28 | |||
29 | class NodeState: |
|
29 | class NodeState: | |
30 | ADDED = u'added' |
|
30 | ADDED = u'added' | |
31 | CHANGED = u'changed' |
|
31 | CHANGED = u'changed' | |
32 | NOT_CHANGED = u'not changed' |
|
32 | NOT_CHANGED = u'not changed' | |
33 | REMOVED = u'removed' |
|
33 | REMOVED = u'removed' | |
34 |
|
34 | |||
35 |
|
35 | |||
36 | class NodeGeneratorBase(object): |
|
36 | class NodeGeneratorBase(object): | |
37 | """ |
|
37 | """ | |
38 | Base class for removed, added and changed filenodes; it's a lazy generator |
|
38 | Base class for removed, added and changed filenodes; it's a lazy generator | |
39 | class that will create filenodes only on iteration or call |
|
39 | class that will create filenodes only on iteration or call | |
40 |
|
40 | |||
41 | The len method doesn't need to create filenodes at all |
|
41 | The len method doesn't need to create filenodes at all | |
42 | """ |
|
42 | """ | |
43 |
|
43 | |||
44 | def __init__(self, current_paths, cs): |
|
44 | def __init__(self, current_paths, cs): | |
45 | self.cs = cs |
|
45 | self.cs = cs | |
46 | self.current_paths = current_paths |
|
46 | self.current_paths = current_paths | |
47 |
|
47 | |||
48 | def __call__(self): |
|
48 | def __call__(self): | |
49 | return [n for n in self] |
|
49 | return [n for n in self] | |
50 |
|
50 | |||
51 | def __getitem__(self, key): |
|
51 | def __getitem__(self, key): | |
52 | assert isinstance(key, slice), key |
|
52 | assert isinstance(key, slice), key | |
53 | for p in self.current_paths[key]: |
|
53 | for p in self.current_paths[key]: | |
54 | yield self.cs.get_node(p) |
|
54 | yield self.cs.get_node(p) | |
55 |
|
55 | |||
56 | def __len__(self): |
|
56 | def __len__(self): | |
57 | return len(self.current_paths) |
|
57 | return len(self.current_paths) | |
58 |
|
58 | |||
59 | def __iter__(self): |
|
59 | def __iter__(self): | |
60 | for p in self.current_paths: |
|
60 | for p in self.current_paths: | |
61 | yield self.cs.get_node(p) |
|
61 | yield self.cs.get_node(p) | |
62 |
|
62 | |||
63 |
|
63 | |||
64 | class AddedFileNodesGenerator(NodeGeneratorBase): |
|
64 | class AddedFileNodesGenerator(NodeGeneratorBase): | |
65 | """ |
|
65 | """ | |
66 | Class holding Added files for current changeset |
|
66 | Class holding Added files for current changeset | |
67 | """ |
|
67 | """ | |
68 | pass |
|
68 | pass | |
69 |
|
69 | |||
70 |
|
70 | |||
71 | class ChangedFileNodesGenerator(NodeGeneratorBase): |
|
71 | class ChangedFileNodesGenerator(NodeGeneratorBase): | |
72 | """ |
|
72 | """ | |
73 | Class holding Changed files for current changeset |
|
73 | Class holding Changed files for current changeset | |
74 | """ |
|
74 | """ | |
75 | pass |
|
75 | pass | |
76 |
|
76 | |||
77 |
|
77 | |||
78 | class RemovedFileNodesGenerator(NodeGeneratorBase): |
|
78 | class RemovedFileNodesGenerator(NodeGeneratorBase): | |
79 | """ |
|
79 | """ | |
80 | Class holding removed files for current changeset |
|
80 | Class holding removed files for current changeset | |
81 | """ |
|
81 | """ | |
82 | def __iter__(self): |
|
82 | def __iter__(self): | |
83 | for p in self.current_paths: |
|
83 | for p in self.current_paths: | |
84 | yield RemovedFileNode(path=p) |
|
84 | yield RemovedFileNode(path=p) | |
85 |
|
85 | |||
86 | def __getitem__(self, key): |
|
86 | def __getitem__(self, key): | |
87 | assert isinstance(key, slice), key |
|
87 | assert isinstance(key, slice), key | |
88 | for p in self.current_paths[key]: |
|
88 | for p in self.current_paths[key]: | |
89 | yield RemovedFileNode(path=p) |
|
89 | yield RemovedFileNode(path=p) | |
90 |
|
90 | |||
91 |
|
91 | |||
92 | @functools.total_ordering |
|
92 | @functools.total_ordering | |
93 | class Node(object): |
|
93 | class Node(object): | |
94 | """ |
|
94 | """ | |
95 | Simplest class representing file or directory on repository. SCM backends |
|
95 | Simplest class representing file or directory on repository. SCM backends | |
96 | should use ``FileNode`` and ``DirNode`` subclasses rather than ``Node`` |
|
96 | should use ``FileNode`` and ``DirNode`` subclasses rather than ``Node`` | |
97 | directly. |
|
97 | directly. | |
98 |
|
98 | |||
99 | Node's ``path`` cannot start with slash as we operate on *relative* paths |
|
99 | Node's ``path`` cannot start with slash as we operate on *relative* paths | |
100 | only. Moreover, every single node is identified by the ``path`` attribute, |
|
100 | only. Moreover, every single node is identified by the ``path`` attribute, | |
101 | so it cannot end with a slash either. Otherwise, the path could lead to mistakes. |
|
101 | so it cannot end with a slash either. Otherwise, the path could lead to mistakes. | |
102 | """ |
|
102 | """ | |
103 |
|
103 | |||
104 | def __init__(self, path, kind): |
|
104 | def __init__(self, path, kind): | |
105 | if path.startswith('/'): |
|
105 | if path.startswith('/'): | |
106 | raise NodeError("Cannot initialize Node objects with slash at " |
|
106 | raise NodeError("Cannot initialize Node objects with slash at " | |
107 | "the beginning as only relative paths are supported") |
|
107 | "the beginning as only relative paths are supported") | |
108 | self.path = safe_str(path.rstrip('/')) # we store paths as str |
|
108 | self.path = safe_str(path.rstrip('/')) # we store paths as str | |
109 | if path == '' and kind != NodeKind.DIR: |
|
109 | if path == '' and kind != NodeKind.DIR: | |
110 | raise NodeError("Only DirNode and its subclasses may be " |
|
110 | raise NodeError("Only DirNode and its subclasses may be " | |
111 | "initialized with empty path") |
|
111 | "initialized with empty path") | |
112 | self.kind = kind |
|
112 | self.kind = kind | |
113 | #self.dirs, self.files = [], [] |
|
113 | #self.dirs, self.files = [], [] | |
114 | if self.is_root() and not self.is_dir(): |
|
114 | if self.is_root() and not self.is_dir(): | |
115 | raise NodeError("Root node cannot be FILE kind") |
|
115 | raise NodeError("Root node cannot be FILE kind") | |
116 |
|
116 | |||
117 | @LazyProperty |
|
117 | @LazyProperty | |
118 | def parent(self): |
|
118 | def parent(self): | |
119 | parent_path = self.get_parent_path() |
|
119 | parent_path = self.get_parent_path() | |
120 | if parent_path: |
|
120 | if parent_path: | |
121 | if self.changeset: |
|
121 | if self.changeset: | |
122 | return self.changeset.get_node(parent_path) |
|
122 | return self.changeset.get_node(parent_path) | |
123 | return DirNode(parent_path) |
|
123 | return DirNode(parent_path) | |
124 | return None |
|
124 | return None | |
125 |
|
125 | |||
126 | @LazyProperty |
|
126 | @LazyProperty | |
127 | def name(self): |
|
127 | def name(self): | |
128 | """ |
|
128 | """ | |
129 | Returns name of the node, i.e. the last part of its path |
|
129 | Returns name of the node, i.e. the last part of its path | |
130 | (for path ``foo/bar`` only ``bar`` is returned). |
|
130 | (for path ``foo/bar`` only ``bar`` is returned). | |
131 | """ |
|
131 | """ | |
132 | return safe_unicode(self.path.rstrip('/').split('/')[-1]) |
|
132 | return safe_unicode(self.path.rstrip('/').split('/')[-1]) | |
133 |
|
133 | |||
134 | def _get_kind(self): |
|
134 | def _get_kind(self): | |
135 | return self._kind |
|
135 | return self._kind | |
136 |
|
136 | |||
137 | def _set_kind(self, kind): |
|
137 | def _set_kind(self, kind): | |
138 | if hasattr(self, '_kind'): |
|
138 | if hasattr(self, '_kind'): | |
139 | raise NodeError("Cannot change node's kind") |
|
139 | raise NodeError("Cannot change node's kind") | |
140 | else: |
|
140 | else: | |
141 | self._kind = kind |
|
141 | self._kind = kind | |
142 | # Post setter check (path's trailing slash) |
|
142 | # Post setter check (path's trailing slash) | |
143 | if self.path.endswith('/'): |
|
143 | if self.path.endswith('/'): | |
144 | raise NodeError("Node's path cannot end with slash") |
|
144 | raise NodeError("Node's path cannot end with slash") | |
145 |
|
145 | |||
146 | kind = property(_get_kind, _set_kind) |
|
146 | kind = property(_get_kind, _set_kind) | |
147 |
|
147 | |||
148 | def __eq__(self, other): |
|
148 | def __eq__(self, other): | |
149 | if type(self) is not type(other): |
|
149 | if type(self) is not type(other): | |
150 | return False |
|
150 | return False | |
151 | if self._kind != other._kind: |
|
151 | if self._kind != other._kind: | |
152 | return False |
|
152 | return False | |
153 | if self.path != other.path: |
|
153 | if self.path != other.path: | |
154 | return False |
|
154 | return False | |
155 | if self.is_file(): |
|
155 | if self.is_file(): | |
156 | return self.content == other.content |
|
156 | return self.content == other.content | |
157 | else: |
|
157 | else: | |
158 | # For DirNode's check without entering each dir |
|
158 | # For DirNode's check without entering each dir | |
159 | self_nodes_paths = list(sorted(n.path for n in self.nodes)) |
|
159 | self_nodes_paths = list(sorted(n.path for n in self.nodes)) | |
160 | other_nodes_paths = list(sorted(n.path for n in self.nodes)) |
|
160 | other_nodes_paths = list(sorted(n.path for n in self.nodes)) | |
161 | return self_nodes_paths == other_nodes_paths |
|
161 | return self_nodes_paths == other_nodes_paths | |
162 |
|
162 | |||
163 | def __lt__(self, other): |
|
163 | def __lt__(self, other): | |
164 | if self._kind < other._kind: |
|
164 | if self._kind < other._kind: | |
165 | return True |
|
165 | return True | |
166 | if self._kind > other._kind: |
|
166 | if self._kind > other._kind: | |
167 | return False |
|
167 | return False | |
168 | if self.path < other.path: |
|
168 | if self.path < other.path: | |
169 | return True |
|
169 | return True | |
170 | if self.path > other.path: |
|
170 | if self.path > other.path: | |
171 | return False |
|
171 | return False | |
172 | if self.is_file(): |
|
172 | if self.is_file(): | |
173 | return self.content < other.content |
|
173 | return self.content < other.content | |
174 | else: |
|
174 | else: | |
175 | # For DirNode's check without entering each dir |
|
175 | # For DirNode's check without entering each dir | |
176 | self_nodes_paths = list(sorted(n.path for n in self.nodes)) |
|
176 | self_nodes_paths = list(sorted(n.path for n in self.nodes)) | |
177 | other_nodes_paths = list(sorted(n.path for n in self.nodes)) |
|
177 | other_nodes_paths = list(sorted(n.path for n in self.nodes)) | |
178 | return self_nodes_paths < other_nodes_paths |
|
178 | return self_nodes_paths < other_nodes_paths | |
179 |
|
179 | |||
180 | def __repr__(self): |
|
180 | def __repr__(self): | |
181 | return '<%s %r>' % (self.__class__.__name__, self.path) |
|
181 | return '<%s %r>' % (self.__class__.__name__, self.path) | |
182 |
|
182 | |||
183 | def __str__(self): |
|
183 | def __str__(self): | |
184 | return self.__repr__() |
|
184 | return self.__repr__() | |
185 |
|
185 | |||
186 | def __unicode__(self): |
|
186 | def __unicode__(self): | |
187 | return self.name |
|
187 | return self.name | |
188 |
|
188 | |||
189 | def get_parent_path(self): |
|
189 | def get_parent_path(self): | |
190 | """ |
|
190 | """ | |
191 | Returns node's parent path or empty string if node is root. |
|
191 | Returns node's parent path or empty string if node is root. | |
192 | """ |
|
192 | """ | |
193 | if self.is_root(): |
|
193 | if self.is_root(): | |
194 | return '' |
|
194 | return '' | |
195 | return posixpath.dirname(self.path.rstrip('/')) + '/' |
|
195 | return posixpath.dirname(self.path.rstrip('/')) + '/' | |
196 |
|
196 | |||
197 | def is_file(self): |
|
197 | def is_file(self): | |
198 | """ |
|
198 | """ | |
199 | Returns ``True`` if node's kind is ``NodeKind.FILE``, ``False`` |
|
199 | Returns ``True`` if node's kind is ``NodeKind.FILE``, ``False`` | |
200 | otherwise. |
|
200 | otherwise. | |
201 | """ |
|
201 | """ | |
202 | return self.kind == NodeKind.FILE |
|
202 | return self.kind == NodeKind.FILE | |
203 |
|
203 | |||
204 | def is_dir(self): |
|
204 | def is_dir(self): | |
205 | """ |
|
205 | """ | |
206 | Returns ``True`` if node's kind is ``NodeKind.DIR``, ``False`` |
|
206 | Returns ``True`` if node's kind is ``NodeKind.DIR``, ``False`` | |
207 | otherwise. |
|
207 | otherwise. | |
208 | """ |
|
208 | """ | |
209 | return self.kind == NodeKind.DIR |
|
209 | return self.kind == NodeKind.DIR | |
210 |
|
210 | |||
211 | def is_root(self): |
|
211 | def is_root(self): | |
212 | """ |
|
212 | """ | |
213 | Returns ``True`` if node is a root node and ``False`` otherwise. |
|
213 | Returns ``True`` if node is a root node and ``False`` otherwise. | |
214 | """ |
|
214 | """ | |
215 | return self.kind == NodeKind.DIR and self.path == '' |
|
215 | return self.kind == NodeKind.DIR and self.path == '' | |
216 |
|
216 | |||
217 | def is_submodule(self): |
|
217 | def is_submodule(self): | |
218 | """ |
|
218 | """ | |
219 | Returns ``True`` if node's kind is ``NodeKind.SUBMODULE``, ``False`` |
|
219 | Returns ``True`` if node's kind is ``NodeKind.SUBMODULE``, ``False`` | |
220 | otherwise. |
|
220 | otherwise. | |
221 | """ |
|
221 | """ | |
222 | return self.kind == NodeKind.SUBMODULE |
|
222 | return self.kind == NodeKind.SUBMODULE | |
223 |
|
223 | |||
224 | @LazyProperty |
|
224 | @LazyProperty | |
225 | def added(self): |
|
225 | def added(self): | |
226 | return self.state is NodeState.ADDED |
|
226 | return self.state is NodeState.ADDED | |
227 |
|
227 | |||
228 | @LazyProperty |
|
228 | @LazyProperty | |
229 | def changed(self): |
|
229 | def changed(self): | |
230 | return self.state is NodeState.CHANGED |
|
230 | return self.state is NodeState.CHANGED | |
231 |
|
231 | |||
232 | @LazyProperty |
|
232 | @LazyProperty | |
233 | def not_changed(self): |
|
233 | def not_changed(self): | |
234 | return self.state is NodeState.NOT_CHANGED |
|
234 | return self.state is NodeState.NOT_CHANGED | |
235 |
|
235 | |||
236 | @LazyProperty |
|
236 | @LazyProperty | |
237 | def removed(self): |
|
237 | def removed(self): | |
238 | return self.state is NodeState.REMOVED |
|
238 | return self.state is NodeState.REMOVED | |
239 |
|
239 | |||
240 |
|
240 | |||
241 | class FileNode(Node): |
|
241 | class FileNode(Node): | |
242 | """ |
|
242 | """ | |
243 | Class representing file nodes. |
|
243 | Class representing file nodes. | |
244 |
|
244 | |||
245 | :attribute: path: path to the node, relative to repository's root |
|
245 | :attribute: path: path to the node, relative to repository's root | |
246 | :attribute: content: if given, sets the content of the file |
|
246 | :attribute: content: if given, sets the content of the file | |
247 | :attribute: changeset: if given, content is fetched lazily from it on first access |
|
247 | :attribute: changeset: if given, content is fetched lazily from it on first access | |
248 | :attribute: mode: octal stat mode for a node. Default is 0100644. |
|
248 | :attribute: mode: octal stat mode for a node. Default is 0100644. | |
249 | """ |
|
249 | """ | |
250 |
|
250 | |||
251 | def __init__(self, path, content=None, changeset=None, mode=None): |
|
251 | def __init__(self, path, content=None, changeset=None, mode=None): | |
252 | """ |
|
252 | """ | |
253 | Only one of ``content`` and ``changeset`` may be given. Passing both |
|
253 | Only one of ``content`` and ``changeset`` may be given. Passing both | |
254 | would raise ``NodeError`` exception. |
|
254 | would raise ``NodeError`` exception. | |
255 |
|
255 | |||
256 | :param path: relative path to the node |
|
256 | :param path: relative path to the node | |
257 | :param content: content may be passed to constructor |
|
257 | :param content: content may be passed to constructor | |
258 | :param changeset: if given, will use it to lazily fetch content |
|
258 | :param changeset: if given, will use it to lazily fetch content | |
259 | :param mode: octal representation of ST_MODE (i.e. 0100644) |
|
259 | :param mode: octal representation of ST_MODE (i.e. 0100644) | |
260 | """ |
|
260 | """ | |
261 |
|
261 | |||
262 | if content and changeset: |
|
262 | if content and changeset: | |
263 | raise NodeError("Cannot use both content and changeset") |
|
263 | raise NodeError("Cannot use both content and changeset") | |
264 | super(FileNode, self).__init__(path, kind=NodeKind.FILE) |
|
264 | super(FileNode, self).__init__(path, kind=NodeKind.FILE) | |
265 | self.changeset = changeset |
|
265 | self.changeset = changeset | |
|
266 | if not isinstance(content, bytes) and content is not None: | |||
|
267 | # File content is one thing that inherently must be bytes ... but | |||
|
268 | # VCS module tries to be "user friendly" and support unicode ... | |||
|
269 | content = safe_bytes(content) | |||
266 | self._content = content |
|
270 | self._content = content | |
267 | self._mode = mode or 0o100644 |
|
271 | self._mode = mode or 0o100644 | |
268 |
|
272 | |||
269 | @LazyProperty |
|
273 | @LazyProperty | |
270 | def mode(self): |
|
274 | def mode(self): | |
271 | """ |
|
275 | """ | |
272 | Returns lazily mode of the FileNode. If ``changeset`` is not set, would |
|
276 | Returns lazily mode of the FileNode. If ``changeset`` is not set, would | |
273 | use value given at initialization or 0100644 (default). |
|
277 | use value given at initialization or 0100644 (default). | |
274 | """ |
|
278 | """ | |
275 | if self.changeset: |
|
279 | if self.changeset: | |
276 | mode = self.changeset.get_file_mode(self.path) |
|
280 | mode = self.changeset.get_file_mode(self.path) | |
277 | else: |
|
281 | else: | |
278 | mode = self._mode |
|
282 | mode = self._mode | |
279 | return mode |
|
283 | return mode | |
280 |
|
284 | |||
281 | def _get_content(self): |
|
285 | @property | |
|
286 | def content(self): | |||
|
287 | """ | |||
|
288 | Returns lazily byte content of the FileNode. | |||
|
289 | """ | |||
282 | if self.changeset: |
|
290 | if self.changeset: | |
283 | content = self.changeset.get_file_content(self.path) |
|
291 | content = self.changeset.get_file_content(self.path) | |
284 | else: |
|
292 | else: | |
285 | content = self._content |
|
293 | content = self._content | |
286 | return content |
|
294 | return content | |
287 |
|
295 | |||
288 | @property |
|
|||
289 | def content(self): |
|
|||
290 | """ |
|
|||
291 | Returns lazily content of the FileNode. If possible, would try to |
|
|||
292 | decode content from UTF-8. |
|
|||
293 | """ |
|
|||
294 | content = self._get_content() |
|
|||
295 |
|
||||
296 | if bool(content and '\0' in content): |
|
|||
297 | return content |
|
|||
298 | return safe_unicode(content) |
|
|||
299 |
|
||||
300 | @LazyProperty |
|
296 | @LazyProperty | |
301 | def size(self): |
|
297 | def size(self): | |
302 | if self.changeset: |
|
298 | if self.changeset: | |
303 | return self.changeset.get_file_size(self.path) |
|
299 | return self.changeset.get_file_size(self.path) | |
304 | raise NodeError("Cannot retrieve size of the file without related " |
|
300 | raise NodeError("Cannot retrieve size of the file without related " | |
305 | "changeset attribute") |
|
301 | "changeset attribute") | |
306 |
|
302 | |||
307 | @LazyProperty |
|
303 | @LazyProperty | |
308 | def message(self): |
|
304 | def message(self): | |
309 | if self.changeset: |
|
305 | if self.changeset: | |
310 | return self.last_changeset.message |
|
306 | return self.last_changeset.message | |
311 | raise NodeError("Cannot retrieve message of the file without related " |
|
307 | raise NodeError("Cannot retrieve message of the file without related " | |
312 | "changeset attribute") |
|
308 | "changeset attribute") | |
313 |
|
309 | |||
314 | @LazyProperty |
|
310 | @LazyProperty | |
315 | def last_changeset(self): |
|
311 | def last_changeset(self): | |
316 | if self.changeset: |
|
312 | if self.changeset: | |
317 | return self.changeset.get_file_changeset(self.path) |
|
313 | return self.changeset.get_file_changeset(self.path) | |
318 | raise NodeError("Cannot retrieve last changeset of the file without " |
|
314 | raise NodeError("Cannot retrieve last changeset of the file without " | |
319 | "related changeset attribute") |
|
315 | "related changeset attribute") | |
320 |
|
316 | |||
321 | def get_mimetype(self): |
|
317 | def get_mimetype(self): | |
322 | """ |
|
318 | """ | |
323 | Mimetype is calculated based on the file's content. |
|
319 | Mimetype is calculated based on the file's content. | |
324 | """ |
|
320 | """ | |
325 |
|
321 | |||
326 | mtype, encoding = mimetypes.guess_type(self.name) |
|
322 | mtype, encoding = mimetypes.guess_type(self.name) | |
327 |
|
323 | |||
328 | if mtype is None: |
|
324 | if mtype is None: | |
329 | if self.is_binary: |
|
325 | if self.is_binary: | |
330 | mtype = 'application/octet-stream' |
|
326 | mtype = 'application/octet-stream' | |
331 | encoding = None |
|
327 | encoding = None | |
332 | else: |
|
328 | else: | |
333 | mtype = 'text/plain' |
|
329 | mtype = 'text/plain' | |
334 | encoding = None |
|
330 | encoding = None | |
335 |
|
331 | |||
336 | # try with pygments |
|
332 | # try with pygments | |
337 | try: |
|
333 | try: | |
338 | from pygments import lexers |
|
334 | from pygments import lexers | |
339 | mt = lexers.get_lexer_for_filename(self.name).mimetypes |
|
335 | mt = lexers.get_lexer_for_filename(self.name).mimetypes | |
340 | except lexers.ClassNotFound: |
|
336 | except lexers.ClassNotFound: | |
341 | mt = None |
|
337 | mt = None | |
342 |
|
338 | |||
343 | if mt: |
|
339 | if mt: | |
344 | mtype = mt[0] |
|
340 | mtype = mt[0] | |
345 |
|
341 | |||
346 | return mtype, encoding |
|
342 | return mtype, encoding | |
347 |
|
343 | |||
348 | @LazyProperty |
|
344 | @LazyProperty | |
349 | def mimetype(self): |
|
345 | def mimetype(self): | |
350 | """ |
|
346 | """ | |
351 | Wrapper around full mimetype info. It returns only type of fetched |
|
347 | Wrapper around full mimetype info. It returns only type of fetched | |
352 | mimetype without the encoding part. Use the get_mimetype function to fetch |
|
348 | mimetype without the encoding part. Use the get_mimetype function to fetch | |
353 | the full (type, encoding) tuple. |
|
349 | the full (type, encoding) tuple. | |
354 | """ |
|
350 | """ | |
355 | return self.get_mimetype()[0] |
|
351 | return self.get_mimetype()[0] | |
356 |
|
352 | |||
357 | @LazyProperty |
|
353 | @LazyProperty | |
358 | def mimetype_main(self): |
|
354 | def mimetype_main(self): | |
359 | return self.mimetype.split('/')[0] |
|
355 | return self.mimetype.split('/')[0] | |
360 |
|
356 | |||
361 | @LazyProperty |
|
357 | @LazyProperty | |
362 | def lexer(self): |
|
358 | def lexer(self): | |
363 | """ |
|
359 | """ | |
364 | Returns a Pygments lexer class. Tries to guess the lexer from the file's |
|
360 | Returns a Pygments lexer class. Tries to guess the lexer from the file's | |
365 | content, name and mimetype. |
|
361 | content, name and mimetype. | |
366 | """ |
|
362 | """ | |
367 | from pygments import lexers |
|
363 | from pygments import lexers | |
368 | try: |
|
364 | try: | |
369 | lexer = lexers.guess_lexer_for_filename(self.name, self.content, stripnl=False) |
|
365 | lexer = lexers.guess_lexer_for_filename(self.name, safe_unicode(self.content), stripnl=False) | |
370 | except lexers.ClassNotFound: |
|
366 | except lexers.ClassNotFound: | |
371 | lexer = lexers.TextLexer(stripnl=False) |
|
367 | lexer = lexers.TextLexer(stripnl=False) | |
372 | # returns first alias |
|
368 | # returns first alias | |
373 | return lexer |
|
369 | return lexer | |
374 |
|
370 | |||
375 | @LazyProperty |
|
371 | @LazyProperty | |
376 | def lexer_alias(self): |
|
372 | def lexer_alias(self): | |
377 | """ |
|
373 | """ | |
378 | Returns first alias of the lexer guessed for this file. |
|
374 | Returns first alias of the lexer guessed for this file. | |
379 | """ |
|
375 | """ | |
380 | return self.lexer.aliases[0] |
|
376 | return self.lexer.aliases[0] | |
381 |
|
377 | |||
382 | @LazyProperty |
|
378 | @LazyProperty | |
383 | def history(self): |
|
379 | def history(self): | |
384 | """ |
|
380 | """ | |
385 | Returns a list of changesets for this file in which the file was changed |
|
381 | Returns a list of changesets for this file in which the file was changed | |
386 | """ |
|
382 | """ | |
387 | if self.changeset is None: |
|
383 | if self.changeset is None: | |
388 | raise NodeError('Unable to get changeset for this FileNode') |
|
384 | raise NodeError('Unable to get changeset for this FileNode') | |
389 | return self.changeset.get_file_history(self.path) |
|
385 | return self.changeset.get_file_history(self.path) | |
390 |
|
386 | |||
391 | @LazyProperty |
|
387 | @LazyProperty | |
392 | def annotate(self): |
|
388 | def annotate(self): | |
393 | """ |
|
389 | """ | |
394 | Returns a list of three-element tuples of lineno, changeset and line |
|
390 | Returns a list of three-element tuples of lineno, changeset and line | |
395 | """ |
|
391 | """ | |
396 | if self.changeset is None: |
|
392 | if self.changeset is None: | |
397 | raise NodeError('Unable to get changeset for this FileNode') |
|
393 | raise NodeError('Unable to get changeset for this FileNode') | |
398 | return self.changeset.get_file_annotate(self.path) |
|
394 | return self.changeset.get_file_annotate(self.path) | |
399 |
|
395 | |||
400 | @LazyProperty |
|
396 | @LazyProperty | |
401 | def state(self): |
|
397 | def state(self): | |
402 | if not self.changeset: |
|
398 | if not self.changeset: | |
403 | raise NodeError("Cannot check state of the node if it's not " |
|
399 | raise NodeError("Cannot check state of the node if it's not " | |
404 | "linked with changeset") |
|
400 | "linked with changeset") | |
405 | elif self.path in (node.path for node in self.changeset.added): |
|
401 | elif self.path in (node.path for node in self.changeset.added): | |
406 | return NodeState.ADDED |
|
402 | return NodeState.ADDED | |
407 | elif self.path in (node.path for node in self.changeset.changed): |
|
403 | elif self.path in (node.path for node in self.changeset.changed): | |
408 | return NodeState.CHANGED |
|
404 | return NodeState.CHANGED | |
409 | else: |
|
405 | else: | |
410 | return NodeState.NOT_CHANGED |
|
406 | return NodeState.NOT_CHANGED | |
411 |
|
407 | |||
412 | @property |
|
408 | @property | |
413 | def is_binary(self): |
|
409 | def is_binary(self): | |
414 | """ |
|
410 | """ | |
415 | Returns True if file has binary content. |
|
411 | Returns True if file has binary content. | |
416 | """ |
|
412 | """ | |
417 |
|
|
413 | return b'\0' in self.content | |
418 | return _bin |
|
|||
419 |
|
414 | |||
420 | def is_browser_compatible_image(self): |
|
415 | def is_browser_compatible_image(self): | |
421 | return self.mimetype in [ |
|
416 | return self.mimetype in [ | |
422 | "image/gif", |
|
417 | "image/gif", | |
423 | "image/jpeg", |
|
418 | "image/jpeg", | |
424 | "image/png", |
|
419 | "image/png", | |
425 | "image/bmp" |
|
420 | "image/bmp" | |
426 | ] |
|
421 | ] | |
427 |
|
422 | |||
428 | @LazyProperty |
|
423 | @LazyProperty | |
429 | def extension(self): |
|
424 | def extension(self): | |
430 | """Returns filenode extension""" |
|
425 | """Returns filenode extension""" | |
431 | return self.name.split('.')[-1] |
|
426 | return self.name.split('.')[-1] | |
432 |
|
427 | |||
433 | @property |
|
428 | @property | |
434 | def is_executable(self): |
|
429 | def is_executable(self): | |
435 | """ |
|
430 | """ | |
436 | Returns ``True`` if file has executable flag turned on. |
|
431 | Returns ``True`` if file has executable flag turned on. | |
437 | """ |
|
432 | """ | |
438 | return bool(self.mode & stat.S_IXUSR) |
|
433 | return bool(self.mode & stat.S_IXUSR) | |
439 |
|
434 | |||
440 | def __repr__(self): |
|
435 | def __repr__(self): | |
441 | return '<%s %r @ %s>' % (self.__class__.__name__, self.path, |
|
436 | return '<%s %r @ %s>' % (self.__class__.__name__, self.path, | |
442 | getattr(self.changeset, 'short_id', '')) |
|
437 | getattr(self.changeset, 'short_id', '')) | |
443 |
|
438 | |||
444 |
|
439 | |||
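    # Illustration of the FileNode behaviour after the changes above (not part of
    # nodes.py itself): content is now always bytes - a unicode/str value passed to
    # the constructor is converted with safe_bytes() - and is_binary simply looks
    # for a NUL byte. The file names below are made up for the example.
    from kallithea.lib.vcs.nodes import FileNode

    text_node = FileNode('docs/index.rst', content=u'hello')
    assert text_node.content == b'hello' and not text_node.is_binary

    binary_node = FileNode('img/logo.png', content=b'\x89PNG\x00\x1a')
    assert binary_node.is_binary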
445 | class RemovedFileNode(FileNode): |
|
440 | class RemovedFileNode(FileNode): | |
446 | """ |
|
441 | """ | |
447 | Dummy FileNode class - trying to access any public attribute except path, |
|
442 | Dummy FileNode class - trying to access any public attribute except path, | |
448 | name, kind or state (or methods/attributes checking those) would raise |
|
443 | name, kind or state (or methods/attributes checking those) would raise | |
449 | RemovedFileNodeError. |
|
444 | RemovedFileNodeError. | |
450 | """ |
|
445 | """ | |
451 | ALLOWED_ATTRIBUTES = [ |
|
446 | ALLOWED_ATTRIBUTES = [ | |
452 | 'name', 'path', 'state', 'is_root', 'is_file', 'is_dir', 'kind', |
|
447 | 'name', 'path', 'state', 'is_root', 'is_file', 'is_dir', 'kind', | |
453 | 'added', 'changed', 'not_changed', 'removed' |
|
448 | 'added', 'changed', 'not_changed', 'removed' | |
454 | ] |
|
449 | ] | |
455 |
|
450 | |||
456 | def __init__(self, path): |
|
451 | def __init__(self, path): | |
457 | """ |
|
452 | """ | |
458 | :param path: relative path to the node |
|
453 | :param path: relative path to the node | |
459 | """ |
|
454 | """ | |
460 | super(RemovedFileNode, self).__init__(path=path) |
|
455 | super(RemovedFileNode, self).__init__(path=path) | |
461 |
|
456 | |||
462 | def __getattribute__(self, attr): |
|
457 | def __getattribute__(self, attr): | |
463 | if attr.startswith('_') or attr in RemovedFileNode.ALLOWED_ATTRIBUTES: |
|
458 | if attr.startswith('_') or attr in RemovedFileNode.ALLOWED_ATTRIBUTES: | |
464 | return super(RemovedFileNode, self).__getattribute__(attr) |
|
459 | return super(RemovedFileNode, self).__getattribute__(attr) | |
465 | raise RemovedFileNodeError("Cannot access attribute %s on " |
|
460 | raise RemovedFileNodeError("Cannot access attribute %s on " | |
466 | "RemovedFileNode" % attr) |
|
461 | "RemovedFileNode" % attr) | |
467 |
|
462 | |||
468 | @LazyProperty |
|
463 | @LazyProperty | |
469 | def state(self): |
|
464 | def state(self): | |
470 | return NodeState.REMOVED |
|
465 | return NodeState.REMOVED | |
471 |
|
466 | |||
472 |
|
467 | |||
473 | class DirNode(Node): |
|
468 | class DirNode(Node): | |
474 | """ |
|
469 | """ | |
475 | DirNode stores list of files and directories within this node. |
|
470 | DirNode stores list of files and directories within this node. | |
476 | Nodes may be used standalone but within repository context they |
|
471 | Nodes may be used standalone but within repository context they | |
477 | lazily fetch data within same repository's changeset. |
|
472 | lazily fetch data within same repository's changeset. | |
478 | """ |
|
473 | """ | |
479 |
|
474 | |||
480 | def __init__(self, path, nodes=(), changeset=None): |
|
475 | def __init__(self, path, nodes=(), changeset=None): | |
481 | """ |
|
476 | """ | |
482 | Only one of ``nodes`` and ``changeset`` may be given. Passing both |
|
477 | Only one of ``nodes`` and ``changeset`` may be given. Passing both | |
483 | would raise ``NodeError`` exception. |
|
478 | would raise ``NodeError`` exception. | |
484 |
|
479 | |||
485 | :param path: relative path to the node |
|
480 | :param path: relative path to the node | |
486 | :param nodes: content may be passed to constructor |
|
481 | :param nodes: content may be passed to constructor | |
487 | :param changeset: if given, will use it to lazily fetch content |
|
482 | :param changeset: if given, will use it to lazily fetch content | |
488 | :param size: always 0 for ``DirNode`` |
|
483 | :param size: always 0 for ``DirNode`` | |
489 | """ |
|
484 | """ | |
490 | if nodes and changeset: |
|
485 | if nodes and changeset: | |
491 | raise NodeError("Cannot use both nodes and changeset") |
|
486 | raise NodeError("Cannot use both nodes and changeset") | |
492 | super(DirNode, self).__init__(path, NodeKind.DIR) |
|
487 | super(DirNode, self).__init__(path, NodeKind.DIR) | |
493 | self.changeset = changeset |
|
488 | self.changeset = changeset | |
494 | self._nodes = nodes |
|
489 | self._nodes = nodes | |
495 |
|
490 | |||
496 | @LazyProperty |
|
491 | @LazyProperty | |
497 | def content(self): |
|
492 | def content(self): | |
498 | raise NodeError("%s represents a dir and has no ``content`` attribute" |
|
493 | raise NodeError("%s represents a dir and has no ``content`` attribute" | |
499 | % self) |
|
494 | % self) | |
500 |
|
495 | |||
501 | @LazyProperty |
|
496 | @LazyProperty | |
502 | def nodes(self): |
|
497 | def nodes(self): | |
503 | if self.changeset: |
|
498 | if self.changeset: | |
504 | nodes = self.changeset.get_nodes(self.path) |
|
499 | nodes = self.changeset.get_nodes(self.path) | |
505 | else: |
|
500 | else: | |
506 | nodes = self._nodes |
|
501 | nodes = self._nodes | |
507 | self._nodes_dict = dict((node.path, node) for node in nodes) |
|
502 | self._nodes_dict = dict((node.path, node) for node in nodes) | |
508 | return sorted(nodes) |
|
503 | return sorted(nodes) | |
509 |
|
504 | |||
510 | @LazyProperty |
|
505 | @LazyProperty | |
511 | def files(self): |
|
506 | def files(self): | |
512 | return sorted((node for node in self.nodes if node.is_file())) |
|
507 | return sorted((node for node in self.nodes if node.is_file())) | |
513 |
|
508 | |||
514 | @LazyProperty |
|
509 | @LazyProperty | |
515 | def dirs(self): |
|
510 | def dirs(self): | |
516 | return sorted((node for node in self.nodes if node.is_dir())) |
|
511 | return sorted((node for node in self.nodes if node.is_dir())) | |
517 |
|
512 | |||
518 | def __iter__(self): |
|
513 | def __iter__(self): | |
519 | for node in self.nodes: |
|
514 | for node in self.nodes: | |
520 | yield node |
|
515 | yield node | |
521 |
|
516 | |||
522 | def get_node(self, path): |
|
517 | def get_node(self, path): | |
523 | """ |
|
518 | """ | |
524 | Returns node from within this particular ``DirNode``, so it is not |

519 | Returns node from within this particular ``DirNode``, so it is not | |
525 | allowed to fetch, i.e. node located at 'docs/api/index.rst' from node |
|
520 | allowed to fetch, i.e. node located at 'docs/api/index.rst' from node | |
526 | 'docs'. In order to access deeper nodes one must fetch nodes between |
|
521 | 'docs'. In order to access deeper nodes one must fetch nodes between | |
527 | them first - this would work:: |
|
522 | them first - this would work:: | |
528 |
|
523 | |||
529 | docs = root.get_node('docs') |
|
524 | docs = root.get_node('docs') | |
530 | docs.get_node('api').get_node('index.rst') |
|
525 | docs.get_node('api').get_node('index.rst') | |
531 |
|
526 | |||
532 | :param path: relative to the current node |

527 | :param path: relative to the current node | |
533 |
|
528 | |||
534 | .. note:: |
|
529 | .. note:: | |
535 | To access nodes lazily (as in the example above) the node has to be |

530 | To access nodes lazily (as in the example above) the node has to be | |
536 | initialized with a related changeset object - without it the node is |

531 | initialized with a related changeset object - without it the node is | |
537 | out of context and may know nothing about anything other than the |

532 | out of context and may know nothing about anything other than the | |
538 | nearest (same-level) nodes. |

533 | nearest (same-level) nodes. | |
539 | """ |
|
534 | """ | |
540 | try: |
|
535 | try: | |
541 | path = path.rstrip('/') |
|
536 | path = path.rstrip('/') | |
542 | if path == '': |
|
537 | if path == '': | |
543 | raise NodeError("Cannot retrieve node without path") |
|
538 | raise NodeError("Cannot retrieve node without path") | |
544 | self.nodes # access nodes first in order to set _nodes_dict |
|
539 | self.nodes # access nodes first in order to set _nodes_dict | |
545 | paths = path.split('/') |
|
540 | paths = path.split('/') | |
546 | if len(paths) == 1: |
|
541 | if len(paths) == 1: | |
547 | if not self.is_root(): |
|
542 | if not self.is_root(): | |
548 | path = '/'.join((self.path, paths[0])) |
|
543 | path = '/'.join((self.path, paths[0])) | |
549 | else: |
|
544 | else: | |
550 | path = paths[0] |
|
545 | path = paths[0] | |
551 | return self._nodes_dict[path] |
|
546 | return self._nodes_dict[path] | |
552 | elif len(paths) > 1: |
|
547 | elif len(paths) > 1: | |
553 | if self.changeset is None: |
|
548 | if self.changeset is None: | |
554 | raise NodeError("Cannot access deeper " |
|
549 | raise NodeError("Cannot access deeper " | |
555 | "nodes without changeset") |
|
550 | "nodes without changeset") | |
556 | else: |
|
551 | else: | |
557 | path1, path2 = paths[0], '/'.join(paths[1:]) |
|
552 | path1, path2 = paths[0], '/'.join(paths[1:]) | |
558 | return self.get_node(path1).get_node(path2) |
|
553 | return self.get_node(path1).get_node(path2) | |
559 | else: |
|
554 | else: | |
560 | raise KeyError |
|
555 | raise KeyError | |
561 | except KeyError: |
|
556 | except KeyError: | |
562 | raise NodeError("Node does not exist at %s" % path) |
|
557 | raise NodeError("Node does not exist at %s" % path) | |
563 |
|
558 | |||
564 | @LazyProperty |
|
559 | @LazyProperty | |
565 | def state(self): |
|
560 | def state(self): | |
566 | raise NodeError("Cannot access state of DirNode") |
|
561 | raise NodeError("Cannot access state of DirNode") | |
567 |
|
562 | |||
568 | @LazyProperty |
|
563 | @LazyProperty | |
569 | def size(self): |
|
564 | def size(self): | |
570 | size = 0 |
|
565 | size = 0 | |
571 | for root, dirs, files in self.changeset.walk(self.path): |
|
566 | for root, dirs, files in self.changeset.walk(self.path): | |
572 | for f in files: |
|
567 | for f in files: | |
573 | size += f.size |
|
568 | size += f.size | |
574 |
|
569 | |||
575 | return size |
|
570 | return size | |
576 |
|
571 | |||
577 | def __repr__(self): |
|
572 | def __repr__(self): | |
578 | return '<%s %r @ %s>' % (self.__class__.__name__, self.path, |
|
573 | return '<%s %r @ %s>' % (self.__class__.__name__, self.path, | |
579 | getattr(self.changeset, 'short_id', '')) |
|
574 | getattr(self.changeset, 'short_id', '')) | |
580 |
|
575 | |||
581 |
|
576 | |||
582 | class RootNode(DirNode): |
|
577 | class RootNode(DirNode): | |
583 | """ |
|
578 | """ | |
584 | DirNode being the root node of the repository. |
|
579 | DirNode being the root node of the repository. | |
585 | """ |
|
580 | """ | |
586 |
|
581 | |||
587 | def __init__(self, nodes=(), changeset=None): |
|
582 | def __init__(self, nodes=(), changeset=None): | |
588 | super(RootNode, self).__init__(path='', nodes=nodes, |
|
583 | super(RootNode, self).__init__(path='', nodes=nodes, | |
589 | changeset=changeset) |
|
584 | changeset=changeset) | |
590 |
|
585 | |||
591 | def __repr__(self): |
|
586 | def __repr__(self): | |
592 | return '<%s>' % self.__class__.__name__ |
|
587 | return '<%s>' % self.__class__.__name__ | |
593 |
|
588 | |||
594 |
|
589 | |||
595 | class SubModuleNode(Node): |
|
590 | class SubModuleNode(Node): | |
596 | """ |
|
591 | """ | |
597 | represents a SubModule of Git or SubRepo of Mercurial |
|
592 | represents a SubModule of Git or SubRepo of Mercurial | |
598 | """ |
|
593 | """ | |
599 | is_binary = False |
|
594 | is_binary = False | |
600 | size = 0 |
|
595 | size = 0 | |
601 |
|
596 | |||
602 | def __init__(self, name, url, changeset=None, alias=None): |
|
597 | def __init__(self, name, url, changeset=None, alias=None): | |
603 | self.path = name |
|
598 | self.path = name | |
604 | self.kind = NodeKind.SUBMODULE |
|
599 | self.kind = NodeKind.SUBMODULE | |
605 | self.alias = alias |
|
600 | self.alias = alias | |
606 | # we have to use emptyChangeset here since this can point to svn/git/hg |
|
601 | # we have to use emptyChangeset here since this can point to svn/git/hg | |
607 | # submodules we cannot get from repository |
|
602 | # submodules we cannot get from repository | |
608 | self.changeset = EmptyChangeset(str(changeset), alias=alias) |
|
603 | self.changeset = EmptyChangeset(str(changeset), alias=alias) | |
609 | self.url = url |
|
604 | self.url = url | |
610 |
|
605 | |||
611 | def __repr__(self): |
|
606 | def __repr__(self): | |
612 | return '<%s %r @ %s>' % (self.__class__.__name__, self.path, |
|
607 | return '<%s %r @ %s>' % (self.__class__.__name__, self.path, | |
613 | getattr(self.changeset, 'short_id', '')) |
|
608 | getattr(self.changeset, 'short_id', '')) | |
614 |
|
609 | |||
615 | @LazyProperty |
|
610 | @LazyProperty | |
616 | def name(self): |
|
611 | def name(self): | |
617 | """ |
|
612 | """ | |
618 | Returns name of the node, so if its path contains slashes, |

613 | Returns name of the node, so if its path contains slashes, | |
619 | only the last part is returned. |

614 | only the last part is returned. | |
620 | """ |
|
615 | """ | |
621 | org = safe_unicode(self.path.rstrip('/').split('/')[-1]) |
|
616 | org = safe_unicode(self.path.rstrip('/').split('/')[-1]) | |
622 | return u'%s @ %s' % (org, self.changeset.short_id) |
|
617 | return u'%s @ %s' % (org, self.changeset.short_id) |
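
The one behavioural change to ``FileNode`` in the hunk above is ``is_binary``, which now checks the raw content for a NUL byte directly (``return b'\0' in self.content``) instead of going through the old ``_bin`` temporary. A minimal, self-contained sketch of that heuristic follows; it is plain Python with sample data invented for illustration, not Kallithea's actual node objects::

    def looks_binary(data):
        # Same rule as FileNode.is_binary above: any NUL byte means "binary".
        return b'\0' in data

    assert looks_binary(b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR')   # first bytes of a PNG file
    assert not looks_binary(b'plain text, no NUL byte here\n')    # ordinary text passes

The check is deliberately cheap and can misclassify encodings that legitimately contain NUL bytes (UTF-16, for instance); that is the usual trade-off for this kind of quick sniffing.
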
@@ -1,177 +1,176 b'' | |||||
1 | from pygments import highlight |
|
1 | from pygments import highlight | |
2 | from pygments.formatters import HtmlFormatter |
|
2 | from pygments.formatters import HtmlFormatter | |
3 |
|
3 | |||
4 | from kallithea.lib.vcs.exceptions import VCSError |
|
4 | from kallithea.lib.vcs.exceptions import VCSError | |
5 | from kallithea.lib.vcs.nodes import FileNode |
|
5 | from kallithea.lib.vcs.nodes import FileNode | |
|
6 | from kallithea.lib.vcs.utils import safe_unicode | |||
6 |
|
7 | |||
7 |
|
8 | |||
8 | def annotate_highlight(filenode, annotate_from_changeset_func=None, |
|
9 | def annotate_highlight(filenode, annotate_from_changeset_func=None, | |
9 | order=None, headers=None, **options): |
|
10 | order=None, headers=None, **options): | |
10 | """ |
|
11 | """ | |
11 | Returns html portion containing annotated table with 3 columns: line |
|
12 | Returns html portion containing annotated table with 3 columns: line | |
12 | numbers, changeset information and pygmentized line of code. |
|
13 | numbers, changeset information and pygmentized line of code. | |
13 |
|
14 | |||
14 | :param filenode: FileNode object |
|
15 | :param filenode: FileNode object | |
15 | :param annotate_from_changeset_func: function taking changeset and |
|
16 | :param annotate_from_changeset_func: function taking changeset and | |
16 | returning a single annotate cell; it needs a line break at the end |

17 | returning a single annotate cell; it needs a line break at the end | |
17 | :param order: ordered sequence of ``ls`` (line numbers column), |
|
18 | :param order: ordered sequence of ``ls`` (line numbers column), | |
18 | ``annotate`` (annotate column), ``code`` (code column); Default is |
|
19 | ``annotate`` (annotate column), ``code`` (code column); Default is | |
19 | ``['ls', 'annotate', 'code']`` |
|
20 | ``['ls', 'annotate', 'code']`` | |
20 | :param headers: dictionary with headers (keys are what's in the ``order`` |

21 | :param headers: dictionary with headers (keys are what's in the ``order`` | |
21 | parameter) |
|
22 | parameter) | |
22 | """ |
|
23 | """ | |
23 | options['linenos'] = True |
|
24 | options['linenos'] = True | |
24 | formatter = AnnotateHtmlFormatter(filenode=filenode, order=order, |
|
25 | formatter = AnnotateHtmlFormatter(filenode=filenode, order=order, | |
25 | headers=headers, |
|
26 | headers=headers, | |
26 | annotate_from_changeset_func=annotate_from_changeset_func, **options) |
|
27 | annotate_from_changeset_func=annotate_from_changeset_func, **options) | |
27 | lexer = filenode.lexer |
|
28 | return highlight(safe_unicode(filenode.content), filenode.lexer, formatter) | |
28 | highlighted = highlight(filenode.content, lexer, formatter) |
|
|||
29 | return highlighted |
|
|||
30 |
|
29 | |||
31 |
|
30 | |||
32 | class AnnotateHtmlFormatter(HtmlFormatter): |
|
31 | class AnnotateHtmlFormatter(HtmlFormatter): | |
33 |
|
32 | |||
34 | def __init__(self, filenode, annotate_from_changeset_func=None, |
|
33 | def __init__(self, filenode, annotate_from_changeset_func=None, | |
35 | order=None, **options): |
|
34 | order=None, **options): | |
36 | """ |
|
35 | """ | |
37 | If ``annotate_from_changeset_func`` is passed it should be a function |
|
36 | If ``annotate_from_changeset_func`` is passed it should be a function | |
38 | which returns string from the given changeset. For example, we may pass |
|
37 | which returns string from the given changeset. For example, we may pass | |
39 | following function as ``annotate_from_changeset_func``:: |
|
38 | following function as ``annotate_from_changeset_func``:: | |
40 |
|
39 | |||
41 | def changeset_to_anchor(changeset): |
|
40 | def changeset_to_anchor(changeset): | |
42 | return '<a href="/changesets/%s/">%s</a>\n' % \ |
|
41 | return '<a href="/changesets/%s/">%s</a>\n' % \ | |
43 | (changeset.id, changeset.id) |
|
42 | (changeset.id, changeset.id) | |
44 |
|
43 | |||
45 | :param annotate_from_changeset_func: see above |
|
44 | :param annotate_from_changeset_func: see above | |
46 | :param order: (default: ``['ls', 'annotate', 'code']``); order of |
|
45 | :param order: (default: ``['ls', 'annotate', 'code']``); order of | |
47 | columns; |
|
46 | columns; | |
48 | :param options: standard Pygments HtmlFormatter options; there is one |

47 | :param options: standard Pygments HtmlFormatter options; there is one | |
49 | extra option though, ``headers``. For instance we can pass:: |

48 | extra option though, ``headers``. For instance we can pass:: | |
50 |
|
49 | |||
51 | formatter = AnnotateHtmlFormatter(filenode, headers={ |
|
50 | formatter = AnnotateHtmlFormatter(filenode, headers={ | |
52 | 'ls': '#', |
|
51 | 'ls': '#', | |
53 | 'annotate': 'Annotate', |
|
52 | 'annotate': 'Annotate', | |
54 | 'code': 'Code', |
|
53 | 'code': 'Code', | |
55 | }) |
|
54 | }) | |
56 |
|
55 | |||
57 | """ |
|
56 | """ | |
58 | super(AnnotateHtmlFormatter, self).__init__(**options) |
|
57 | super(AnnotateHtmlFormatter, self).__init__(**options) | |
59 | self.annotate_from_changeset_func = annotate_from_changeset_func |
|
58 | self.annotate_from_changeset_func = annotate_from_changeset_func | |
60 | self.order = order or ('ls', 'annotate', 'code') |
|
59 | self.order = order or ('ls', 'annotate', 'code') | |
61 | headers = options.pop('headers', None) |
|
60 | headers = options.pop('headers', None) | |
62 | if headers and not ('ls' in headers and 'annotate' in headers and |
|
61 | if headers and not ('ls' in headers and 'annotate' in headers and | |
63 | 'code' in headers |
|
62 | 'code' in headers | |
64 | ): |
|
63 | ): | |
65 | raise ValueError("If headers option dict is specified it must " |
|
64 | raise ValueError("If headers option dict is specified it must " | |
66 | "all 'ls', 'annotate' and 'code' keys") |
|
65 | "all 'ls', 'annotate' and 'code' keys") | |
67 | self.headers = headers |
|
66 | self.headers = headers | |
68 | if isinstance(filenode, FileNode): |
|
67 | if isinstance(filenode, FileNode): | |
69 | self.filenode = filenode |
|
68 | self.filenode = filenode | |
70 | else: |
|
69 | else: | |
71 | raise VCSError("This formatter expects a FileNode parameter, not %r" |

70 | raise VCSError("This formatter expects a FileNode parameter, not %r" | |
72 | % type(filenode)) |
|
71 | % type(filenode)) | |
73 |
|
72 | |||
74 | def annotate_from_changeset(self, changeset): |
|
73 | def annotate_from_changeset(self, changeset): | |
75 | """ |
|
74 | """ | |
76 | Returns full html line for single changeset per annotated line. |
|
75 | Returns full html line for single changeset per annotated line. | |
77 | """ |
|
76 | """ | |
78 | if self.annotate_from_changeset_func: |
|
77 | if self.annotate_from_changeset_func: | |
79 | return self.annotate_from_changeset_func(changeset) |
|
78 | return self.annotate_from_changeset_func(changeset) | |
80 | else: |
|
79 | else: | |
81 | return ''.join((changeset.id, '\n')) |
|
80 | return ''.join((changeset.id, '\n')) | |
82 |
|
81 | |||
83 | def _wrap_tablelinenos(self, inner): |
|
82 | def _wrap_tablelinenos(self, inner): | |
84 | inner_lines = [] |
|
83 | inner_lines = [] | |
85 | lncount = 0 |
|
84 | lncount = 0 | |
86 | for t, line in inner: |
|
85 | for t, line in inner: | |
87 | if t: |
|
86 | if t: | |
88 | lncount += 1 |
|
87 | lncount += 1 | |
89 | inner_lines.append(line) |
|
88 | inner_lines.append(line) | |
90 |
|
89 | |||
91 | fl = self.linenostart |
|
90 | fl = self.linenostart | |
92 | mw = len(str(lncount + fl - 1)) |
|
91 | mw = len(str(lncount + fl - 1)) | |
93 | sp = self.linenospecial |
|
92 | sp = self.linenospecial | |
94 | st = self.linenostep |
|
93 | st = self.linenostep | |
95 | la = self.lineanchors |
|
94 | la = self.lineanchors | |
96 | aln = self.anchorlinenos |
|
95 | aln = self.anchorlinenos | |
97 | if sp: |
|
96 | if sp: | |
98 | lines = [] |
|
97 | lines = [] | |
99 |
|
98 | |||
100 | for i in range(fl, fl + lncount): |
|
99 | for i in range(fl, fl + lncount): | |
101 | if i % st == 0: |
|
100 | if i % st == 0: | |
102 | if i % sp == 0: |
|
101 | if i % sp == 0: | |
103 | if aln: |
|
102 | if aln: | |
104 | lines.append('<a href="#%s-%d" class="special">' |
|
103 | lines.append('<a href="#%s-%d" class="special">' | |
105 | '%*d</a>' % |
|
104 | '%*d</a>' % | |
106 | (la, i, mw, i)) |
|
105 | (la, i, mw, i)) | |
107 | else: |
|
106 | else: | |
108 | lines.append('<span class="special">' |
|
107 | lines.append('<span class="special">' | |
109 | '%*d</span>' % (mw, i)) |
|
108 | '%*d</span>' % (mw, i)) | |
110 | else: |
|
109 | else: | |
111 | if aln: |
|
110 | if aln: | |
112 | lines.append('<a href="#%s-%d">' |
|
111 | lines.append('<a href="#%s-%d">' | |
113 | '%*d</a>' % (la, i, mw, i)) |
|
112 | '%*d</a>' % (la, i, mw, i)) | |
114 | else: |
|
113 | else: | |
115 | lines.append('%*d' % (mw, i)) |
|
114 | lines.append('%*d' % (mw, i)) | |
116 | else: |
|
115 | else: | |
117 | lines.append('') |
|
116 | lines.append('') | |
118 | ls = '\n'.join(lines) |
|
117 | ls = '\n'.join(lines) | |
119 | else: |
|
118 | else: | |
120 | lines = [] |
|
119 | lines = [] | |
121 | for i in range(fl, fl + lncount): |
|
120 | for i in range(fl, fl + lncount): | |
122 | if i % st == 0: |
|
121 | if i % st == 0: | |
123 | if aln: |
|
122 | if aln: | |
124 | lines.append('<a href="#%s-%d">%*d</a>' |
|
123 | lines.append('<a href="#%s-%d">%*d</a>' | |
125 | % (la, i, mw, i)) |
|
124 | % (la, i, mw, i)) | |
126 | else: |
|
125 | else: | |
127 | lines.append('%*d' % (mw, i)) |
|
126 | lines.append('%*d' % (mw, i)) | |
128 | else: |
|
127 | else: | |
129 | lines.append('') |
|
128 | lines.append('') | |
130 | ls = '\n'.join(lines) |
|
129 | ls = '\n'.join(lines) | |
131 |
|
130 | |||
132 | annotate_changesets = [tup[1] for tup in self.filenode.annotate] |
|
131 | annotate_changesets = [tup[1] for tup in self.filenode.annotate] | |
133 | # If pygments cropped the last line break we need to do that too |

132 | # If pygments cropped the last line break we need to do that too | |
134 | ln_cs = len(annotate_changesets) |
|
133 | ln_cs = len(annotate_changesets) | |
135 | ln_ = len(ls.splitlines()) |
|
134 | ln_ = len(ls.splitlines()) | |
136 | if ln_cs > ln_: |
|
135 | if ln_cs > ln_: | |
137 | annotate_changesets = annotate_changesets[:ln_ - ln_cs] |
|
136 | annotate_changesets = annotate_changesets[:ln_ - ln_cs] | |
138 | annotate = ''.join((self.annotate_from_changeset(changeset) |
|
137 | annotate = ''.join((self.annotate_from_changeset(changeset) | |
139 | for changeset in annotate_changesets)) |
|
138 | for changeset in annotate_changesets)) | |
140 | # in case you wonder about the seemingly redundant <div> here: |
|
139 | # in case you wonder about the seemingly redundant <div> here: | |
141 | # since the content in the other cell also is wrapped in a div, |
|
140 | # since the content in the other cell also is wrapped in a div, | |
142 | # some browsers in some configurations seem to mess up the formatting. |
|
141 | # some browsers in some configurations seem to mess up the formatting. | |
143 | ''' |
|
142 | ''' | |
144 | yield 0, ('<table class="%stable">' % self.cssclass + |
|
143 | yield 0, ('<table class="%stable">' % self.cssclass + | |
145 | '<tr><td class="linenos"><div class="linenodiv"><pre>' + |
|
144 | '<tr><td class="linenos"><div class="linenodiv"><pre>' + | |
146 | ls + '</pre></div></td>' + |
|
145 | ls + '</pre></div></td>' + | |
147 | '<td class="code">') |
|
146 | '<td class="code">') | |
148 | yield 0, ''.join(inner_lines) |
|
147 | yield 0, ''.join(inner_lines) | |
149 | yield 0, '</td></tr></table>' |
|
148 | yield 0, '</td></tr></table>' | |
150 |
|
149 | |||
151 | ''' |
|
150 | ''' | |
152 | headers_row = [] |
|
151 | headers_row = [] | |
153 | if self.headers: |
|
152 | if self.headers: | |
154 | headers_row = ['<tr class="annotate-header">'] |
|
153 | headers_row = ['<tr class="annotate-header">'] | |
155 | for key in self.order: |
|
154 | for key in self.order: | |
156 | td = ''.join(('<td>', self.headers[key], '</td>')) |
|
155 | td = ''.join(('<td>', self.headers[key], '</td>')) | |
157 | headers_row.append(td) |
|
156 | headers_row.append(td) | |
158 | headers_row.append('</tr>') |
|
157 | headers_row.append('</tr>') | |
159 |
|
158 | |||
160 | body_row_start = ['<tr>'] |
|
159 | body_row_start = ['<tr>'] | |
161 | for key in self.order: |
|
160 | for key in self.order: | |
162 | if key == 'ls': |
|
161 | if key == 'ls': | |
163 | body_row_start.append( |
|
162 | body_row_start.append( | |
164 | '<td class="linenos"><div class="linenodiv"><pre>' + |
|
163 | '<td class="linenos"><div class="linenodiv"><pre>' + | |
165 | ls + '</pre></div></td>') |
|
164 | ls + '</pre></div></td>') | |
166 | elif key == 'annotate': |
|
165 | elif key == 'annotate': | |
167 | body_row_start.append( |
|
166 | body_row_start.append( | |
168 | '<td class="annotate"><div class="annotatediv"><pre>' + |
|
167 | '<td class="annotate"><div class="annotatediv"><pre>' + | |
169 | annotate + '</pre></div></td>') |
|
168 | annotate + '</pre></div></td>') | |
170 | elif key == 'code': |
|
169 | elif key == 'code': | |
171 | body_row_start.append('<td class="code">') |
|
170 | body_row_start.append('<td class="code">') | |
172 | yield 0, ('<table class="%stable">' % self.cssclass + |
|
171 | yield 0, ('<table class="%stable">' % self.cssclass + | |
173 | ''.join(headers_row) + |
|
172 | ''.join(headers_row) + | |
174 | ''.join(body_row_start) |
|
173 | ''.join(body_row_start) | |
175 | ) |
|
174 | ) | |
176 | yield 0, ''.join(inner_lines) |
|
175 | yield 0, ''.join(inner_lines) | |
177 | yield 0, '</td></tr></table>' |
|
176 | yield 0, '</td></tr></table>' |
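
In the annotate hunk above, ``annotate_highlight`` now decodes the file content before handing it to Pygments, collapsing the two temporaries into a single ``highlight(safe_unicode(filenode.content), filenode.lexer, formatter)`` call. A rough standalone equivalent of that call is sketched below; ``naive_safe_unicode`` is only a stand-in for the real helper imported from ``kallithea.lib.vcs.utils``, which copes with more encodings than plain UTF-8::

    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    from pygments.lexers import PythonLexer

    def naive_safe_unicode(data, encoding='utf-8'):
        # Stand-in only: decode bytes leniently, pass text through untouched.
        return data.decode(encoding, 'replace') if isinstance(data, bytes) else data

    raw = b"print('hello')\n"   # imagine this is filenode.content (raw bytes)
    html = highlight(naive_safe_unicode(raw), PythonLexer(), HtmlFormatter(linenos=True))

Decoding first matters because Pygments lexers expect text; feeding them raw bytes with a non-ASCII payload can raise or silently mangle the highlighted output.
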
@@ -1,174 +1,174 b'' | |||||
1 | ## -*- coding: utf-8 -*- |
|
1 | ## -*- coding: utf-8 -*- | |
2 | <%inherit file="/base/base.html"/> |
|
2 | <%inherit file="/base/base.html"/> | |
3 |
|
3 | |||
4 | <%block name="title"> |
|
4 | <%block name="title"> | |
5 | ${_('Edit Gist')} · ${c.gist.gist_access_id} |
|
5 | ${_('Edit Gist')} · ${c.gist.gist_access_id} | |
6 | </%block> |
|
6 | </%block> | |
7 |
|
7 | |||
8 | <%block name="js_extra"> |
|
8 | <%block name="js_extra"> | |
9 | <script type="text/javascript" src="${h.url('/codemirror/lib/codemirror.js')}"></script> |
|
9 | <script type="text/javascript" src="${h.url('/codemirror/lib/codemirror.js')}"></script> | |
10 | <script type="text/javascript" src="${h.url('/js/codemirror_loadmode.js')}"></script> |
|
10 | <script type="text/javascript" src="${h.url('/js/codemirror_loadmode.js')}"></script> | |
11 | <script type="text/javascript" src="${h.url('/codemirror/mode/meta.js')}"></script> |
|
11 | <script type="text/javascript" src="${h.url('/codemirror/mode/meta.js')}"></script> | |
12 | </%block> |
|
12 | </%block> | |
13 | <%block name="css_extra"> |
|
13 | <%block name="css_extra"> | |
14 | <link rel="stylesheet" type="text/css" href="${h.url('/codemirror/lib/codemirror.css')}"/> |
|
14 | <link rel="stylesheet" type="text/css" href="${h.url('/codemirror/lib/codemirror.css')}"/> | |
15 | </%block> |
|
15 | </%block> | |
16 |
|
16 | |||
17 | <%def name="breadcrumbs_links()"> |
|
17 | <%def name="breadcrumbs_links()"> | |
18 | ${_('Edit Gist')} · ${c.gist.gist_access_id} |
|
18 | ${_('Edit Gist')} · ${c.gist.gist_access_id} | |
19 | </%def> |
|
19 | </%def> | |
20 |
|
20 | |||
21 | <%block name="header_menu"> |
|
21 | <%block name="header_menu"> | |
22 | ${self.menu('gists')} |
|
22 | ${self.menu('gists')} | |
23 | </%block> |
|
23 | </%block> | |
24 |
|
24 | |||
25 | <%def name="main()"> |
|
25 | <%def name="main()"> | |
26 | <div class="panel panel-primary"> |
|
26 | <div class="panel panel-primary"> | |
27 | <div class="panel-heading clearfix"> |
|
27 | <div class="panel-heading clearfix"> | |
28 | ${self.breadcrumbs()} |
|
28 | ${self.breadcrumbs()} | |
29 | </div> |
|
29 | </div> | |
30 |
|
30 | |||
31 | <div class="panel-body"> |
|
31 | <div class="panel-body"> | |
32 | <div id="edit_error" style="display: none" class="flash_msg"> |
|
32 | <div id="edit_error" style="display: none" class="flash_msg"> | |
33 | <div class="alert alert-dismissable alert-warning"> |
|
33 | <div class="alert alert-dismissable alert-warning"> | |
34 | <button type="button" class="close" data-dismiss="alert" aria-hidden="true"><i class="icon-cancel-circled"></i></button> |
|
34 | <button type="button" class="close" data-dismiss="alert" aria-hidden="true"><i class="icon-cancel-circled"></i></button> | |
35 | ${(h.HTML(_('Gist was updated since you started editing. Copy your changes and click %(here)s to reload new version.')) |
|
35 | ${(h.HTML(_('Gist was updated since you started editing. Copy your changes and click %(here)s to reload new version.')) | |
36 | % {'here': h.link_to(_('here'),h.url('edit_gist', gist_id=c.gist.gist_access_id))})} |
|
36 | % {'here': h.link_to(_('here'),h.url('edit_gist', gist_id=c.gist.gist_access_id))})} | |
37 | </div> |
|
37 | </div> | |
38 | <script> |
|
38 | <script> | |
39 | if (typeof jQuery != 'undefined') { |
|
39 | if (typeof jQuery != 'undefined') { | |
40 | $(".alert").alert(); |
|
40 | $(".alert").alert(); | |
41 | } |
|
41 | } | |
42 | </script> |
|
42 | </script> | |
43 | </div> |
|
43 | </div> | |
44 |
|
44 | |||
45 | <div id="files_data"> |
|
45 | <div id="files_data"> | |
46 | ${h.form(h.url('edit_gist', gist_id=c.gist.gist_access_id), method='post', id='eform')} |
|
46 | ${h.form(h.url('edit_gist', gist_id=c.gist.gist_access_id), method='post', id='eform')} | |
47 | <div> |
|
47 | <div> | |
48 | <input type="hidden" value="${c.file_changeset.raw_id}" name="parent_hash"> |
|
48 | <input type="hidden" value="${c.file_changeset.raw_id}" name="parent_hash"> | |
49 | <textarea class="form-control" |
|
49 | <textarea class="form-control" | |
50 | id="description" name="description" |
|
50 | id="description" name="description" | |
51 | placeholder="${_('Gist description ...')}">${c.gist.gist_description}</textarea> |
|
51 | placeholder="${_('Gist description ...')}">${c.gist.gist_description}</textarea> | |
52 | <div> |
|
52 | <div> | |
53 | <label> |
|
53 | <label> | |
54 | ${_('Gist lifetime')} |
|
54 | ${_('Gist lifetime')} | |
55 | ${h.select('lifetime', '0', c.lifetime_options)} |
|
55 | ${h.select('lifetime', '0', c.lifetime_options)} | |
56 | </label> |
|
56 | </label> | |
57 | <span class="text-muted"> |
|
57 | <span class="text-muted"> | |
58 | %if c.gist.gist_expires == -1: |
|
58 | %if c.gist.gist_expires == -1: | |
59 | ${_('Expires')}: ${_('Never')} |
|
59 | ${_('Expires')}: ${_('Never')} | |
60 | %else: |
|
60 | %else: | |
61 | ${_('Expires')}: ${h.age(h.time_to_datetime(c.gist.gist_expires))} |
|
61 | ${_('Expires')}: ${h.age(h.time_to_datetime(c.gist.gist_expires))} | |
62 | %endif |
|
62 | %endif | |
63 | </span> |
|
63 | </span> | |
64 | </div> |
|
64 | </div> | |
65 | </div> |
|
65 | </div> | |
66 |
|
66 | |||
67 | % for cnt, file in enumerate(c.files): |
|
67 | % for cnt, file in enumerate(c.files): | |
68 | <div id="body" class="panel panel-default form-inline"> |
|
68 | <div id="body" class="panel panel-default form-inline"> | |
69 | <div class="panel-heading"> |
|
69 | <div class="panel-heading"> | |
70 | <input type="hidden" value="${h.safe_unicode(file.path)}" name="org_files"> |
|
70 | <input type="hidden" value="${h.safe_unicode(file.path)}" name="org_files"> | |
71 | <input class="form-control" id="filename_${h.FID('f',file.path)}" name="files" size="30" type="text" value="${h.safe_unicode(file.path)}"> |
|
71 | <input class="form-control" id="filename_${h.FID('f',file.path)}" name="files" size="30" type="text" value="${h.safe_unicode(file.path)}"> | |
72 | <select class="form-control" id="mimetype_${h.FID('f',file.path)}" name="mimetypes"></select> |
|
72 | <select class="form-control" id="mimetype_${h.FID('f',file.path)}" name="mimetypes"></select> | |
73 | </div> |
|
73 | </div> | |
74 | <div class="panel-body no-padding"> |
|
74 | <div class="panel-body no-padding"> | |
75 | <div id="editor_container"> |
|
75 | <div id="editor_container"> | |
76 | <textarea id="editor_${h.FID('f',file.path)}" name="contents" style="display:none">${file.content}</textarea> |
|
76 | <textarea id="editor_${h.FID('f',file.path)}" name="contents" style="display:none">${safe_unicode(file.content)}</textarea> | |
77 | </div> |
|
77 | </div> | |
78 | </div> |
|
78 | </div> | |
79 | </div> |
|
79 | </div> | |
80 |
|
80 | |||
81 | ## dynamic edit box. |
|
81 | ## dynamic edit box. | |
82 | <script type="text/javascript"> |
|
82 | <script type="text/javascript"> | |
83 | $(document).ready(function(){ |
|
83 | $(document).ready(function(){ | |
84 | var myCodeMirror = initCodeMirror(${h.js('editor_' + h.FID('f',file.path))}, ${h.jshtml(request.script_name)}, ''); |
|
84 | var myCodeMirror = initCodeMirror(${h.js('editor_' + h.FID('f',file.path))}, ${h.jshtml(request.script_name)}, ''); | |
85 |
|
85 | |||
86 | //inject new modes |
|
86 | //inject new modes | |
87 | var $mimetype_select = $(${h.js('#mimetype_' + h.FID('f',file.path))}); |
|
87 | var $mimetype_select = $(${h.js('#mimetype_' + h.FID('f',file.path))}); | |
88 | $mimetype_select.each(function(){ |
|
88 | $mimetype_select.each(function(){ | |
89 | var modes_select = this; |
|
89 | var modes_select = this; | |
90 | var index = 1; |
|
90 | var index = 1; | |
91 | for(var i=0;i<CodeMirror.modeInfo.length;i++) { |
|
91 | for(var i=0;i<CodeMirror.modeInfo.length;i++) { | |
92 | var m = CodeMirror.modeInfo[i]; |
|
92 | var m = CodeMirror.modeInfo[i]; | |
93 | var opt = new Option(m.name, m.mime); |
|
93 | var opt = new Option(m.name, m.mime); | |
94 | $(opt).attr('mode', m.mode); |
|
94 | $(opt).attr('mode', m.mode); | |
95 | if (m.mime == 'text/plain') { |
|
95 | if (m.mime == 'text/plain') { | |
96 | // default plain text |
|
96 | // default plain text | |
97 | $(opt).prop('selected', true); |
|
97 | $(opt).prop('selected', true); | |
98 | modes_select.options[0] = opt; |
|
98 | modes_select.options[0] = opt; | |
99 | } else { |
|
99 | } else { | |
100 | modes_select.options[index++] = opt; |
|
100 | modes_select.options[index++] = opt; | |
101 | } |
|
101 | } | |
102 | } |
|
102 | } | |
103 | }); |
|
103 | }); | |
104 |
|
104 | |||
105 | var $filename_input = $(${h.js('#filename_' + h.FID('f',file.path))}); |
|
105 | var $filename_input = $(${h.js('#filename_' + h.FID('f',file.path))}); | |
106 | // on select change set new mode |
|
106 | // on select change set new mode | |
107 | $mimetype_select.change(function(e){ |
|
107 | $mimetype_select.change(function(e){ | |
108 | var selected = e.currentTarget; |
|
108 | var selected = e.currentTarget; | |
109 | var node = selected.options[selected.selectedIndex]; |
|
109 | var node = selected.options[selected.selectedIndex]; | |
110 | var detected_mode = CodeMirror.findModeByMIME(node.value); |
|
110 | var detected_mode = CodeMirror.findModeByMIME(node.value); | |
111 | setCodeMirrorMode(myCodeMirror, detected_mode); |
|
111 | setCodeMirrorMode(myCodeMirror, detected_mode); | |
112 |
|
112 | |||
113 | var proposed_ext = CodeMirror.findExtensionByMode(detected_mode); |
|
113 | var proposed_ext = CodeMirror.findExtensionByMode(detected_mode); | |
114 | var file_data = CodeMirror.getFilenameAndExt($filename_input.val()); |
|
114 | var file_data = CodeMirror.getFilenameAndExt($filename_input.val()); | |
115 | var filename = file_data['filename'] || 'filename1'; |
|
115 | var filename = file_data['filename'] || 'filename1'; | |
116 | $filename_input.val(filename + '.' + proposed_ext); |
|
116 | $filename_input.val(filename + '.' + proposed_ext); | |
117 | }); |
|
117 | }); | |
118 |
|
118 | |||
119 | // on type the new filename set mode |
|
119 | // on type the new filename set mode | |
120 | $filename_input.keyup(function(e){ |
|
120 | $filename_input.keyup(function(e){ | |
121 | var file_data = CodeMirror.getFilenameAndExt(this.value); |
|
121 | var file_data = CodeMirror.getFilenameAndExt(this.value); | |
122 | if(file_data['ext'] != null){ |
|
122 | if(file_data['ext'] != null){ | |
123 | var detected_mode = CodeMirror.findModeByExtension(file_data['ext']) || CodeMirror.findModeByMIME('text/plain'); |
|
123 | var detected_mode = CodeMirror.findModeByExtension(file_data['ext']) || CodeMirror.findModeByMIME('text/plain'); | |
124 |
|
124 | |||
125 | if (detected_mode){ |
|
125 | if (detected_mode){ | |
126 | setCodeMirrorMode(myCodeMirror, detected_mode); |
|
126 | setCodeMirrorMode(myCodeMirror, detected_mode); | |
127 | $mimetype_select.val(detected_mode.mime); |
|
127 | $mimetype_select.val(detected_mode.mime); | |
128 | } |
|
128 | } | |
129 | } |
|
129 | } | |
130 | }); |
|
130 | }); | |
131 |
|
131 | |||
132 | // set mode on page load |
|
132 | // set mode on page load | |
133 | var detected_mode = CodeMirror.findModeByExtension(${h.js(file.extension)}); |
|
133 | var detected_mode = CodeMirror.findModeByExtension(${h.js(file.extension)}); | |
134 |
|
134 | |||
135 | if (detected_mode){ |
|
135 | if (detected_mode){ | |
136 | setCodeMirrorMode(myCodeMirror, detected_mode); |
|
136 | setCodeMirrorMode(myCodeMirror, detected_mode); | |
137 | $mimetype_select.val(detected_mode.mime); |
|
137 | $mimetype_select.val(detected_mode.mime); | |
138 | } |
|
138 | } | |
139 | }); |
|
139 | }); | |
140 | </script> |
|
140 | </script> | |
141 |
|
141 | |||
142 | %endfor |
|
142 | %endfor | |
143 |
|
143 | |||
144 | <div> |
|
144 | <div> | |
145 | ${h.submit('update',_('Update Gist'),class_="btn btn-success")} |
|
145 | ${h.submit('update',_('Update Gist'),class_="btn btn-success")} | |
146 | <a class="btn btn-default" href="${h.url('gist', gist_id=c.gist.gist_access_id)}">${_('Cancel')}</a> |
|
146 | <a class="btn btn-default" href="${h.url('gist', gist_id=c.gist.gist_access_id)}">${_('Cancel')}</a> | |
147 | </div> |
|
147 | </div> | |
148 | ${h.end_form()} |
|
148 | ${h.end_form()} | |
149 | <script> |
|
149 | <script> | |
150 | $('#update').on('click', function(e){ |
|
150 | $('#update').on('click', function(e){ | |
151 | e.preventDefault(); |
|
151 | e.preventDefault(); | |
152 |
|
152 | |||
153 | // check for newer version. |
|
153 | // check for newer version. | |
154 | $.ajax({ |
|
154 | $.ajax({ | |
155 | url: ${h.js(h.url('edit_gist_check_revision', gist_id=c.gist.gist_access_id))}, |
|
155 | url: ${h.js(h.url('edit_gist_check_revision', gist_id=c.gist.gist_access_id))}, | |
156 | data: {'revision': ${h.js(c.file_changeset.raw_id)}, '_session_csrf_secret_token': _session_csrf_secret_token}, |
|
156 | data: {'revision': ${h.js(c.file_changeset.raw_id)}, '_session_csrf_secret_token': _session_csrf_secret_token}, | |
157 | dataType: 'json', |
|
157 | dataType: 'json', | |
158 | type: 'POST', |
|
158 | type: 'POST', | |
159 | success: function(data) { |
|
159 | success: function(data) { | |
160 | if(data.success == false){ |
|
160 | if(data.success == false){ | |
161 | $('#edit_error').show(); |
|
161 | $('#edit_error').show(); | |
162 | } |
|
162 | } | |
163 | else{ |
|
163 | else{ | |
164 | $('#eform').submit(); |
|
164 | $('#eform').submit(); | |
165 | } |
|
165 | } | |
166 | } |
|
166 | } | |
167 | }); |
|
167 | }); | |
168 | }); |
|
168 | }); | |
169 | </script> |
|
169 | </script> | |
170 | </div> |
|
170 | </div> | |
171 | </div> |
|
171 | </div> | |
172 |
|
172 | |||
173 | </div> |
|
173 | </div> | |
174 | </%def> |
|
174 | </%def> |
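
Besides wrapping ``file.content`` in ``safe_unicode()`` for the CodeMirror textarea, the gist edit template above relies on a simple optimistic-concurrency scheme: the form carries the ``parent_hash`` of the revision the user started editing, and the Update button first POSTs the current revision to ``edit_gist_check_revision``; the form only submits if the server confirms nothing has changed, otherwise the warning banner is shown. A minimal sketch of the server-side comparison, using invented names rather than the actual controller code::

    def edit_is_still_safe(stored_revision, submitted_parent_hash):
        # The client sends the revision its edit is based on; if the gist was
        # updated by someone else in the meantime, the hashes differ and the
        # edit must not be applied blindly.
        return stored_revision == submitted_parent_hash

    # a controller could then answer something like:
    #   {'success': edit_is_still_safe(current_head, posted_revision)}

This is plain optimistic locking: nothing is locked while the user edits, and a conflict is only detected at the moment the update is attempted.
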
@@ -1,118 +1,118 b'' | |||||
1 | <%inherit file="/base/base.html"/> |
|
1 | <%inherit file="/base/base.html"/> | |
2 |
|
2 | |||
3 | <%block name="title"> |
|
3 | <%block name="title"> | |
4 | ${_('%s File Edit') % c.repo_name} |
|
4 | ${_('%s File Edit') % c.repo_name} | |
5 | </%block> |
|
5 | </%block> | |
6 |
|
6 | |||
7 | <%block name="js_extra"> |
|
7 | <%block name="js_extra"> | |
8 | <script type="text/javascript" src="${h.url('/codemirror/lib/codemirror.js')}"></script> |
|
8 | <script type="text/javascript" src="${h.url('/codemirror/lib/codemirror.js')}"></script> | |
9 | <script type="text/javascript" src="${h.url('/js/codemirror_loadmode.js')}"></script> |
|
9 | <script type="text/javascript" src="${h.url('/js/codemirror_loadmode.js')}"></script> | |
10 | <script type="text/javascript" src="${h.url('/codemirror/mode/meta.js')}"></script> |
|
10 | <script type="text/javascript" src="${h.url('/codemirror/mode/meta.js')}"></script> | |
11 | </%block> |
|
11 | </%block> | |
12 | <%block name="css_extra"> |
|
12 | <%block name="css_extra"> | |
13 | <link rel="stylesheet" type="text/css" href="${h.url('/codemirror/lib/codemirror.css')}"/> |
|
13 | <link rel="stylesheet" type="text/css" href="${h.url('/codemirror/lib/codemirror.css')}"/> | |
14 | </%block> |
|
14 | </%block> | |
15 |
|
15 | |||
16 | <%block name="header_menu"> |
|
16 | <%block name="header_menu"> | |
17 | ${self.menu('repositories')} |
|
17 | ${self.menu('repositories')} | |
18 | </%block> |
|
18 | </%block> | |
19 |
|
19 | |||
20 | <%def name="breadcrumbs_links()"> |
|
20 | <%def name="breadcrumbs_links()"> | |
21 | ${_('Edit file')} @ ${h.show_id(c.cs)} |
|
21 | ${_('Edit file')} @ ${h.show_id(c.cs)} | |
22 | </%def> |
|
22 | </%def> | |
23 |
|
23 | |||
24 | <%def name="main()"> |
|
24 | <%def name="main()"> | |
25 | ${self.repo_context_bar('files')} |
|
25 | ${self.repo_context_bar('files')} | |
26 | <div class="panel panel-primary"> |
|
26 | <div class="panel panel-primary"> | |
27 | <div class="panel-heading clearfix"> |
|
27 | <div class="panel-heading clearfix"> | |
28 | <div class="pull-left"> |
|
28 | <div class="pull-left"> | |
29 | ${self.breadcrumbs()} |
|
29 | ${self.breadcrumbs()} | |
30 | </div> |
|
30 | </div> | |
31 | <div class="pull-right"> |
|
31 | <div class="pull-right"> | |
32 | <a href="#">${_('Branch')}: ${c.cs.branch}</a> |
|
32 | <a href="#">${_('Branch')}: ${c.cs.branch}</a> | |
33 | </div> |
|
33 | </div> | |
34 | </div> |
|
34 | </div> | |
35 | <div class="panel-body" id="edit"> |
|
35 | <div class="panel-body" id="edit"> | |
36 | <div id="files_data"> |
|
36 | <div id="files_data"> | |
37 | ${h.form(h.url.current(),method='post',id='eform')} |
|
37 | ${h.form(h.url.current(),method='post',id='eform')} | |
38 | <h3 class="files_location"> |
|
38 | <h3 class="files_location"> | |
39 | ${_('Location')}: ${h.files_breadcrumbs(c.repo_name,c.cs.raw_id,c.file.path)} |
|
39 | ${_('Location')}: ${h.files_breadcrumbs(c.repo_name,c.cs.raw_id,c.file.path)} | |
40 | </h3> |
|
40 | </h3> | |
41 | <div id="body" class="panel panel-default"> |
|
41 | <div id="body" class="panel panel-default"> | |
42 | <div class="panel-heading clearfix form-inline form-group-sm"> |
|
42 | <div class="panel-heading clearfix form-inline form-group-sm"> | |
43 | <span> |
|
43 | <span> | |
44 | <span><i class="icon-doc-inv"></i></span> |
|
44 | <span><i class="icon-doc-inv"></i></span> | |
45 | <span>${h.link_to(h.show_id(c.file.changeset),h.url('changeset_home',repo_name=c.repo_name,revision=c.file.changeset.raw_id))}</span> |
|
45 | <span>${h.link_to(h.show_id(c.file.changeset),h.url('changeset_home',repo_name=c.repo_name,revision=c.file.changeset.raw_id))}</span> | |
46 | <span>${h.format_byte_size(c.file.size,binary=True)}</span> |
|
46 | <span>${h.format_byte_size(c.file.size,binary=True)}</span> | |
47 | <span>${c.file.mimetype}</span> |
|
47 | <span>${c.file.mimetype}</span> | |
48 | <select class="form-control" id="mimetype" name="mimetype"></select> |
|
48 | <select class="form-control" id="mimetype" name="mimetype"></select> | |
49 | </span> |
|
49 | </span> | |
50 | <span class="pull-right buttons"> |
|
50 | <span class="pull-right buttons"> | |
51 | ${h.link_to(_('Show Annotation'),h.url('files_annotate_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.f_path),class_="btn btn-default btn-xs")} |
|
51 | ${h.link_to(_('Show Annotation'),h.url('files_annotate_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.f_path),class_="btn btn-default btn-xs")} | |
52 | ${h.link_to(_('Show as Raw'),h.url('files_raw_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.f_path),class_="btn btn-default btn-xs")} |
|
52 | ${h.link_to(_('Show as Raw'),h.url('files_raw_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.f_path),class_="btn btn-default btn-xs")} | |
53 | ${h.link_to(_('Download as Raw'),h.url('files_rawfile_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.f_path),class_="btn btn-default btn-xs")} |
|
53 | ${h.link_to(_('Download as Raw'),h.url('files_rawfile_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.f_path),class_="btn btn-default btn-xs")} | |
54 | % if h.HasRepoPermissionLevel('write')(c.repo_name): |
|
54 | % if h.HasRepoPermissionLevel('write')(c.repo_name): | |
55 | % if not c.file.is_binary: |
|
55 | % if not c.file.is_binary: | |
56 | ${h.link_to(_('Source'),h.url('files_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.f_path),class_="btn btn-default btn-xs")} |
|
56 | ${h.link_to(_('Source'),h.url('files_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.f_path),class_="btn btn-default btn-xs")} | |
57 | % endif |
|
57 | % endif | |
58 | % endif |
|
58 | % endif | |
59 | </span> |
|
59 | </span> | |
60 | </div> |
|
60 | </div> | |
61 | <div class="panel-body no-padding"> |
|
61 | <div class="panel-body no-padding"> | |
62 | <textarea id="editor" name="content" style="display:none">${h.escape(c.file.content)|n}</textarea> |
|
62 | <textarea id="editor" name="content" style="display:none">${h.escape(safe_unicode(c.file.content))|n}</textarea> | |
63 | </div> |
|
63 | </div> | |
64 | </div> |
|
64 | </div> | |
65 | <div> |
|
65 | <div> | |
66 | <div class="form-group"> |
|
66 | <div class="form-group"> | |
67 | <label>${_('Commit Message')}</label> |
|
67 | <label>${_('Commit Message')}</label> | |
68 | <textarea class="form-control" id="commit" name="message" placeholder="${c.default_message}"></textarea> |
|
68 | <textarea class="form-control" id="commit" name="message" placeholder="${c.default_message}"></textarea> | |
69 | </div> |
|
69 | </div> | |
70 | <div class="form-group buttons"> |
|
70 | <div class="form-group buttons"> | |
71 | ${h.submit('commit',_('Commit Changes'),class_="btn btn-success")} |
|
71 | ${h.submit('commit',_('Commit Changes'),class_="btn btn-success")} | |
72 | ${h.reset('reset',_('Reset'),class_="btn btn-default")} |
|
72 | ${h.reset('reset',_('Reset'),class_="btn btn-default")} | |
73 | </div> |
|
73 | </div> | |
74 | </div> |
|
74 | </div> | |
75 | ${h.end_form()} |
|
75 | ${h.end_form()} | |
76 | </div> |
|
76 | </div> | |
77 | </div> |
|
77 | </div> | |
78 | </div> |
|
78 | </div> | |
79 |
|
79 | |||
80 | <script type="text/javascript"> |
|
80 | <script type="text/javascript"> | |
81 | $(document).ready(function(){ |
|
81 | $(document).ready(function(){ | |
82 | var reset_url = ${h.jshtml(h.url('files_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.file.path))}; |
|
82 | var reset_url = ${h.jshtml(h.url('files_home',repo_name=c.repo_name,revision=c.cs.raw_id,f_path=c.file.path))}; | |
83 | var myCodeMirror = initCodeMirror('editor', ${h.jshtml(request.script_name)}, reset_url); |
|
83 | var myCodeMirror = initCodeMirror('editor', ${h.jshtml(request.script_name)}, reset_url); | |
84 |
|
84 | |||
85 | //inject new modes, based on codeMirrors modeInfo object |
|
85 | //inject new modes, based on codeMirrors modeInfo object | |
86 | var $mimetype_select = $('#mimetype'); |
|
86 | var $mimetype_select = $('#mimetype'); | |
87 | $mimetype_select.each(function(){ |
|
87 | $mimetype_select.each(function(){ | |
88 | var modes_select = this; |
|
88 | var modes_select = this; | |
89 | var index = 1; |
|
89 | var index = 1; | |
90 | for(var i=0;i<CodeMirror.modeInfo.length;i++){ |
|
90 | for(var i=0;i<CodeMirror.modeInfo.length;i++){ | |
91 | var m = CodeMirror.modeInfo[i]; |
|
91 | var m = CodeMirror.modeInfo[i]; | |
92 | var opt = new Option(m.name, m.mime); |
|
92 | var opt = new Option(m.name, m.mime); | |
93 | $(opt).attr('mode', m.mode); |
|
93 | $(opt).attr('mode', m.mode); | |
94 | if (m.mime == 'text/plain') { |
|
94 | if (m.mime == 'text/plain') { | |
95 | // default plain text |
|
95 | // default plain text | |
96 | $(opt).prop('selected', true); |
|
96 | $(opt).prop('selected', true); | |
97 | modes_select.options[0] = opt; |
|
97 | modes_select.options[0] = opt; | |
98 | } else { |
|
98 | } else { | |
99 | modes_select.options[index++] = opt; |
|
99 | modes_select.options[index++] = opt; | |
100 | } |
|
100 | } | |
101 | } |
|
101 | } | |
102 | }); |
|
102 | }); | |
103 | // try to detect the mode based on the file we edit |
|
103 | // try to detect the mode based on the file we edit | |
104 | var detected_mode = CodeMirror.findModeByExtension(${h.js(c.file.extension)}); |
|
104 | var detected_mode = CodeMirror.findModeByExtension(${h.js(c.file.extension)}); | |
105 | if(detected_mode){ |
|
105 | if(detected_mode){ | |
106 | setCodeMirrorMode(myCodeMirror, detected_mode); |
|
106 | setCodeMirrorMode(myCodeMirror, detected_mode); | |
107 | $($mimetype_select.find('option[value="'+detected_mode.mime+'"]')[0]).prop('selected', true); |
|
107 | $($mimetype_select.find('option[value="'+detected_mode.mime+'"]')[0]).prop('selected', true); | |
108 | } |
|
108 | } | |
109 |
|
109 | |||
110 | $mimetype_select.on('change', function(e){ |
|
110 | $mimetype_select.on('change', function(e){ | |
111 | var selected = e.currentTarget; |
|
111 | var selected = e.currentTarget; | |
112 | var node = selected.options[selected.selectedIndex]; |
|
112 | var node = selected.options[selected.selectedIndex]; | |
113 | var detected_mode = CodeMirror.findModeByMIME(node.value); |
|
113 | var detected_mode = CodeMirror.findModeByMIME(node.value); | |
114 | setCodeMirrorMode(myCodeMirror, detected_mode); |
|
114 | setCodeMirrorMode(myCodeMirror, detected_mode); | |
115 | }); |
|
115 | }); | |
116 | }); |
|
116 | }); | |
117 | </script> |
|
117 | </script> | |
118 | </%def> |
|
118 | </%def> |
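
The file edit template above gets the same treatment, except the decoded content also runs through ``h.escape`` before being placed in the textarea, so markup inside the file cannot break out of the element. A stdlib-only sketch of that decode-then-escape order, with Python 3's ``html.escape`` standing in for Kallithea's ``h.escape`` helper and the same ``naive_safe_unicode`` stand-in as before::

    import html

    def naive_safe_unicode(data, encoding='utf-8'):
        return data.decode(encoding, 'replace') if isinstance(data, bytes) else data

    content = b'<script>alert("x")</script>\n'       # hostile file content, as bytes
    safe = html.escape(naive_safe_unicode(content))  # decode first, escape second
    # -> '&lt;script&gt;alert(&quot;x&quot;)&lt;/script&gt;\n'

Escaping after decoding means the escaper sees real text; escaping the bytes object (or interpolating it undecoded) would either fail outright or leak a ``b'...'`` repr into the page.
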
@@ -1,844 +1,844 b'' | |||||
1 | import datetime |
|
1 | import datetime | |
2 | import os |
|
2 | import os | |
3 | import sys |
|
3 | import sys | |
4 | import urllib2 |
|
4 | import urllib2 | |
5 |
|
5 | |||
6 | import mock |
|
6 | import mock | |
7 | import pytest |
|
7 | import pytest | |
8 |
|
8 | |||
9 | from kallithea.lib.vcs.backends.git import GitChangeset, GitRepository |
|
9 | from kallithea.lib.vcs.backends.git import GitChangeset, GitRepository | |
10 | from kallithea.lib.vcs.exceptions import NodeDoesNotExistError, RepositoryError, VCSError |
|
10 | from kallithea.lib.vcs.exceptions import NodeDoesNotExistError, RepositoryError, VCSError | |
11 | from kallithea.lib.vcs.nodes import DirNode, FileNode, NodeKind, NodeState |
|
11 | from kallithea.lib.vcs.nodes import DirNode, FileNode, NodeKind, NodeState | |
12 | from kallithea.model.scm import ScmModel |
|
12 | from kallithea.model.scm import ScmModel | |
13 | from kallithea.tests.vcs.base import _BackendTestMixin |
|
13 | from kallithea.tests.vcs.base import _BackendTestMixin | |
14 | from kallithea.tests.vcs.conf import TEST_GIT_REPO, TEST_GIT_REPO_CLONE, TESTS_TMP_PATH, get_new_dir |
|
14 | from kallithea.tests.vcs.conf import TEST_GIT_REPO, TEST_GIT_REPO_CLONE, TESTS_TMP_PATH, get_new_dir | |
15 |
|
15 | |||
16 |
|
16 | |||
17 | class TestGitRepository(object): |
|
17 | class TestGitRepository(object): | |
18 |
|
18 | |||
19 | def __check_for_existing_repo(self): |
|
19 | def __check_for_existing_repo(self): | |
20 | if os.path.exists(TEST_GIT_REPO_CLONE): |
|
20 | if os.path.exists(TEST_GIT_REPO_CLONE): | |
21 | pytest.fail('Cannot test git clone repo as location %s already ' |
|
21 | pytest.fail('Cannot test git clone repo as location %s already ' | |
22 | 'exists. You should manually remove it first.' |
|
22 | 'exists. You should manually remove it first.' | |
23 | % TEST_GIT_REPO_CLONE) |
|
23 | % TEST_GIT_REPO_CLONE) | |
24 |
|
24 | |||
25 | def setup_method(self): |
|
25 | def setup_method(self): | |
26 | self.repo = GitRepository(TEST_GIT_REPO) |
|
26 | self.repo = GitRepository(TEST_GIT_REPO) | |
27 |
|
27 | |||
28 | def test_wrong_repo_path(self): |
|
28 | def test_wrong_repo_path(self): | |
29 | wrong_repo_path = os.path.join(TESTS_TMP_PATH, 'errorrepo') |
|
29 | wrong_repo_path = os.path.join(TESTS_TMP_PATH, 'errorrepo') | |
30 | with pytest.raises(RepositoryError): |
|
30 | with pytest.raises(RepositoryError): | |
31 | GitRepository(wrong_repo_path) |
|
31 | GitRepository(wrong_repo_path) | |
32 |
|
32 | |||
33 | def test_git_cmd_injection(self): |
|
33 | def test_git_cmd_injection(self): | |
34 | repo_inject_path = TEST_GIT_REPO + '; echo "Cake";' |
|
34 | repo_inject_path = TEST_GIT_REPO + '; echo "Cake";' | |
35 | with pytest.raises(urllib2.URLError): |
|
35 | with pytest.raises(urllib2.URLError): | |
36 | # Should fail because URL will contain the parts after ; too |
|
36 | # Should fail because URL will contain the parts after ; too | |
37 | GitRepository(get_new_dir('injection-repo'), src_url=repo_inject_path, update_after_clone=True, create=True) |
|
37 | GitRepository(get_new_dir('injection-repo'), src_url=repo_inject_path, update_after_clone=True, create=True) | |
38 |
|
38 | |||
39 | with pytest.raises(RepositoryError): |
|
39 | with pytest.raises(RepositoryError): | |
40 | # Should fail on direct clone call, which as of this writing does not happen outside of class |
|
40 | # Should fail on direct clone call, which as of this writing does not happen outside of class | |
41 | clone_fail_repo = GitRepository(get_new_dir('injection-repo'), create=True) |
|
41 | clone_fail_repo = GitRepository(get_new_dir('injection-repo'), create=True) | |
42 | clone_fail_repo.clone(repo_inject_path, update_after_clone=True,) |
|
42 | clone_fail_repo.clone(repo_inject_path, update_after_clone=True,) | |
43 |
|
43 | |||
44 | # Verify correct quoting of evil characters that should work on posix file systems |
|
44 | # Verify correct quoting of evil characters that should work on posix file systems | |
45 | if sys.platform == 'win32': |
|
45 | if sys.platform == 'win32': | |
46 | # windows does not allow '"' in dir names |
|
46 | # windows does not allow '"' in dir names | |
47 | # and some versions of the git client don't like ` and ' |
|
47 | # and some versions of the git client don't like ` and ' | |
48 | tricky_path = get_new_dir("tricky-path-repo-$") |
|
48 | tricky_path = get_new_dir("tricky-path-repo-$") | |
49 | else: |
|
49 | else: | |
50 | tricky_path = get_new_dir("tricky-path-repo-$'\"`") |
|
50 | tricky_path = get_new_dir("tricky-path-repo-$'\"`") | |
51 | successfully_cloned = GitRepository(tricky_path, src_url=TEST_GIT_REPO, update_after_clone=True, create=True) |
|
51 | successfully_cloned = GitRepository(tricky_path, src_url=TEST_GIT_REPO, update_after_clone=True, create=True) | |
52 | # Repo should have been created |
|
52 | # Repo should have been created | |
53 | assert not successfully_cloned._repo.bare |
|
53 | assert not successfully_cloned._repo.bare | |
54 |
|
54 | |||
55 | if sys.platform == 'win32': |
|
55 | if sys.platform == 'win32': | |
56 | # windows does not allow '"' in dir names |
|
56 | # windows does not allow '"' in dir names | |
57 | # and some versions of the git client don't like ` and ' |
|
57 | # and some versions of the git client don't like ` and ' | |
58 | tricky_path_2 = get_new_dir("tricky-path-2-repo-$") |
|
58 | tricky_path_2 = get_new_dir("tricky-path-2-repo-$") | |
59 | else: |
|
59 | else: | |
60 | tricky_path_2 = get_new_dir("tricky-path-2-repo-$'\"`") |
|
60 | tricky_path_2 = get_new_dir("tricky-path-2-repo-$'\"`") | |
61 | successfully_cloned2 = GitRepository(tricky_path_2, src_url=tricky_path, bare=True, create=True) |
|
61 | successfully_cloned2 = GitRepository(tricky_path_2, src_url=tricky_path, bare=True, create=True) | |
62 | # Repo should have been created and thus used correct quoting for clone |
|
62 | # Repo should have been created and thus used correct quoting for clone | |
63 | assert successfully_cloned2._repo.bare |
|
63 | assert successfully_cloned2._repo.bare | |
64 |
|
64 | |||
65 | # Should pass because URL has been properly quoted |
|
65 | # Should pass because URL has been properly quoted | |
66 | successfully_cloned.pull(tricky_path_2) |
|
66 | successfully_cloned.pull(tricky_path_2) | |
67 | successfully_cloned2.fetch(tricky_path) |
|
67 | successfully_cloned2.fetch(tricky_path) | |
68 |
|
68 | |||
69 | def test_repo_create_with_spaces_in_path(self): |
|
69 | def test_repo_create_with_spaces_in_path(self): | |
70 | repo_path = get_new_dir("path with spaces") |
|
70 | repo_path = get_new_dir("path with spaces") | |
71 | repo = GitRepository(repo_path, src_url=None, bare=True, create=True) |
|
71 | repo = GitRepository(repo_path, src_url=None, bare=True, create=True) | |
72 | # Repo should have been created |
|
72 | # Repo should have been created | |
73 | assert repo._repo.bare |
|
73 | assert repo._repo.bare | |
74 |
|
74 | |||
75 | def test_repo_clone(self): |
|
75 | def test_repo_clone(self): | |
76 | self.__check_for_existing_repo() |
|
76 | self.__check_for_existing_repo() | |
77 | repo = GitRepository(TEST_GIT_REPO) |
|
77 | repo = GitRepository(TEST_GIT_REPO) | |
78 | repo_clone = GitRepository(TEST_GIT_REPO_CLONE, |
|
78 | repo_clone = GitRepository(TEST_GIT_REPO_CLONE, | |
79 | src_url=TEST_GIT_REPO, create=True, update_after_clone=True) |
|
79 | src_url=TEST_GIT_REPO, create=True, update_after_clone=True) | |
80 | assert len(repo.revisions) == len(repo_clone.revisions) |
|
80 | assert len(repo.revisions) == len(repo_clone.revisions) | |
81 | # Checking hashes of changesets should be enough |
|
81 | # Checking hashes of changesets should be enough | |
82 | for changeset in repo.get_changesets(): |
|
82 | for changeset in repo.get_changesets(): | |
83 | raw_id = changeset.raw_id |
|
83 | raw_id = changeset.raw_id | |
84 | assert raw_id == repo_clone.get_changeset(raw_id).raw_id |
|
84 | assert raw_id == repo_clone.get_changeset(raw_id).raw_id | |
85 |
|
85 | |||
86 | def test_repo_clone_with_spaces_in_path(self): |
|
86 | def test_repo_clone_with_spaces_in_path(self): | |
87 | repo_path = get_new_dir("path with spaces") |
|
87 | repo_path = get_new_dir("path with spaces") | |
88 | successfully_cloned = GitRepository(repo_path, src_url=TEST_GIT_REPO, update_after_clone=True, create=True) |
|
88 | successfully_cloned = GitRepository(repo_path, src_url=TEST_GIT_REPO, update_after_clone=True, create=True) | |
89 | # Repo should have been created |
|
89 | # Repo should have been created | |
90 | assert not successfully_cloned._repo.bare |
|
90 | assert not successfully_cloned._repo.bare | |
91 |
|
91 | |||
92 | successfully_cloned.pull(TEST_GIT_REPO) |
|
92 | successfully_cloned.pull(TEST_GIT_REPO) | |
93 | self.repo.fetch(repo_path) |
|
93 | self.repo.fetch(repo_path) | |
94 |
|
94 | |||
95 | def test_repo_clone_without_create(self): |
|
95 | def test_repo_clone_without_create(self): | |
96 | with pytest.raises(RepositoryError): |
|
96 | with pytest.raises(RepositoryError): | |
97 | GitRepository(TEST_GIT_REPO_CLONE + '_wo_create', src_url=TEST_GIT_REPO) |
|
97 | GitRepository(TEST_GIT_REPO_CLONE + '_wo_create', src_url=TEST_GIT_REPO) | |
98 |
|
98 | |||
99 | def test_repo_clone_with_update(self): |
|
99 | def test_repo_clone_with_update(self): | |
100 | repo = GitRepository(TEST_GIT_REPO) |
|
100 | repo = GitRepository(TEST_GIT_REPO) | |
101 | clone_path = TEST_GIT_REPO_CLONE + '_with_update' |
|
101 | clone_path = TEST_GIT_REPO_CLONE + '_with_update' | |
102 | repo_clone = GitRepository(clone_path, |
|
102 | repo_clone = GitRepository(clone_path, | |
103 | create=True, src_url=TEST_GIT_REPO, update_after_clone=True) |
|
103 | create=True, src_url=TEST_GIT_REPO, update_after_clone=True) | |
104 | assert len(repo.revisions) == len(repo_clone.revisions) |
|
104 | assert len(repo.revisions) == len(repo_clone.revisions) | |
105 |
|
105 | |||
106 | # check if current workdir was updated |
|
106 | # check if current workdir was updated | |
107 | fpath = os.path.join(clone_path, 'MANIFEST.in') |
|
107 | fpath = os.path.join(clone_path, 'MANIFEST.in') | |
108 | assert os.path.isfile(fpath), 'Repo was cloned and updated but file %s could not be found' % fpath |
|
108 | assert os.path.isfile(fpath), 'Repo was cloned and updated but file %s could not be found' % fpath | |
109 |
|
109 | |||
110 | def test_repo_clone_without_update(self): |
|
110 | def test_repo_clone_without_update(self): | |
111 | repo = GitRepository(TEST_GIT_REPO) |
|
111 | repo = GitRepository(TEST_GIT_REPO) | |
112 | clone_path = TEST_GIT_REPO_CLONE + '_without_update' |
|
112 | clone_path = TEST_GIT_REPO_CLONE + '_without_update' | |
113 | repo_clone = GitRepository(clone_path, |
|
113 | repo_clone = GitRepository(clone_path, | |
114 | create=True, src_url=TEST_GIT_REPO, update_after_clone=False) |
|
114 | create=True, src_url=TEST_GIT_REPO, update_after_clone=False) | |
115 | assert len(repo.revisions) == len(repo_clone.revisions) |
|
115 | assert len(repo.revisions) == len(repo_clone.revisions) | |
116 | # check if current workdir was *NOT* updated |
|
116 | # check if current workdir was *NOT* updated | |
117 | fpath = os.path.join(clone_path, 'MANIFEST.in') |
|
117 | fpath = os.path.join(clone_path, 'MANIFEST.in') | |
118 | # Make sure it's not bare repo |
|
118 | # Make sure it's not bare repo | |
119 | assert not repo_clone._repo.bare |
|
119 | assert not repo_clone._repo.bare | |
120 | assert not os.path.isfile(fpath), 'Repo was cloned without update but file %s was found' % fpath |
|
120 | assert not os.path.isfile(fpath), 'Repo was cloned without update but file %s was found' % fpath | |
121 |
|
121 | |||
122 | def test_repo_clone_into_bare_repo(self): |
|
122 | def test_repo_clone_into_bare_repo(self): | |
123 | repo = GitRepository(TEST_GIT_REPO) |
|
123 | repo = GitRepository(TEST_GIT_REPO) | |
124 | clone_path = TEST_GIT_REPO_CLONE + '_bare.git' |
|
124 | clone_path = TEST_GIT_REPO_CLONE + '_bare.git' | |
125 | repo_clone = GitRepository(clone_path, create=True, |
|
125 | repo_clone = GitRepository(clone_path, create=True, | |
126 | src_url=repo.path, bare=True) |
|
126 | src_url=repo.path, bare=True) | |
127 | assert repo_clone._repo.bare |
|
127 | assert repo_clone._repo.bare | |
128 |
|
128 | |||
129 | def test_create_repo_is_not_bare_by_default(self): |
|
129 | def test_create_repo_is_not_bare_by_default(self): | |
130 | repo = GitRepository(get_new_dir('not-bare-by-default'), create=True) |
|
130 | repo = GitRepository(get_new_dir('not-bare-by-default'), create=True) | |
131 | assert not repo._repo.bare |
|
131 | assert not repo._repo.bare | |
132 |
|
132 | |||
133 | def test_create_bare_repo(self): |
|
133 | def test_create_bare_repo(self): | |
134 | repo = GitRepository(get_new_dir('bare-repo'), create=True, bare=True) |
|
134 | repo = GitRepository(get_new_dir('bare-repo'), create=True, bare=True) | |
135 | assert repo._repo.bare |
|
135 | assert repo._repo.bare | |
136 |
|
136 | |||
137 | def test_revisions(self): |
|
137 | def test_revisions(self): | |
138 | # there are 112 revisions (by now) |
|
138 | # there are 112 revisions (by now) | |
139 | # so we can assume they would be available from now on |
|
139 | # so we can assume they would be available from now on | |
140 | subset = set([ |
|
140 | subset = set([ | |
141 | 'c1214f7e79e02fc37156ff215cd71275450cffc3', |
|
141 | 'c1214f7e79e02fc37156ff215cd71275450cffc3', | |
142 | '38b5fe81f109cb111f549bfe9bb6b267e10bc557', |
|
142 | '38b5fe81f109cb111f549bfe9bb6b267e10bc557', | |
143 | 'fa6600f6848800641328adbf7811fd2372c02ab2', |
|
143 | 'fa6600f6848800641328adbf7811fd2372c02ab2', | |
144 | '102607b09cdd60e2793929c4f90478be29f85a17', |
|
144 | '102607b09cdd60e2793929c4f90478be29f85a17', | |
145 | '49d3fd156b6f7db46313fac355dca1a0b94a0017', |
|
145 | '49d3fd156b6f7db46313fac355dca1a0b94a0017', | |
146 | '2d1028c054665b962fa3d307adfc923ddd528038', |
|
146 | '2d1028c054665b962fa3d307adfc923ddd528038', | |
147 | 'd7e0d30fbcae12c90680eb095a4f5f02505ce501', |
|
147 | 'd7e0d30fbcae12c90680eb095a4f5f02505ce501', | |
148 | 'ff7ca51e58c505fec0dd2491de52c622bb7a806b', |
|
148 | 'ff7ca51e58c505fec0dd2491de52c622bb7a806b', | |
149 | 'dd80b0f6cf5052f17cc738c2951c4f2070200d7f', |
|
149 | 'dd80b0f6cf5052f17cc738c2951c4f2070200d7f', | |
150 | '8430a588b43b5d6da365400117c89400326e7992', |
|
150 | '8430a588b43b5d6da365400117c89400326e7992', | |
151 | 'd955cd312c17b02143c04fa1099a352b04368118', |
|
151 | 'd955cd312c17b02143c04fa1099a352b04368118', | |
152 | 'f67b87e5c629c2ee0ba58f85197e423ff28d735b', |
|
152 | 'f67b87e5c629c2ee0ba58f85197e423ff28d735b', | |
153 | 'add63e382e4aabc9e1afdc4bdc24506c269b7618', |
|
153 | 'add63e382e4aabc9e1afdc4bdc24506c269b7618', | |
154 | 'f298fe1189f1b69779a4423f40b48edf92a703fc', |
|
154 | 'f298fe1189f1b69779a4423f40b48edf92a703fc', | |
155 | 'bd9b619eb41994cac43d67cf4ccc8399c1125808', |
|
155 | 'bd9b619eb41994cac43d67cf4ccc8399c1125808', | |
156 | '6e125e7c890379446e98980d8ed60fba87d0f6d1', |
|
156 | '6e125e7c890379446e98980d8ed60fba87d0f6d1', | |
157 | 'd4a54db9f745dfeba6933bf5b1e79e15d0af20bd', |
|
157 | 'd4a54db9f745dfeba6933bf5b1e79e15d0af20bd', | |
158 | '0b05e4ed56c802098dfc813cbe779b2f49e92500', |
|
158 | '0b05e4ed56c802098dfc813cbe779b2f49e92500', | |
159 | '191caa5b2c81ed17c0794bf7bb9958f4dcb0b87e', |
|
159 | '191caa5b2c81ed17c0794bf7bb9958f4dcb0b87e', | |
160 | '45223f8f114c64bf4d6f853e3c35a369a6305520', |
|
160 | '45223f8f114c64bf4d6f853e3c35a369a6305520', | |
161 | 'ca1eb7957a54bce53b12d1a51b13452f95bc7c7e', |
|
161 | 'ca1eb7957a54bce53b12d1a51b13452f95bc7c7e', | |
162 | 'f5ea29fc42ef67a2a5a7aecff10e1566699acd68', |
|
162 | 'f5ea29fc42ef67a2a5a7aecff10e1566699acd68', | |
163 | '27d48942240f5b91dfda77accd2caac94708cc7d', |
|
163 | '27d48942240f5b91dfda77accd2caac94708cc7d', | |
164 | '622f0eb0bafd619d2560c26f80f09e3b0b0d78af', |
|
164 | '622f0eb0bafd619d2560c26f80f09e3b0b0d78af', | |
165 | 'e686b958768ee96af8029fe19c6050b1a8dd3b2b']) |
|
165 | 'e686b958768ee96af8029fe19c6050b1a8dd3b2b']) | |
166 | assert subset.issubset(set(self.repo.revisions)) |
|
166 | assert subset.issubset(set(self.repo.revisions)) | |
167 |
|
167 | |||
168 | def test_slicing(self): |
|
168 | def test_slicing(self): | |
169 | # 4 1 5 10 95 |
|
169 | # 4 1 5 10 95 | |
170 | for sfrom, sto, size in [(0, 4, 4), (1, 2, 1), (10, 15, 5), |
|
170 | for sfrom, sto, size in [(0, 4, 4), (1, 2, 1), (10, 15, 5), | |
171 | (10, 20, 10), (5, 100, 95)]: |
|
171 | (10, 20, 10), (5, 100, 95)]: | |
172 | revs = list(self.repo[sfrom:sto]) |
|
172 | revs = list(self.repo[sfrom:sto]) | |
173 | assert len(revs) == size |
|
173 | assert len(revs) == size | |
174 | assert revs[0] == self.repo.get_changeset(sfrom) |
|
174 | assert revs[0] == self.repo.get_changeset(sfrom) | |
175 | assert revs[-1] == self.repo.get_changeset(sto - 1) |
|
175 | assert revs[-1] == self.repo.get_changeset(sto - 1) | |
176 |
|
176 | |||
177 | def test_branches(self): |
|
177 | def test_branches(self): | |
178 | # TODO: Need more tests here |
|
178 | # TODO: Need more tests here | |
179 | # Removed (those are 'remotes' branches for cloned repo) |
|
179 | # Removed (those are 'remotes' branches for cloned repo) | |
180 | #assert 'master' in self.repo.branches |
|
180 | #assert 'master' in self.repo.branches | |
181 | #assert 'gittree' in self.repo.branches |
|
181 | #assert 'gittree' in self.repo.branches | |
182 | #assert 'web-branch' in self.repo.branches |
|
182 | #assert 'web-branch' in self.repo.branches | |
183 | for name, id in self.repo.branches.items(): |
|
183 | for name, id in self.repo.branches.items(): | |
184 | assert isinstance(self.repo.get_changeset(id), GitChangeset) |
|
184 | assert isinstance(self.repo.get_changeset(id), GitChangeset) | |
185 |
|
185 | |||
186 | def test_tags(self): |
|
186 | def test_tags(self): | |
187 | # TODO: Need more tests here |
|
187 | # TODO: Need more tests here | |
188 | assert 'v0.1.1' in self.repo.tags |
|
188 | assert 'v0.1.1' in self.repo.tags | |
189 | assert 'v0.1.2' in self.repo.tags |
|
189 | assert 'v0.1.2' in self.repo.tags | |
190 | for name, id in self.repo.tags.items(): |
|
190 | for name, id in self.repo.tags.items(): | |
191 | assert isinstance(self.repo.get_changeset(id), GitChangeset) |
|
191 | assert isinstance(self.repo.get_changeset(id), GitChangeset) | |
192 |
|
192 | |||
193 | def _test_single_changeset_cache(self, revision): |
|
193 | def _test_single_changeset_cache(self, revision): | |
194 | chset = self.repo.get_changeset(revision) |
|
194 | chset = self.repo.get_changeset(revision) | |
195 | assert revision in self.repo.changesets |
|
195 | assert revision in self.repo.changesets | |
196 | assert chset is self.repo.changesets[revision] |
|
196 | assert chset is self.repo.changesets[revision] | |
197 |
|
197 | |||
198 | def test_initial_changeset(self): |
|
198 | def test_initial_changeset(self): | |
199 | id = self.repo.revisions[0] |
|
199 | id = self.repo.revisions[0] | |
200 | init_chset = self.repo.get_changeset(id) |
|
200 | init_chset = self.repo.get_changeset(id) | |
201 | assert init_chset.message == 'initial import\n' |
|
201 | assert init_chset.message == 'initial import\n' | |
202 | assert init_chset.author == 'Marcin Kuzminski <marcin@python-blog.com>' |
|
202 | assert init_chset.author == 'Marcin Kuzminski <marcin@python-blog.com>' | |
203 | for path in ('vcs/__init__.py', |
|
203 | for path in ('vcs/__init__.py', | |
204 | 'vcs/backends/BaseRepository.py', |
|
204 | 'vcs/backends/BaseRepository.py', | |
205 | 'vcs/backends/__init__.py'): |
|
205 | 'vcs/backends/__init__.py'): | |
206 | assert isinstance(init_chset.get_node(path), FileNode) |
|
206 | assert isinstance(init_chset.get_node(path), FileNode) | |
207 | for path in ('', 'vcs', 'vcs/backends'): |
|
207 | for path in ('', 'vcs', 'vcs/backends'): | |
208 | assert isinstance(init_chset.get_node(path), DirNode) |
|
208 | assert isinstance(init_chset.get_node(path), DirNode) | |
209 |
|
209 | |||
210 | with pytest.raises(NodeDoesNotExistError): |
|
210 | with pytest.raises(NodeDoesNotExistError): | |
211 | init_chset.get_node(path='foobar') |
|
211 | init_chset.get_node(path='foobar') | |
212 |
|
212 | |||
213 | node = init_chset.get_node('vcs/') |
|
213 | node = init_chset.get_node('vcs/') | |
214 | assert hasattr(node, 'kind') |
|
214 | assert hasattr(node, 'kind') | |
215 | assert node.kind == NodeKind.DIR |
|
215 | assert node.kind == NodeKind.DIR | |
216 |
|
216 | |||
217 | node = init_chset.get_node('vcs') |
|
217 | node = init_chset.get_node('vcs') | |
218 | assert hasattr(node, 'kind') |
|
218 | assert hasattr(node, 'kind') | |
219 | assert node.kind == NodeKind.DIR |
|
219 | assert node.kind == NodeKind.DIR | |
220 |
|
220 | |||
221 | node = init_chset.get_node('vcs/__init__.py') |
|
221 | node = init_chset.get_node('vcs/__init__.py') | |
222 | assert hasattr(node, 'kind') |
|
222 | assert hasattr(node, 'kind') | |
223 | assert node.kind == NodeKind.FILE |
|
223 | assert node.kind == NodeKind.FILE | |
224 |
|
224 | |||
225 | def test_not_existing_changeset(self): |
|
225 | def test_not_existing_changeset(self): | |
226 | with pytest.raises(RepositoryError): |
|
226 | with pytest.raises(RepositoryError): | |
227 | self.repo.get_changeset('f' * 40) |
|
227 | self.repo.get_changeset('f' * 40) | |
228 |
|
228 | |||
229 | def test_changeset10(self): |
|
229 | def test_changeset10(self): | |
230 |
|
230 | |||
231 | chset10 = self.repo.get_changeset(self.repo.revisions[9]) |
|
231 | chset10 = self.repo.get_changeset(self.repo.revisions[9]) | |
232 | readme = """=== |
|
232 | readme = """=== | |
233 | VCS |
|
233 | VCS | |
234 | === |
|
234 | === | |
235 |
|
235 | |||
236 | Various Version Control System management abstraction layer for Python. |
|
236 | Various Version Control System management abstraction layer for Python. | |
237 |
|
237 | |||
238 | Introduction |
|
238 | Introduction | |
239 | ------------ |
|
239 | ------------ | |
240 |
|
240 | |||
241 | TODO: To be written... |
|
241 | TODO: To be written... | |
242 |
|
242 | |||
243 | """ |
|
243 | """ | |
244 | node = chset10.get_node('README.rst') |
|
244 | node = chset10.get_node('README.rst') | |
245 | assert node.kind == NodeKind.FILE |
|
245 | assert node.kind == NodeKind.FILE | |
246 | assert node.content == readme |
|
246 | assert node.content == readme | |
247 |
|
247 | |||
248 |
|
248 | |||
249 | class TestGitChangeset(object): |
|
249 | class TestGitChangeset(object): | |
250 |
|
250 | |||
251 | def setup_method(self): |
|
251 | def setup_method(self): | |
252 | self.repo = GitRepository(TEST_GIT_REPO) |
|
252 | self.repo = GitRepository(TEST_GIT_REPO) | |
253 |
|
253 | |||
254 | def test_default_changeset(self): |
|
254 | def test_default_changeset(self): | |
255 | tip = self.repo.get_changeset() |
|
255 | tip = self.repo.get_changeset() | |
256 | assert tip == self.repo.get_changeset(None) |
|
256 | assert tip == self.repo.get_changeset(None) | |
257 | assert tip == self.repo.get_changeset('tip') |
|
257 | assert tip == self.repo.get_changeset('tip') | |
258 |
|
258 | |||
259 | def test_root_node(self): |
|
259 | def test_root_node(self): | |
260 | tip = self.repo.get_changeset() |
|
260 | tip = self.repo.get_changeset() | |
261 | assert tip.root is tip.get_node('') |
|
261 | assert tip.root is tip.get_node('') | |
262 |
|
262 | |||
263 | def test_lazy_fetch(self): |
|
263 | def test_lazy_fetch(self): | |
264 | """ |
|
264 | """ | |
265 | Test if changeset's nodes expand and are cached as we walk through |
|
265 | Test if changeset's nodes expand and are cached as we walk through | |
266 | the revision. This test is somewhat hard to write as order of tests |
|
266 | the revision. This test is somewhat hard to write as order of tests | |
267 | is a key here. Written by running command after command in a shell. |
|
267 | is a key here. Written by running command after command in a shell. | |
268 | """ |
|
268 | """ | |
269 | commit_id = '2a13f185e4525f9d4b59882791a2d397b90d5ddc' |
|
269 | commit_id = '2a13f185e4525f9d4b59882791a2d397b90d5ddc' | |
270 | assert commit_id in self.repo.revisions |
|
270 | assert commit_id in self.repo.revisions | |
271 | chset = self.repo.get_changeset(commit_id) |
|
271 | chset = self.repo.get_changeset(commit_id) | |
272 | assert len(chset.nodes) == 0 |
|
272 | assert len(chset.nodes) == 0 | |
273 | root = chset.root |
|
273 | root = chset.root | |
274 | assert len(chset.nodes) == 1 |
|
274 | assert len(chset.nodes) == 1 | |
275 | assert len(root.nodes) == 8 |
|
275 | assert len(root.nodes) == 8 | |
276 | # accessing root.nodes updates chset.nodes |
|
276 | # accessing root.nodes updates chset.nodes | |
277 | assert len(chset.nodes) == 9 |
|
277 | assert len(chset.nodes) == 9 | |
278 |
|
278 | |||
279 | docs = root.get_node('docs') |
|
279 | docs = root.get_node('docs') | |
280 | # we haven't yet accessed anything new as docs dir was already cached |
|
280 | # we haven't yet accessed anything new as docs dir was already cached | |
281 | assert len(chset.nodes) == 9 |
|
281 | assert len(chset.nodes) == 9 | |
282 | assert len(docs.nodes) == 8 |
|
282 | assert len(docs.nodes) == 8 | |
283 | # accessing docs.nodes updates chset.nodes |
|
283 | # accessing docs.nodes updates chset.nodes | |
284 | assert len(chset.nodes) == 17 |
|
284 | assert len(chset.nodes) == 17 | |
285 |
|
285 | |||
286 | assert docs is chset.get_node('docs') |
|
286 | assert docs is chset.get_node('docs') | |
287 | assert docs is root.nodes[0] |
|
287 | assert docs is root.nodes[0] | |
288 | assert docs is root.dirs[0] |
|
288 | assert docs is root.dirs[0] | |
289 | assert docs is chset.get_node('docs') |
|
289 | assert docs is chset.get_node('docs') | |
290 |
|
290 | |||
291 | def test_nodes_with_changeset(self): |
|
291 | def test_nodes_with_changeset(self): | |
292 | commit_id = '2a13f185e4525f9d4b59882791a2d397b90d5ddc' |
|
292 | commit_id = '2a13f185e4525f9d4b59882791a2d397b90d5ddc' | |
293 | chset = self.repo.get_changeset(commit_id) |
|
293 | chset = self.repo.get_changeset(commit_id) | |
294 | root = chset.root |
|
294 | root = chset.root | |
295 | docs = root.get_node('docs') |
|
295 | docs = root.get_node('docs') | |
296 | assert docs is chset.get_node('docs') |
|
296 | assert docs is chset.get_node('docs') | |
297 | api = docs.get_node('api') |
|
297 | api = docs.get_node('api') | |
298 | assert api is chset.get_node('docs/api') |
|
298 | assert api is chset.get_node('docs/api') | |
299 | index = api.get_node('index.rst') |
|
299 | index = api.get_node('index.rst') | |
300 | assert index is chset.get_node('docs/api/index.rst') |
|
300 | assert index is chset.get_node('docs/api/index.rst') | |
301 | assert index is chset.get_node('docs') \ |
|
301 | assert index is chset.get_node('docs') \ | |
302 | .get_node('api') \ |
|
302 | .get_node('api') \ | |
303 | .get_node('index.rst') |
|
303 | .get_node('index.rst') | |
304 |
|
304 | |||
305 | def test_branch_and_tags(self): |
|
305 | def test_branch_and_tags(self): | |
306 | # Those tests seem to show wrong results: |
|
306 | # Those tests seem to show wrong results: | |
307 | # in Git, only heads have a branch - most changesets don't |
|
307 | # in Git, only heads have a branch - most changesets don't | |
308 | rev0 = self.repo.revisions[0] |
|
308 | rev0 = self.repo.revisions[0] | |
309 | chset0 = self.repo.get_changeset(rev0) |
|
309 | chset0 = self.repo.get_changeset(rev0) | |
310 | assert chset0.branch is None # should be 'master'? |
|
310 | assert chset0.branch is None # should be 'master'? | |
311 | assert chset0.branches == [] # should be 'master'? |
|
311 | assert chset0.branches == [] # should be 'master'? | |
312 | assert chset0.tags == [] |
|
312 | assert chset0.tags == [] | |
313 |
|
313 | |||
314 | rev10 = self.repo.revisions[10] |
|
314 | rev10 = self.repo.revisions[10] | |
315 | chset10 = self.repo.get_changeset(rev10) |
|
315 | chset10 = self.repo.get_changeset(rev10) | |
316 | assert chset10.branch is None # should be 'master'? |
|
316 | assert chset10.branch is None # should be 'master'? | |
317 | assert chset10.branches == [] # should be 'master'? |
|
317 | assert chset10.branches == [] # should be 'master'? | |
318 | assert chset10.tags == [] |
|
318 | assert chset10.tags == [] | |
319 |
|
319 | |||
320 | rev44 = self.repo.revisions[44] |
|
320 | rev44 = self.repo.revisions[44] | |
321 | chset44 = self.repo.get_changeset(rev44) |
|
321 | chset44 = self.repo.get_changeset(rev44) | |
322 | assert chset44.branch is None # should be 'web-branch'? |
|
322 | assert chset44.branch is None # should be 'web-branch'? | |
323 | assert chset44.branches == [] # should be 'web-branch'? |
|
323 | assert chset44.branches == [] # should be 'web-branch'? | |
324 |
|
324 | |||
325 | tip = self.repo.get_changeset('tip') |
|
325 | tip = self.repo.get_changeset('tip') | |
326 | assert 'tip' not in tip.tags # it should be? |
|
326 | assert 'tip' not in tip.tags # it should be? | |
327 | assert not tip.tags # how it is! |
|
327 | assert not tip.tags # how it is! | |
328 |
|
328 | |||
329 | def _test_slices(self, limit, offset): |
|
329 | def _test_slices(self, limit, offset): | |
330 | count = self.repo.count() |
|
330 | count = self.repo.count() | |
331 | changesets = self.repo.get_changesets(limit=limit, offset=offset) |
|
331 | changesets = self.repo.get_changesets(limit=limit, offset=offset) | |
332 | idx = 0 |
|
332 | idx = 0 | |
333 | for changeset in changesets: |
|
333 | for changeset in changesets: | |
334 | rev = offset + idx |
|
334 | rev = offset + idx | |
335 | idx += 1 |
|
335 | idx += 1 | |
336 | rev_id = self.repo.revisions[rev] |
|
336 | rev_id = self.repo.revisions[rev] | |
337 | if idx > limit: |
|
337 | if idx > limit: | |
338 | pytest.fail("Exceeded limit already (getting revision %s, " |
|
338 | pytest.fail("Exceeded limit already (getting revision %s, " | |
339 | "there are %s total revisions, offset=%s, limit=%s)" |
|
339 | "there are %s total revisions, offset=%s, limit=%s)" | |
340 | % (rev_id, count, offset, limit)) |
|
340 | % (rev_id, count, offset, limit)) | |
341 | assert changeset == self.repo.get_changeset(rev_id) |
|
341 | assert changeset == self.repo.get_changeset(rev_id) | |
342 | result = list(self.repo.get_changesets(limit=limit, offset=offset)) |
|
342 | result = list(self.repo.get_changesets(limit=limit, offset=offset)) | |
343 | start = offset |
|
343 | start = offset | |
344 | end = limit and offset + limit or None |
|
344 | end = limit and offset + limit or None | |
345 | sliced = list(self.repo[start:end]) |
|
345 | sliced = list(self.repo[start:end]) | |
346 | assert result == sliced, ( |
|
346 | assert result == sliced, ( | |
347 | "Comparison failed for limit=%s, offset=%s " |
|
347 | "Comparison failed for limit=%s, offset=%s " | |
348 | "(get_changeset returned: %s and sliced: %s)" |
|
348 | "(get_changeset returned: %s and sliced: %s)" | |
349 | % (limit, offset, result, sliced)) |
|
349 | % (limit, offset, result, sliced)) | |
350 |
|
350 | |||
351 | def _test_file_size(self, revision, path, size): |
|
351 | def _test_file_size(self, revision, path, size): | |
352 | node = self.repo.get_changeset(revision).get_node(path) |
|
352 | node = self.repo.get_changeset(revision).get_node(path) | |
353 | assert node.is_file() |
|
353 | assert node.is_file() | |
354 | assert node.size == size |
|
354 | assert node.size == size | |
355 |
|
355 | |||
356 | def test_file_size(self): |
|
356 | def test_file_size(self): | |
357 | to_check = ( |
|
357 | to_check = ( | |
358 | ('c1214f7e79e02fc37156ff215cd71275450cffc3', |
|
358 | ('c1214f7e79e02fc37156ff215cd71275450cffc3', | |
359 | 'vcs/backends/BaseRepository.py', 502), |
|
359 | 'vcs/backends/BaseRepository.py', 502), | |
360 | ('d7e0d30fbcae12c90680eb095a4f5f02505ce501', |
|
360 | ('d7e0d30fbcae12c90680eb095a4f5f02505ce501', | |
361 | 'vcs/backends/hg.py', 854), |
|
361 | 'vcs/backends/hg.py', 854), | |
362 | ('6e125e7c890379446e98980d8ed60fba87d0f6d1', |
|
362 | ('6e125e7c890379446e98980d8ed60fba87d0f6d1', | |
363 | 'setup.py', 1068), |
|
363 | 'setup.py', 1068), | |
364 | ('d955cd312c17b02143c04fa1099a352b04368118', |
|
364 | ('d955cd312c17b02143c04fa1099a352b04368118', | |
365 | 'vcs/backends/base.py', 2921), |
|
365 | 'vcs/backends/base.py', 2921), | |
366 | ('ca1eb7957a54bce53b12d1a51b13452f95bc7c7e', |
|
366 | ('ca1eb7957a54bce53b12d1a51b13452f95bc7c7e', | |
367 | 'vcs/backends/base.py', 3936), |
|
367 | 'vcs/backends/base.py', 3936), | |
368 | ('f50f42baeed5af6518ef4b0cb2f1423f3851a941', |
|
368 | ('f50f42baeed5af6518ef4b0cb2f1423f3851a941', | |
369 | 'vcs/backends/base.py', 6189), |
|
369 | 'vcs/backends/base.py', 6189), | |
370 | ) |
|
370 | ) | |
371 | for revision, path, size in to_check: |
|
371 | for revision, path, size in to_check: | |
372 | self._test_file_size(revision, path, size) |
|
372 | self._test_file_size(revision, path, size) | |
373 |
|
373 | |||
374 | def _test_dir_size(self, revision, path, size): |
|
374 | def _test_dir_size(self, revision, path, size): | |
375 | node = self.repo.get_changeset(revision).get_node(path) |
|
375 | node = self.repo.get_changeset(revision).get_node(path) | |
376 | assert node.size == size |
|
376 | assert node.size == size | |
377 |
|
377 | |||
378 | def test_dir_size(self): |
|
378 | def test_dir_size(self): | |
379 | to_check = ( |
|
379 | to_check = ( | |
380 | ('5f2c6ee195929b0be80749243c18121c9864a3b3', '/', 674076), |
|
380 | ('5f2c6ee195929b0be80749243c18121c9864a3b3', '/', 674076), | |
381 | ('7ab37bc680b4aa72c34d07b230c866c28e9fc204', '/', 674049), |
|
381 | ('7ab37bc680b4aa72c34d07b230c866c28e9fc204', '/', 674049), | |
382 | ('6892503fb8f2a552cef5f4d4cc2cdbd13ae1cd2f', '/', 671830), |
|
382 | ('6892503fb8f2a552cef5f4d4cc2cdbd13ae1cd2f', '/', 671830), | |
383 | ) |
|
383 | ) | |
384 | for revision, path, size in to_check: |
|
384 | for revision, path, size in to_check: | |
385 | self._test_dir_size(revision, path, size) |
|
385 | self._test_dir_size(revision, path, size) | |
386 |
|
386 | |||
387 | def test_repo_size(self): |
|
387 | def test_repo_size(self): | |
388 | assert self.repo.size == 674076 |
|
388 | assert self.repo.size == 674076 | |
389 |
|
389 | |||
390 | def test_file_history(self): |
|
390 | def test_file_history(self): | |
391 | # we can only check if those revisions are present in the history |
|
391 | # we can only check if those revisions are present in the history | |
392 | # as we cannot update this test every time file is changed |
|
392 | # as we cannot update this test every time file is changed | |
393 | files = { |
|
393 | files = { | |
394 | 'setup.py': [ |
|
394 | 'setup.py': [ | |
395 | '54386793436c938cff89326944d4c2702340037d', |
|
395 | '54386793436c938cff89326944d4c2702340037d', | |
396 | '51d254f0ecf5df2ce50c0b115741f4cf13985dab', |
|
396 | '51d254f0ecf5df2ce50c0b115741f4cf13985dab', | |
397 | '998ed409c795fec2012b1c0ca054d99888b22090', |
|
397 | '998ed409c795fec2012b1c0ca054d99888b22090', | |
398 | '5e0eb4c47f56564395f76333f319d26c79e2fb09', |
|
398 | '5e0eb4c47f56564395f76333f319d26c79e2fb09', | |
399 | '0115510b70c7229dbc5dc49036b32e7d91d23acd', |
|
399 | '0115510b70c7229dbc5dc49036b32e7d91d23acd', | |
400 | '7cb3fd1b6d8c20ba89e2264f1c8baebc8a52d36e', |
|
400 | '7cb3fd1b6d8c20ba89e2264f1c8baebc8a52d36e', | |
401 | '2a13f185e4525f9d4b59882791a2d397b90d5ddc', |
|
401 | '2a13f185e4525f9d4b59882791a2d397b90d5ddc', | |
402 | '191caa5b2c81ed17c0794bf7bb9958f4dcb0b87e', |
|
402 | '191caa5b2c81ed17c0794bf7bb9958f4dcb0b87e', | |
403 | 'ff7ca51e58c505fec0dd2491de52c622bb7a806b', |
|
403 | 'ff7ca51e58c505fec0dd2491de52c622bb7a806b', | |
404 | ], |
|
404 | ], | |
405 | 'vcs/nodes.py': [ |
|
405 | 'vcs/nodes.py': [ | |
406 | '33fa3223355104431402a888fa77a4e9956feb3e', |
|
406 | '33fa3223355104431402a888fa77a4e9956feb3e', | |
407 | 'fa014c12c26d10ba682fadb78f2a11c24c8118e1', |
|
407 | 'fa014c12c26d10ba682fadb78f2a11c24c8118e1', | |
408 | 'e686b958768ee96af8029fe19c6050b1a8dd3b2b', |
|
408 | 'e686b958768ee96af8029fe19c6050b1a8dd3b2b', | |
409 | 'ab5721ca0a081f26bf43d9051e615af2cc99952f', |
|
409 | 'ab5721ca0a081f26bf43d9051e615af2cc99952f', | |
410 | 'c877b68d18e792a66b7f4c529ea02c8f80801542', |
|
410 | 'c877b68d18e792a66b7f4c529ea02c8f80801542', | |
411 | '4313566d2e417cb382948f8d9d7c765330356054', |
|
411 | '4313566d2e417cb382948f8d9d7c765330356054', | |
412 | '6c2303a793671e807d1cfc70134c9ca0767d98c2', |
|
412 | '6c2303a793671e807d1cfc70134c9ca0767d98c2', | |
413 | '54386793436c938cff89326944d4c2702340037d', |
|
413 | '54386793436c938cff89326944d4c2702340037d', | |
414 | '54000345d2e78b03a99d561399e8e548de3f3203', |
|
414 | '54000345d2e78b03a99d561399e8e548de3f3203', | |
415 | '1c6b3677b37ea064cb4b51714d8f7498f93f4b2b', |
|
415 | '1c6b3677b37ea064cb4b51714d8f7498f93f4b2b', | |
416 | '2d03ca750a44440fb5ea8b751176d1f36f8e8f46', |
|
416 | '2d03ca750a44440fb5ea8b751176d1f36f8e8f46', | |
417 | '2a08b128c206db48c2f0b8f70df060e6db0ae4f8', |
|
417 | '2a08b128c206db48c2f0b8f70df060e6db0ae4f8', | |
418 | '30c26513ff1eb8e5ce0e1c6b477ee5dc50e2f34b', |
|
418 | '30c26513ff1eb8e5ce0e1c6b477ee5dc50e2f34b', | |
419 | 'ac71e9503c2ca95542839af0ce7b64011b72ea7c', |
|
419 | 'ac71e9503c2ca95542839af0ce7b64011b72ea7c', | |
420 | '12669288fd13adba2a9b7dd5b870cc23ffab92d2', |
|
420 | '12669288fd13adba2a9b7dd5b870cc23ffab92d2', | |
421 | '5a0c84f3e6fe3473e4c8427199d5a6fc71a9b382', |
|
421 | '5a0c84f3e6fe3473e4c8427199d5a6fc71a9b382', | |
422 | '12f2f5e2b38e6ff3fbdb5d722efed9aa72ecb0d5', |
|
422 | '12f2f5e2b38e6ff3fbdb5d722efed9aa72ecb0d5', | |
423 | '5eab1222a7cd4bfcbabc218ca6d04276d4e27378', |
|
423 | '5eab1222a7cd4bfcbabc218ca6d04276d4e27378', | |
424 | 'f50f42baeed5af6518ef4b0cb2f1423f3851a941', |
|
424 | 'f50f42baeed5af6518ef4b0cb2f1423f3851a941', | |
425 | 'd7e390a45f6aa96f04f5e7f583ad4f867431aa25', |
|
425 | 'd7e390a45f6aa96f04f5e7f583ad4f867431aa25', | |
426 | 'f15c21f97864b4f071cddfbf2750ec2e23859414', |
|
426 | 'f15c21f97864b4f071cddfbf2750ec2e23859414', | |
427 | 'e906ef056cf539a4e4e5fc8003eaf7cf14dd8ade', |
|
427 | 'e906ef056cf539a4e4e5fc8003eaf7cf14dd8ade', | |
428 | 'ea2b108b48aa8f8c9c4a941f66c1a03315ca1c3b', |
|
428 | 'ea2b108b48aa8f8c9c4a941f66c1a03315ca1c3b', | |
429 | '84dec09632a4458f79f50ddbbd155506c460b4f9', |
|
429 | '84dec09632a4458f79f50ddbbd155506c460b4f9', | |
430 | '0115510b70c7229dbc5dc49036b32e7d91d23acd', |
|
430 | '0115510b70c7229dbc5dc49036b32e7d91d23acd', | |
431 | '2a13f185e4525f9d4b59882791a2d397b90d5ddc', |
|
431 | '2a13f185e4525f9d4b59882791a2d397b90d5ddc', | |
432 | '3bf1c5868e570e39569d094f922d33ced2fa3b2b', |
|
432 | '3bf1c5868e570e39569d094f922d33ced2fa3b2b', | |
433 | 'b8d04012574729d2c29886e53b1a43ef16dd00a1', |
|
433 | 'b8d04012574729d2c29886e53b1a43ef16dd00a1', | |
434 | '6970b057cffe4aab0a792aa634c89f4bebf01441', |
|
434 | '6970b057cffe4aab0a792aa634c89f4bebf01441', | |
435 | 'dd80b0f6cf5052f17cc738c2951c4f2070200d7f', |
|
435 | 'dd80b0f6cf5052f17cc738c2951c4f2070200d7f', | |
436 | 'ff7ca51e58c505fec0dd2491de52c622bb7a806b', |
|
436 | 'ff7ca51e58c505fec0dd2491de52c622bb7a806b', | |
437 | ], |
|
437 | ], | |
438 | 'vcs/backends/git.py': [ |
|
438 | 'vcs/backends/git.py': [ | |
439 | '4cf116ad5a457530381135e2f4c453e68a1b0105', |
|
439 | '4cf116ad5a457530381135e2f4c453e68a1b0105', | |
440 | '9a751d84d8e9408e736329767387f41b36935153', |
|
440 | '9a751d84d8e9408e736329767387f41b36935153', | |
441 | 'cb681fb539c3faaedbcdf5ca71ca413425c18f01', |
|
441 | 'cb681fb539c3faaedbcdf5ca71ca413425c18f01', | |
442 | '428f81bb652bcba8d631bce926e8834ff49bdcc6', |
|
442 | '428f81bb652bcba8d631bce926e8834ff49bdcc6', | |
443 | '180ab15aebf26f98f714d8c68715e0f05fa6e1c7', |
|
443 | '180ab15aebf26f98f714d8c68715e0f05fa6e1c7', | |
444 | '2b8e07312a2e89e92b90426ab97f349f4bce2a3a', |
|
444 | '2b8e07312a2e89e92b90426ab97f349f4bce2a3a', | |
445 | '50e08c506174d8645a4bb517dd122ac946a0f3bf', |
|
445 | '50e08c506174d8645a4bb517dd122ac946a0f3bf', | |
446 | '54000345d2e78b03a99d561399e8e548de3f3203', |
|
446 | '54000345d2e78b03a99d561399e8e548de3f3203', | |
447 | ], |
|
447 | ], | |
448 | } |
|
448 | } | |
449 | for path, revs in files.items(): |
|
449 | for path, revs in files.items(): | |
450 | node = self.repo.get_changeset(revs[0]).get_node(path) |
|
450 | node = self.repo.get_changeset(revs[0]).get_node(path) | |
451 | node_revs = [chset.raw_id for chset in node.history] |
|
451 | node_revs = [chset.raw_id for chset in node.history] | |
452 | assert set(revs).issubset(set(node_revs)), "We assumed that %s is subset of revisions for which file %s " \ |
|
452 | assert set(revs).issubset(set(node_revs)), "We assumed that %s is subset of revisions for which file %s " \ | |
453 | "has been changed, and history of that node returned: %s" \ |
|
453 | "has been changed, and history of that node returned: %s" \ | |
454 | % (revs, path, node_revs) |
|
454 | % (revs, path, node_revs) | |
455 |
|
455 | |||
456 | def test_file_annotate(self): |
|
456 | def test_file_annotate(self): | |
457 | files = { |
|
457 | files = { | |
458 | 'vcs/backends/__init__.py': { |
|
458 | 'vcs/backends/__init__.py': { | |
459 | 'c1214f7e79e02fc37156ff215cd71275450cffc3': { |
|
459 | 'c1214f7e79e02fc37156ff215cd71275450cffc3': { | |
460 | 'lines_no': 1, |
|
460 | 'lines_no': 1, | |
461 | 'changesets': [ |
|
461 | 'changesets': [ | |
462 | 'c1214f7e79e02fc37156ff215cd71275450cffc3', |
|
462 | 'c1214f7e79e02fc37156ff215cd71275450cffc3', | |
463 | ], |
|
463 | ], | |
464 | }, |
|
464 | }, | |
465 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647': { |
|
465 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647': { | |
466 | 'lines_no': 21, |
|
466 | 'lines_no': 21, | |
467 | 'changesets': [ |
|
467 | 'changesets': [ | |
468 | '49d3fd156b6f7db46313fac355dca1a0b94a0017', |
|
468 | '49d3fd156b6f7db46313fac355dca1a0b94a0017', | |
469 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
469 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
470 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
470 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
471 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
471 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
472 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
472 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
473 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
473 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
474 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
474 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
475 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
475 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
476 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
476 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
477 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
477 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
478 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
478 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
479 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
479 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
480 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
480 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
481 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
481 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
482 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
482 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
483 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
483 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
484 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
484 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
485 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
485 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
486 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
486 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
487 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
487 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
488 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
488 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
489 | ], |
|
489 | ], | |
490 | }, |
|
490 | }, | |
491 | 'e29b67bd158580fc90fc5e9111240b90e6e86064': { |
|
491 | 'e29b67bd158580fc90fc5e9111240b90e6e86064': { | |
492 | 'lines_no': 32, |
|
492 | 'lines_no': 32, | |
493 | 'changesets': [ |
|
493 | 'changesets': [ | |
494 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
494 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
495 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
495 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
496 | '5eab1222a7cd4bfcbabc218ca6d04276d4e27378', |
|
496 | '5eab1222a7cd4bfcbabc218ca6d04276d4e27378', | |
497 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
497 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
498 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
498 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
499 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', |
|
499 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', | |
500 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
500 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
501 | '54000345d2e78b03a99d561399e8e548de3f3203', |
|
501 | '54000345d2e78b03a99d561399e8e548de3f3203', | |
502 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
502 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
503 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
503 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
504 | '78c3f0c23b7ee935ec276acb8b8212444c33c396', |
|
504 | '78c3f0c23b7ee935ec276acb8b8212444c33c396', | |
505 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', |
|
505 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', | |
506 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', |
|
506 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', | |
507 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', |
|
507 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', | |
508 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', |
|
508 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', | |
509 | '2a13f185e4525f9d4b59882791a2d397b90d5ddc', |
|
509 | '2a13f185e4525f9d4b59882791a2d397b90d5ddc', | |
510 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', |
|
510 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', | |
511 | '78c3f0c23b7ee935ec276acb8b8212444c33c396', |
|
511 | '78c3f0c23b7ee935ec276acb8b8212444c33c396', | |
512 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', |
|
512 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', | |
513 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', |
|
513 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', | |
514 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', |
|
514 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', | |
515 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
515 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
516 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
516 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
517 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
517 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
518 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
518 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
519 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', |
|
519 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', | |
520 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
520 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
521 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', |
|
521 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', | |
522 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', |
|
522 | '992f38217b979d0b0987d0bae3cc26dac85d9b19', | |
523 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
523 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
524 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
524 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
525 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', |
|
525 | '16fba1ae9334d79b66d7afed2c2dfbfa2ae53647', | |
526 | ], |
|
526 | ], | |
527 | }, |
|
527 | }, | |
528 | }, |
|
528 | }, | |
529 | } |
|
529 | } | |
530 |
|
530 | |||
531 | for fname, revision_dict in files.items(): |
|
531 | for fname, revision_dict in files.items(): | |
532 | for rev, data in revision_dict.items(): |
|
532 | for rev, data in revision_dict.items(): | |
533 | cs = self.repo.get_changeset(rev) |
|
533 | cs = self.repo.get_changeset(rev) | |
534 |
|
534 | |||
535 | l1_1 = [x[1] for x in cs.get_file_annotate(fname)] |
|
535 | l1_1 = [x[1] for x in cs.get_file_annotate(fname)] | |
536 | l1_2 = [x[2]().raw_id for x in cs.get_file_annotate(fname)] |
|
536 | l1_2 = [x[2]().raw_id for x in cs.get_file_annotate(fname)] | |
537 | assert l1_1 == l1_2 |
|
537 | assert l1_1 == l1_2 | |
538 | l1 = l1_1 |
|
538 | l1 = l1_1 | |
539 | l2 = files[fname][rev]['changesets'] |
|
539 | l2 = files[fname][rev]['changesets'] | |
540 | assert l1 == l2, "The lists of revision for %s@rev %s" \ |
|
540 | assert l1 == l2, "The lists of revision for %s@rev %s" \ | |
541 | "from annotation list should match each other, " \ |
|
541 | "from annotation list should match each other, " \ | |
542 | "got \n%s \nvs \n%s " % (fname, rev, l1, l2) |
|
542 | "got \n%s \nvs \n%s " % (fname, rev, l1, l2) | |
543 |
|
543 | |||
544 | def test_files_state(self): |
|
544 | def test_files_state(self): | |
545 | """ |
|
545 | """ | |
546 | Tests state of FileNodes. |
|
546 | Tests state of FileNodes. | |
547 | """ |
|
547 | """ | |
548 | node = self.repo \ |
|
548 | node = self.repo \ | |
549 | .get_changeset('e6ea6d16e2f26250124a1f4b4fe37a912f9d86a0') \ |
|
549 | .get_changeset('e6ea6d16e2f26250124a1f4b4fe37a912f9d86a0') \ | |
550 | .get_node('vcs/utils/diffs.py') |
|
550 | .get_node('vcs/utils/diffs.py') | |
551 | assert node.state == NodeState.ADDED |
|
551 | assert node.state == NodeState.ADDED | |
552 | assert node.added |
|
552 | assert node.added | |
553 | assert not node.changed |
|
553 | assert not node.changed | |
554 | assert not node.not_changed |
|
554 | assert not node.not_changed | |
555 | assert not node.removed |
|
555 | assert not node.removed | |
556 |
|
556 | |||
557 | node = self.repo \ |
|
557 | node = self.repo \ | |
558 | .get_changeset('33fa3223355104431402a888fa77a4e9956feb3e') \ |
|
558 | .get_changeset('33fa3223355104431402a888fa77a4e9956feb3e') \ | |
559 | .get_node('.hgignore') |
|
559 | .get_node('.hgignore') | |
560 | assert node.state == NodeState.CHANGED |
|
560 | assert node.state == NodeState.CHANGED | |
561 | assert not node.added |
|
561 | assert not node.added | |
562 | assert node.changed |
|
562 | assert node.changed | |
563 | assert not node.not_changed |
|
563 | assert not node.not_changed | |
564 | assert not node.removed |
|
564 | assert not node.removed | |
565 |
|
565 | |||
566 | node = self.repo \ |
|
566 | node = self.repo \ | |
567 | .get_changeset('e29b67bd158580fc90fc5e9111240b90e6e86064') \ |
|
567 | .get_changeset('e29b67bd158580fc90fc5e9111240b90e6e86064') \ | |
568 | .get_node('setup.py') |
|
568 | .get_node('setup.py') | |
569 | assert node.state == NodeState.NOT_CHANGED |
|
569 | assert node.state == NodeState.NOT_CHANGED | |
570 | assert not node.added |
|
570 | assert not node.added | |
571 | assert not node.changed |
|
571 | assert not node.changed | |
572 | assert node.not_changed |
|
572 | assert node.not_changed | |
573 | assert not node.removed |
|
573 | assert not node.removed | |
574 |
|
574 | |||
575 | # If node has REMOVED state then trying to fetch it would raise |
|
575 | # If node has REMOVED state then trying to fetch it would raise | |
576 | # ChangesetError exception |
|
576 | # ChangesetError exception | |
577 | chset = self.repo.get_changeset( |
|
577 | chset = self.repo.get_changeset( | |
578 | 'fa6600f6848800641328adbf7811fd2372c02ab2') |
|
578 | 'fa6600f6848800641328adbf7811fd2372c02ab2') | |
579 | path = 'vcs/backends/BaseRepository.py' |
|
579 | path = 'vcs/backends/BaseRepository.py' | |
580 | with pytest.raises(NodeDoesNotExistError): |
|
580 | with pytest.raises(NodeDoesNotExistError): | |
581 | chset.get_node(path) |
|
581 | chset.get_node(path) | |
582 | # but it would be one of ``removed`` (changeset's attribute) |
|
582 | # but it would be one of ``removed`` (changeset's attribute) | |
583 | assert path in [rf.path for rf in chset.removed] |
|
583 | assert path in [rf.path for rf in chset.removed] | |
584 |
|
584 | |||
585 | chset = self.repo.get_changeset( |
|
585 | chset = self.repo.get_changeset( | |
586 | '54386793436c938cff89326944d4c2702340037d') |
|
586 | '54386793436c938cff89326944d4c2702340037d') | |
587 | changed = ['setup.py', 'tests/test_nodes.py', 'vcs/backends/hg.py', |
|
587 | changed = ['setup.py', 'tests/test_nodes.py', 'vcs/backends/hg.py', | |
588 | 'vcs/nodes.py'] |
|
588 | 'vcs/nodes.py'] | |
589 | assert set(changed) == set([f.path for f in chset.changed]) |
|
589 | assert set(changed) == set([f.path for f in chset.changed]) | |
590 |
|
590 | |||
591 | def test_commit_message_is_unicode(self): |
|
591 | def test_commit_message_is_unicode(self): | |
592 | for cs in self.repo: |
|
592 | for cs in self.repo: | |
593 | assert isinstance(cs.message, unicode) |
|
593 | assert isinstance(cs.message, unicode) | |
594 |
|
594 | |||
595 | def test_changeset_author_is_unicode(self): |
|
595 | def test_changeset_author_is_unicode(self): | |
596 | for cs in self.repo: |
|
596 | for cs in self.repo: | |
597 | assert isinstance(cs.author, unicode) |
|
597 | assert isinstance(cs.author, unicode) | |
598 |
|
598 | |||
599 | def test_repo_files_content_is_ |
|
599 | def test_repo_files_content_is_bytes(self): | |
600 | changeset = self.repo.get_changeset() |
|
600 | changeset = self.repo.get_changeset() | |
601 | for node in changeset.get_node('/'): |
|
601 | for node in changeset.get_node('/'): | |
602 | if node.is_file(): |
|
602 | if node.is_file(): | |
603 | assert isinstance(node.content, |
|
603 | assert isinstance(node.content, bytes) | |
604 |
|
604 | |||
605 | def test_wrong_path(self): |
|
605 | def test_wrong_path(self): | |
606 | # There is 'setup.py' in the root dir but not there: |
|
606 | # There is 'setup.py' in the root dir but not there: | |
607 | path = 'foo/bar/setup.py' |
|
607 | path = 'foo/bar/setup.py' | |
608 | tip = self.repo.get_changeset() |
|
608 | tip = self.repo.get_changeset() | |
609 | with pytest.raises(VCSError): |
|
609 | with pytest.raises(VCSError): | |
610 | tip.get_node(path) |
|
610 | tip.get_node(path) | |
611 |
|
611 | |||
612 | def test_author_email(self): |
|
612 | def test_author_email(self): | |
613 | assert 'marcin@python-blog.com' == self.repo.get_changeset('c1214f7e79e02fc37156ff215cd71275450cffc3').author_email |
|
613 | assert 'marcin@python-blog.com' == self.repo.get_changeset('c1214f7e79e02fc37156ff215cd71275450cffc3').author_email | |
614 | assert 'lukasz.balcerzak@python-center.pl' == self.repo.get_changeset('ff7ca51e58c505fec0dd2491de52c622bb7a806b').author_email |
|
614 | assert 'lukasz.balcerzak@python-center.pl' == self.repo.get_changeset('ff7ca51e58c505fec0dd2491de52c622bb7a806b').author_email | |
615 | assert '' == self.repo.get_changeset('8430a588b43b5d6da365400117c89400326e7992').author_email |
|
615 | assert '' == self.repo.get_changeset('8430a588b43b5d6da365400117c89400326e7992').author_email | |
616 |
|
616 | |||
617 | def test_author_username(self): |
|
617 | def test_author_username(self): | |
618 | assert 'Marcin Kuzminski' == self.repo.get_changeset('c1214f7e79e02fc37156ff215cd71275450cffc3').author_name |
|
618 | assert 'Marcin Kuzminski' == self.repo.get_changeset('c1214f7e79e02fc37156ff215cd71275450cffc3').author_name | |
619 | assert 'Lukasz Balcerzak' == self.repo.get_changeset('ff7ca51e58c505fec0dd2491de52c622bb7a806b').author_name |
|
619 | assert 'Lukasz Balcerzak' == self.repo.get_changeset('ff7ca51e58c505fec0dd2491de52c622bb7a806b').author_name | |
620 | assert 'marcink none@none' == self.repo.get_changeset('8430a588b43b5d6da365400117c89400326e7992').author_name |
|
620 | assert 'marcink none@none' == self.repo.get_changeset('8430a588b43b5d6da365400117c89400326e7992').author_name | |
621 |
|
621 | |||
622 |
|
622 | |||
623 | class TestGitSpecificWithRepo(_BackendTestMixin): |
|
623 | class TestGitSpecificWithRepo(_BackendTestMixin): | |
624 | backend_alias = 'git' |
|
624 | backend_alias = 'git' | |
625 |
|
625 | |||
626 | @classmethod |
|
626 | @classmethod | |
627 | def _get_commits(cls): |
|
627 | def _get_commits(cls): | |
628 | return [ |
|
628 | return [ | |
629 | { |
|
629 | { | |
630 | 'message': 'Initial', |
|
630 | 'message': 'Initial', | |
631 | 'author': 'Joe Doe <joe.doe@example.com>', |
|
631 | 'author': 'Joe Doe <joe.doe@example.com>', | |
632 | 'date': datetime.datetime(2010, 1, 1, 20), |
|
632 | 'date': datetime.datetime(2010, 1, 1, 20), | |
633 | 'added': [ |
|
633 | 'added': [ | |
634 | FileNode('foobar/static/js/admin/base.js', content='base'), |
|
634 | FileNode('foobar/static/js/admin/base.js', content='base'), | |
635 | FileNode('foobar/static/admin', content='admin', |
|
635 | FileNode('foobar/static/admin', content='admin', | |
636 | mode=0o120000), # this is a link |
|
636 | mode=0o120000), # this is a link | |
637 | FileNode('foo', content='foo'), |
|
637 | FileNode('foo', content='foo'), | |
638 | ], |
|
638 | ], | |
639 | }, |
|
639 | }, | |
640 | { |
|
640 | { | |
641 | 'message': 'Second', |
|
641 | 'message': 'Second', | |
642 | 'author': 'Joe Doe <joe.doe@example.com>', |
|
642 | 'author': 'Joe Doe <joe.doe@example.com>', | |
643 | 'date': datetime.datetime(2010, 1, 1, 22), |
|
643 | 'date': datetime.datetime(2010, 1, 1, 22), | |
644 | 'added': [ |
|
644 | 'added': [ | |
645 | FileNode('foo2', content='foo2'), |
|
645 | FileNode('foo2', content='foo2'), | |
646 | ], |
|
646 | ], | |
647 | }, |
|
647 | }, | |
648 | ] |
|
648 | ] | |
649 |
|
649 | |||
650 | def test_paths_slow_traversing(self): |
|
650 | def test_paths_slow_traversing(self): | |
651 | cs = self.repo.get_changeset() |
|
651 | cs = self.repo.get_changeset() | |
652 | assert cs.get_node('foobar').get_node('static').get_node('js').get_node('admin').get_node('base.js').content == 'base' |
|
652 | assert cs.get_node('foobar').get_node('static').get_node('js').get_node('admin').get_node('base.js').content == 'base' | |
653 |
|
653 | |||
654 | def test_paths_fast_traversing(self): |
|
654 | def test_paths_fast_traversing(self): | |
655 | cs = self.repo.get_changeset() |
|
655 | cs = self.repo.get_changeset() | |
656 | assert cs.get_node('foobar/static/js/admin/base.js').content == 'base' |
|
656 | assert cs.get_node('foobar/static/js/admin/base.js').content == 'base' | |
657 |
|
657 | |||
658 | def test_workdir_get_branch(self): |
|
658 | def test_workdir_get_branch(self): | |
659 | self.repo.run_git_command(['checkout', '-b', 'production']) |
|
659 | self.repo.run_git_command(['checkout', '-b', 'production']) | |
660 | # Regression test: one of the following would fail if we don't check |
|
660 | # Regression test: one of the following would fail if we don't check | |
661 | # .git/HEAD file |
|
661 | # .git/HEAD file | |
662 | self.repo.run_git_command(['checkout', 'production']) |
|
662 | self.repo.run_git_command(['checkout', 'production']) | |
663 | assert self.repo.workdir.get_branch() == 'production' |
|
663 | assert self.repo.workdir.get_branch() == 'production' | |
664 | self.repo.run_git_command(['checkout', 'master']) |
|
664 | self.repo.run_git_command(['checkout', 'master']) | |
665 | assert self.repo.workdir.get_branch() == 'master' |
|
665 | assert self.repo.workdir.get_branch() == 'master' | |
666 |
|
666 | |||
667 | def test_get_diff_runs_git_command_with_hashes(self): |
|
667 | def test_get_diff_runs_git_command_with_hashes(self): | |
668 | self.repo._run_git_command = mock.Mock(return_value=(b'', b'')) |
|
668 | self.repo._run_git_command = mock.Mock(return_value=(b'', b'')) | |
669 | self.repo.get_diff(0, 1) |
|
669 | self.repo.get_diff(0, 1) | |
670 | self.repo._run_git_command.assert_called_once_with( |
|
670 | self.repo._run_git_command.assert_called_once_with( | |
671 | ['diff', '-U3', '--full-index', '--binary', '-p', '-M', '--abbrev=40', |
|
671 | ['diff', '-U3', '--full-index', '--binary', '-p', '-M', '--abbrev=40', | |
672 | self.repo._get_revision(0), self.repo._get_revision(1)], cwd=self.repo.path) |
|
672 | self.repo._get_revision(0), self.repo._get_revision(1)], cwd=self.repo.path) | |
673 |
|
673 | |||
674 | def test_get_diff_runs_git_command_with_str_hashes(self): |
|
674 | def test_get_diff_runs_git_command_with_str_hashes(self): | |
675 | self.repo._run_git_command = mock.Mock(return_value=(b'', b'')) |
|
675 | self.repo._run_git_command = mock.Mock(return_value=(b'', b'')) | |
676 | self.repo.get_diff(self.repo.EMPTY_CHANGESET, 1) |
|
676 | self.repo.get_diff(self.repo.EMPTY_CHANGESET, 1) | |
677 | self.repo._run_git_command.assert_called_once_with( |
|
677 | self.repo._run_git_command.assert_called_once_with( | |
678 | ['show', '-U3', '--full-index', '--binary', '-p', '-M', '--abbrev=40', |
|
678 | ['show', '-U3', '--full-index', '--binary', '-p', '-M', '--abbrev=40', | |
679 | self.repo._get_revision(1)], cwd=self.repo.path) |
|
679 | self.repo._get_revision(1)], cwd=self.repo.path) | |
680 |
|
680 | |||
681 | def test_get_diff_runs_git_command_with_path_if_its_given(self): |
|
681 | def test_get_diff_runs_git_command_with_path_if_its_given(self): | |
682 | self.repo._run_git_command = mock.Mock(return_value=(b'', b'')) |
|
682 | self.repo._run_git_command = mock.Mock(return_value=(b'', b'')) | |
683 | self.repo.get_diff(0, 1, 'foo') |
|
683 | self.repo.get_diff(0, 1, 'foo') | |
684 | self.repo._run_git_command.assert_called_once_with( |
|
684 | self.repo._run_git_command.assert_called_once_with( | |
685 | ['diff', '-U3', '--full-index', '--binary', '-p', '-M', '--abbrev=40', |
|
685 | ['diff', '-U3', '--full-index', '--binary', '-p', '-M', '--abbrev=40', | |
686 | self.repo._get_revision(0), self.repo._get_revision(1), '--', 'foo'], cwd=self.repo.path) |
|
686 | self.repo._get_revision(0), self.repo._get_revision(1), '--', 'foo'], cwd=self.repo.path) | |
687 |
|
687 | |||
688 | def test_get_diff_does_not_sanitize_valid_context(self): |
|
688 | def test_get_diff_does_not_sanitize_valid_context(self): | |
689 | almost_overflowed_long_int = 2**31-1 |
|
689 | almost_overflowed_long_int = 2**31-1 | |
690 |
|
690 | |||
691 | self.repo._run_git_command = mock.Mock(return_value=(b'', b'')) |
|
691 | self.repo._run_git_command = mock.Mock(return_value=(b'', b'')) | |
692 | self.repo.get_diff(0, 1, 'foo', context=almost_overflowed_long_int) |
|
692 | self.repo.get_diff(0, 1, 'foo', context=almost_overflowed_long_int) | |
693 | self.repo._run_git_command.assert_called_once_with( |
|
693 | self.repo._run_git_command.assert_called_once_with( | |
694 | ['diff', '-U' + str(almost_overflowed_long_int), '--full-index', '--binary', '-p', '-M', '--abbrev=40', |
|
694 | ['diff', '-U' + str(almost_overflowed_long_int), '--full-index', '--binary', '-p', '-M', '--abbrev=40', | |
695 | self.repo._get_revision(0), self.repo._get_revision(1), '--', 'foo'], cwd=self.repo.path) |
|
695 | self.repo._get_revision(0), self.repo._get_revision(1), '--', 'foo'], cwd=self.repo.path) | |
696 |
|
696 | |||
697 | def test_get_diff_sanitizes_overflowing_context(self): |
|
697 | def test_get_diff_sanitizes_overflowing_context(self): | |
698 | overflowed_long_int = 2**31 |
|
698 | overflowed_long_int = 2**31 | |
699 | sanitized_overflowed_long_int = overflowed_long_int-1 |
|
699 | sanitized_overflowed_long_int = overflowed_long_int-1 | |
700 |
|
700 | |||
701 | self.repo._run_git_command = mock.Mock(return_value=(b'', b'')) |
|
701 | self.repo._run_git_command = mock.Mock(return_value=(b'', b'')) | |
702 | self.repo.get_diff(0, 1, 'foo', context=overflowed_long_int) |
|
702 | self.repo.get_diff(0, 1, 'foo', context=overflowed_long_int) | |
703 |
|
703 | |||
704 | self.repo._run_git_command.assert_called_once_with( |
|
704 | self.repo._run_git_command.assert_called_once_with( | |
705 | ['diff', '-U' + str(sanitized_overflowed_long_int), '--full-index', '--binary', '-p', '-M', '--abbrev=40', |
|
705 | ['diff', '-U' + str(sanitized_overflowed_long_int), '--full-index', '--binary', '-p', '-M', '--abbrev=40', | |
706 | self.repo._get_revision(0), self.repo._get_revision(1), '--', 'foo'], cwd=self.repo.path) |
|
706 | self.repo._get_revision(0), self.repo._get_revision(1), '--', 'foo'], cwd=self.repo.path) | |
707 |
|
707 | |||
708 | def test_get_diff_does_not_sanitize_zero_context(self): |
|
708 | def test_get_diff_does_not_sanitize_zero_context(self): | |
709 | zero_context = 0 |
|
709 | zero_context = 0 | |
710 |
|
710 | |||
711 | self.repo._run_git_command = mock.Mock(return_value=(b'', b'')) |
|
711 | self.repo._run_git_command = mock.Mock(return_value=(b'', b'')) | |
712 | self.repo.get_diff(0, 1, 'foo', context=zero_context) |
|
712 | self.repo.get_diff(0, 1, 'foo', context=zero_context) | |
713 |
|
713 | |||
714 | self.repo._run_git_command.assert_called_once_with( |
|
714 | self.repo._run_git_command.assert_called_once_with( | |
715 | ['diff', '-U' + str(zero_context), '--full-index', '--binary', '-p', '-M', '--abbrev=40', |
|
715 | ['diff', '-U' + str(zero_context), '--full-index', '--binary', '-p', '-M', '--abbrev=40', | |
716 | self.repo._get_revision(0), self.repo._get_revision(1), '--', 'foo'], cwd=self.repo.path) |
|
716 | self.repo._get_revision(0), self.repo._get_revision(1), '--', 'foo'], cwd=self.repo.path) | |
717 |
|
717 | |||
718 | def test_get_diff_sanitizes_negative_context(self): |
|
718 | def test_get_diff_sanitizes_negative_context(self): | |
719 | negative_context = -10 |
|
719 | negative_context = -10 | |
720 |
|
720 | |||
721 | self.repo._run_git_command = mock.Mock(return_value=(b'', b'')) |
|
721 | self.repo._run_git_command = mock.Mock(return_value=(b'', b'')) | |
722 | self.repo.get_diff(0, 1, 'foo', context=negative_context) |
|
722 | self.repo.get_diff(0, 1, 'foo', context=negative_context) | |
723 |
|
723 | |||
724 | self.repo._run_git_command.assert_called_once_with( |
|
724 | self.repo._run_git_command.assert_called_once_with( | |
725 | ['diff', '-U0', '--full-index', '--binary', '-p', '-M', '--abbrev=40', |
|
725 | ['diff', '-U0', '--full-index', '--binary', '-p', '-M', '--abbrev=40', | |
726 | self.repo._get_revision(0), self.repo._get_revision(1), '--', 'foo'], cwd=self.repo.path) |
|
726 | self.repo._get_revision(0), self.repo._get_revision(1), '--', 'foo'], cwd=self.repo.path) | |
727 |
|
727 | |||
728 |
|
728 | |||
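
Taken together, the four context tests above pin down how the Git backend sanitizes the value passed to git's -U option: anything above 2**31-1 is clamped down to 2**31-1, negative values are raised to 0, and zero or in-range values pass through untouched. A minimal sketch of that clamping rule, using a hypothetical helper name (the backend's real code may structure this differently):

    def _clamp_diff_context(context):
        # git's -U option takes a signed 32-bit line count; keep it in range.
        max_context = 2 ** 31 - 1
        return max(0, min(context, max_context))

    assert _clamp_diff_context(2 ** 31) == 2 ** 31 - 1   # overflowing value is reduced
    assert _clamp_diff_context(-10) == 0                 # negative value becomes 0
    assert _clamp_diff_context(0) == 0                   # zero passes through
    assert _clamp_diff_context(3) == 3                   # in-range value is untouched
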
729 | class TestGitRegression(_BackendTestMixin): |
|
729 | class TestGitRegression(_BackendTestMixin): | |
730 | backend_alias = 'git' |
|
730 | backend_alias = 'git' | |
731 |
|
731 | |||
732 | @classmethod |
|
732 | @classmethod | |
733 | def _get_commits(cls): |
|
733 | def _get_commits(cls): | |
734 | return [ |
|
734 | return [ | |
735 | { |
|
735 | { | |
736 | 'message': 'Initial', |
|
736 | 'message': 'Initial', | |
737 | 'author': 'Joe Doe <joe.doe@example.com>', |
|
737 | 'author': 'Joe Doe <joe.doe@example.com>', | |
738 | 'date': datetime.datetime(2010, 1, 1, 20), |
|
738 | 'date': datetime.datetime(2010, 1, 1, 20), | |
739 | 'added': [ |
|
739 | 'added': [ | |
740 | FileNode('bot/__init__.py', content='base'), |
|
740 | FileNode('bot/__init__.py', content='base'), | |
741 | FileNode('bot/templates/404.html', content='base'), |
|
741 | FileNode('bot/templates/404.html', content='base'), | |
742 | FileNode('bot/templates/500.html', content='base'), |
|
742 | FileNode('bot/templates/500.html', content='base'), | |
743 | ], |
|
743 | ], | |
744 | }, |
|
744 | }, | |
745 | { |
|
745 | { | |
746 | 'message': 'Second', |
|
746 | 'message': 'Second', | |
747 | 'author': 'Joe Doe <joe.doe@example.com>', |
|
747 | 'author': 'Joe Doe <joe.doe@example.com>', | |
748 | 'date': datetime.datetime(2010, 1, 1, 22), |
|
748 | 'date': datetime.datetime(2010, 1, 1, 22), | |
749 | 'added': [ |
|
749 | 'added': [ | |
750 | FileNode('bot/build/migrations/1.py', content='foo2'), |
|
750 | FileNode('bot/build/migrations/1.py', content='foo2'), | |
751 | FileNode('bot/build/migrations/2.py', content='foo2'), |
|
751 | FileNode('bot/build/migrations/2.py', content='foo2'), | |
752 | FileNode('bot/build/static/templates/f.html', content='foo2'), |
|
752 | FileNode('bot/build/static/templates/f.html', content='foo2'), | |
753 | FileNode('bot/build/static/templates/f1.html', content='foo2'), |
|
753 | FileNode('bot/build/static/templates/f1.html', content='foo2'), | |
754 | FileNode('bot/build/templates/err.html', content='foo2'), |
|
754 | FileNode('bot/build/templates/err.html', content='foo2'), | |
755 | FileNode('bot/build/templates/err2.html', content='foo2'), |
|
755 | FileNode('bot/build/templates/err2.html', content='foo2'), | |
756 | ], |
|
756 | ], | |
757 | }, |
|
757 | }, | |
758 | ] |
|
758 | ] | |
759 |
|
759 | |||
760 | def test_similar_paths(self): |
|
760 | def test_similar_paths(self): | |
761 | cs = self.repo.get_changeset() |
|
761 | cs = self.repo.get_changeset() | |
762 | paths = lambda *n: [x.path for x in n] |
|
762 | paths = lambda *n: [x.path for x in n] | |
763 | assert paths(*cs.get_nodes('bot')) == ['bot/build', 'bot/templates', 'bot/__init__.py'] |
|
763 | assert paths(*cs.get_nodes('bot')) == ['bot/build', 'bot/templates', 'bot/__init__.py'] | |
764 | assert paths(*cs.get_nodes('bot/build')) == ['bot/build/migrations', 'bot/build/static', 'bot/build/templates'] |
|
764 | assert paths(*cs.get_nodes('bot/build')) == ['bot/build/migrations', 'bot/build/static', 'bot/build/templates'] | |
765 | assert paths(*cs.get_nodes('bot/build/static')) == ['bot/build/static/templates'] |
|
765 | assert paths(*cs.get_nodes('bot/build/static')) == ['bot/build/static/templates'] | |
766 | # this get_nodes call below causes trouble! |

766 | # this get_nodes call below causes trouble! | 
767 | assert paths(*cs.get_nodes('bot/build/static/templates')) == ['bot/build/static/templates/f.html', 'bot/build/static/templates/f1.html'] |
|
767 | assert paths(*cs.get_nodes('bot/build/static/templates')) == ['bot/build/static/templates/f.html', 'bot/build/static/templates/f1.html'] | |
768 | assert paths(*cs.get_nodes('bot/build/templates')) == ['bot/build/templates/err.html', 'bot/build/templates/err2.html'] |
|
768 | assert paths(*cs.get_nodes('bot/build/templates')) == ['bot/build/templates/err.html', 'bot/build/templates/err2.html'] | |
769 | assert paths(*cs.get_nodes('bot/templates/')) == ['bot/templates/404.html', 'bot/templates/500.html'] |
|
769 | assert paths(*cs.get_nodes('bot/templates/')) == ['bot/templates/404.html', 'bot/templates/500.html'] | |
770 |
|
770 | |||
771 |
|
771 | |||
772 | class TestGitHooks(object): |
|
772 | class TestGitHooks(object): | |
773 | """ |
|
773 | """ | |
774 | Tests related to hook functionality of Git repositories. |
|
774 | Tests related to hook functionality of Git repositories. | |
775 | """ |
|
775 | """ | |
776 |
|
776 | |||
777 | def setup_method(self): |
|
777 | def setup_method(self): | |
778 | # For each run we want a fresh repo. |
|
778 | # For each run we want a fresh repo. | |
779 | self.repo_directory = get_new_dir("githookrepo") |
|
779 | self.repo_directory = get_new_dir("githookrepo") | |
780 | self.repo = GitRepository(self.repo_directory, create=True) |
|
780 | self.repo = GitRepository(self.repo_directory, create=True) | |
781 |
|
781 | |||
782 | # Create a dictionary where keys are hook names, and values are paths to |
|
782 | # Create a dictionary where keys are hook names, and values are paths to | |
783 | # them. Deduplicates code in tests a bit. |
|
783 | # them. Deduplicates code in tests a bit. | |
784 | self.hook_directory = self.repo.get_hook_location() |
|
784 | self.hook_directory = self.repo.get_hook_location() | |
785 | self.kallithea_hooks = dict((h, os.path.join(self.hook_directory, h)) for h in ("pre-receive", "post-receive")) |
|
785 | self.kallithea_hooks = dict((h, os.path.join(self.hook_directory, h)) for h in ("pre-receive", "post-receive")) | |
786 |
|
786 | |||
787 | def test_hooks_created_if_missing(self): |
|
787 | def test_hooks_created_if_missing(self): | |
788 | """ |
|
788 | """ | |
789 | Tests that hooks are installed in the repository if they are missing. |

789 | Tests that hooks are installed in the repository if they are missing. | 
790 | """ |
|
790 | """ | |
791 |
|
791 | |||
792 | for hook, hook_path in self.kallithea_hooks.iteritems(): |
|
792 | for hook, hook_path in self.kallithea_hooks.iteritems(): | |
793 | if os.path.exists(hook_path): |
|
793 | if os.path.exists(hook_path): | |
794 | os.remove(hook_path) |
|
794 | os.remove(hook_path) | |
795 |
|
795 | |||
796 | ScmModel().install_git_hooks(repo=self.repo) |
|
796 | ScmModel().install_git_hooks(repo=self.repo) | |
797 |
|
797 | |||
798 | for hook, hook_path in self.kallithea_hooks.iteritems(): |
|
798 | for hook, hook_path in self.kallithea_hooks.iteritems(): | |
799 | assert os.path.exists(hook_path) |
|
799 | assert os.path.exists(hook_path) | |
800 |
|
800 | |||
801 | def test_kallithea_hooks_updated(self): |
|
801 | def test_kallithea_hooks_updated(self): | |
802 | """ |
|
802 | """ | |
803 | Tests that hooks are updated if they are already Kallithea hooks. |

803 | Tests that hooks are updated if they are already Kallithea hooks. | 
804 | """ |
|
804 | """ | |
805 |
|
805 | |||
806 | for hook, hook_path in self.kallithea_hooks.iteritems(): |
|
806 | for hook, hook_path in self.kallithea_hooks.iteritems(): | |
807 | with open(hook_path, "w") as f: |
|
807 | with open(hook_path, "w") as f: | |
808 | f.write("KALLITHEA_HOOK_VER=0.0.0\nJUST_BOGUS") |
|
808 | f.write("KALLITHEA_HOOK_VER=0.0.0\nJUST_BOGUS") | |
809 |
|
809 | |||
810 | ScmModel().install_git_hooks(repo=self.repo) |
|
810 | ScmModel().install_git_hooks(repo=self.repo) | |
811 |
|
811 | |||
812 | for hook, hook_path in self.kallithea_hooks.iteritems(): |
|
812 | for hook, hook_path in self.kallithea_hooks.iteritems(): | |
813 | with open(hook_path) as f: |
|
813 | with open(hook_path) as f: | |
814 | assert "JUST_BOGUS" not in f.read() |
|
814 | assert "JUST_BOGUS" not in f.read() | |
815 |
|
815 | |||
816 | def test_custom_hooks_untouched(self): |
|
816 | def test_custom_hooks_untouched(self): | |
817 | """ |
|
817 | """ | |
818 | Tests that hooks are left untouched if they are not Kallithea hooks. |

818 | Tests that hooks are left untouched if they are not Kallithea hooks. | 
819 | """ |
|
819 | """ | |
820 |
|
820 | |||
821 | for hook, hook_path in self.kallithea_hooks.iteritems(): |
|
821 | for hook, hook_path in self.kallithea_hooks.iteritems(): | |
822 | with open(hook_path, "w") as f: |
|
822 | with open(hook_path, "w") as f: | |
823 | f.write("#!/bin/bash\n#CUSTOM_HOOK") |
|
823 | f.write("#!/bin/bash\n#CUSTOM_HOOK") | |
824 |
|
824 | |||
825 | ScmModel().install_git_hooks(repo=self.repo) |
|
825 | ScmModel().install_git_hooks(repo=self.repo) | |
826 |
|
826 | |||
827 | for hook, hook_path in self.kallithea_hooks.iteritems(): |
|
827 | for hook, hook_path in self.kallithea_hooks.iteritems(): | |
828 | with open(hook_path) as f: |
|
828 | with open(hook_path) as f: | |
829 | assert "CUSTOM_HOOK" in f.read() |
|
829 | assert "CUSTOM_HOOK" in f.read() | |
830 |
|
830 | |||
831 | def test_custom_hooks_forced_update(self): |
|
831 | def test_custom_hooks_forced_update(self): | |
832 | """ |
|
832 | """ | |
833 | Tests that hooks are forcibly updated even though they are custom hooks. |

833 | Tests that hooks are forcibly updated even though they are custom hooks. | 
834 | """ |
|
834 | """ | |
835 |
|
835 | |||
836 | for hook, hook_path in self.kallithea_hooks.iteritems(): |
|
836 | for hook, hook_path in self.kallithea_hooks.iteritems(): | |
837 | with open(hook_path, "w") as f: |
|
837 | with open(hook_path, "w") as f: | |
838 | f.write("#!/bin/bash\n#CUSTOM_HOOK") |
|
838 | f.write("#!/bin/bash\n#CUSTOM_HOOK") | |
839 |
|
839 | |||
840 | ScmModel().install_git_hooks(repo=self.repo, force_create=True) |
|
840 | ScmModel().install_git_hooks(repo=self.repo, force_create=True) | |
841 |
|
841 | |||
842 | for hook, hook_path in self.kallithea_hooks.iteritems(): |
|
842 | for hook, hook_path in self.kallithea_hooks.iteritems(): | |
843 | with open(hook_path) as f: |
|
843 | with open(hook_path) as f: | |
844 | assert "KALLITHEA_HOOK_VER" in f.read() |
|
844 | assert "KALLITHEA_HOOK_VER" in f.read() |
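
The four hook tests above together describe the contract of ScmModel().install_git_hooks: a missing pre-receive/post-receive hook is created, a hook that identifies itself with a KALLITHEA_HOOK_VER marker is rewritten, and any other (custom) hook is left alone unless force_create=True is passed. A rough sketch of that decision rule, using a hypothetical helper name rather than the actual implementation:

    import os

    def _should_write_hook(hook_path, force_create=False):
        # Install the hook if it does not exist yet.
        if not os.path.exists(hook_path):
            return True
        # Rewrite hooks Kallithea installed itself (they carry a version marker).
        with open(hook_path) as f:
            if 'KALLITHEA_HOOK_VER' in f.read():
                return True
        # Leave custom hooks untouched unless explicitly forced.
        return force_create
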
@@ -1,592 +1,592 b'' | |||||
1 | import os |
|
1 | import os | |
2 |
|
2 | |||
3 | import mock |
|
3 | import mock | |
4 | import pytest |
|
4 | import pytest | |
5 |
|
5 | |||
6 | from kallithea.lib.vcs.backends.hg import MercurialChangeset, MercurialRepository |
|
6 | from kallithea.lib.vcs.backends.hg import MercurialChangeset, MercurialRepository | |
7 | from kallithea.lib.vcs.exceptions import NodeDoesNotExistError, RepositoryError, VCSError |
|
7 | from kallithea.lib.vcs.exceptions import NodeDoesNotExistError, RepositoryError, VCSError | |
8 | from kallithea.lib.vcs.nodes import NodeKind, NodeState |
|
8 | from kallithea.lib.vcs.nodes import NodeKind, NodeState | |
9 | from kallithea.lib.vcs.utils import safe_str |
|
9 | from kallithea.lib.vcs.utils import safe_str | |
10 | from kallithea.tests.vcs.conf import TEST_HG_REPO, TEST_HG_REPO_CLONE, TEST_HG_REPO_PULL, TESTS_TMP_PATH |
|
10 | from kallithea.tests.vcs.conf import TEST_HG_REPO, TEST_HG_REPO_CLONE, TEST_HG_REPO_PULL, TESTS_TMP_PATH | |
11 |
|
11 | |||
12 |
|
12 | |||
13 | class TestMercurialRepository(object): |
|
13 | class TestMercurialRepository(object): | |
14 |
|
14 | |||
15 | def __check_for_existing_repo(self): |
|
15 | def __check_for_existing_repo(self): | |
16 | if os.path.exists(TEST_HG_REPO_CLONE): |
|
16 | if os.path.exists(TEST_HG_REPO_CLONE): | |
17 | pytest.fail('Cannot test mercurial clone repo as location %s already ' |
|
17 | pytest.fail('Cannot test mercurial clone repo as location %s already ' | |
18 | 'exists. You should manually remove it first.' |
|
18 | 'exists. You should manually remove it first.' | |
19 | % TEST_HG_REPO_CLONE) |
|
19 | % TEST_HG_REPO_CLONE) | |
20 |
|
20 | |||
21 | def setup_method(self): |
|
21 | def setup_method(self): | |
22 | self.repo = MercurialRepository(safe_str(TEST_HG_REPO)) |
|
22 | self.repo = MercurialRepository(safe_str(TEST_HG_REPO)) | |
23 |
|
23 | |||
24 | def test_wrong_repo_path(self): |
|
24 | def test_wrong_repo_path(self): | |
25 | wrong_repo_path = os.path.join(TESTS_TMP_PATH, 'errorrepo') |
|
25 | wrong_repo_path = os.path.join(TESTS_TMP_PATH, 'errorrepo') | |
26 | with pytest.raises(RepositoryError): |
|
26 | with pytest.raises(RepositoryError): | |
27 | MercurialRepository(wrong_repo_path) |
|
27 | MercurialRepository(wrong_repo_path) | |
28 |
|
28 | |||
29 | def test_unicode_path_repo(self): |
|
29 | def test_unicode_path_repo(self): | |
30 | with pytest.raises(VCSError): |
|
30 | with pytest.raises(VCSError): | |
31 | MercurialRepository(u'iShouldFail') |
|
31 | MercurialRepository(u'iShouldFail') | |
32 |
|
32 | |||
33 | def test_repo_clone(self): |
|
33 | def test_repo_clone(self): | |
34 | self.__check_for_existing_repo() |
|
34 | self.__check_for_existing_repo() | |
35 | repo = MercurialRepository(safe_str(TEST_HG_REPO)) |
|
35 | repo = MercurialRepository(safe_str(TEST_HG_REPO)) | |
36 | repo_clone = MercurialRepository(TEST_HG_REPO_CLONE, |
|
36 | repo_clone = MercurialRepository(TEST_HG_REPO_CLONE, | |
37 | src_url=TEST_HG_REPO, update_after_clone=True) |
|
37 | src_url=TEST_HG_REPO, update_after_clone=True) | |
38 | assert len(repo.revisions) == len(repo_clone.revisions) |
|
38 | assert len(repo.revisions) == len(repo_clone.revisions) | |
39 | # Checking hashes of changesets should be enough |
|
39 | # Checking hashes of changesets should be enough | |
40 | for changeset in repo.get_changesets(): |
|
40 | for changeset in repo.get_changesets(): | |
41 | raw_id = changeset.raw_id |
|
41 | raw_id = changeset.raw_id | |
42 | assert raw_id == repo_clone.get_changeset(raw_id).raw_id |
|
42 | assert raw_id == repo_clone.get_changeset(raw_id).raw_id | |
43 |
|
43 | |||
44 | def test_repo_clone_with_update(self): |
|
44 | def test_repo_clone_with_update(self): | |
45 | repo = MercurialRepository(safe_str(TEST_HG_REPO)) |
|
45 | repo = MercurialRepository(safe_str(TEST_HG_REPO)) | |
46 | repo_clone = MercurialRepository(TEST_HG_REPO_CLONE + '_w_update', |
|
46 | repo_clone = MercurialRepository(TEST_HG_REPO_CLONE + '_w_update', | |
47 | src_url=TEST_HG_REPO, update_after_clone=True) |
|
47 | src_url=TEST_HG_REPO, update_after_clone=True) | |
48 | assert len(repo.revisions) == len(repo_clone.revisions) |
|
48 | assert len(repo.revisions) == len(repo_clone.revisions) | |
49 |
|
49 | |||
50 | # check if current workdir was updated |
|
50 | # check if current workdir was updated | |
51 | assert os.path.isfile( |
|
51 | assert os.path.isfile( | |
52 | os.path.join( |
|
52 | os.path.join( | |
53 | TEST_HG_REPO_CLONE + '_w_update', 'MANIFEST.in' |
|
53 | TEST_HG_REPO_CLONE + '_w_update', 'MANIFEST.in' | |
54 | ) |
|
54 | ) | |
55 | ) |
|
55 | ) | |
56 |
|
56 | |||
57 | def test_repo_clone_without_update(self): |
|
57 | def test_repo_clone_without_update(self): | |
58 | repo = MercurialRepository(safe_str(TEST_HG_REPO)) |
|
58 | repo = MercurialRepository(safe_str(TEST_HG_REPO)) | |
59 | repo_clone = MercurialRepository(TEST_HG_REPO_CLONE + '_wo_update', |
|
59 | repo_clone = MercurialRepository(TEST_HG_REPO_CLONE + '_wo_update', | |
60 | src_url=TEST_HG_REPO, update_after_clone=False) |
|
60 | src_url=TEST_HG_REPO, update_after_clone=False) | |
61 | assert len(repo.revisions) == len(repo_clone.revisions) |
|
61 | assert len(repo.revisions) == len(repo_clone.revisions) | |
62 | assert not os.path.isfile( |
|
62 | assert not os.path.isfile( | |
63 | os.path.join( |
|
63 | os.path.join( | |
64 | TEST_HG_REPO_CLONE + '_wo_update', 'MANIFEST.in' |
|
64 | TEST_HG_REPO_CLONE + '_wo_update', 'MANIFEST.in' | |
65 | ) |
|
65 | ) | |
66 | ) |
|
66 | ) | |
67 |
|
67 | |||
68 | def test_pull(self): |
|
68 | def test_pull(self): | |
69 | if os.path.exists(TEST_HG_REPO_PULL): |
|
69 | if os.path.exists(TEST_HG_REPO_PULL): | |
70 | pytest.fail('Cannot test mercurial pull command as location %s ' |
|
70 | pytest.fail('Cannot test mercurial pull command as location %s ' | |
71 | 'already exists. You should manually remove it first' |
|
71 | 'already exists. You should manually remove it first' | |
72 | % TEST_HG_REPO_PULL) |
|
72 | % TEST_HG_REPO_PULL) | |
73 | repo_new = MercurialRepository(TEST_HG_REPO_PULL, create=True) |
|
73 | repo_new = MercurialRepository(TEST_HG_REPO_PULL, create=True) | |
74 | assert len(self.repo.revisions) > len(repo_new.revisions) |
|
74 | assert len(self.repo.revisions) > len(repo_new.revisions) | |
75 |
|
75 | |||
76 | repo_new.pull(self.repo.path) |
|
76 | repo_new.pull(self.repo.path) | |
77 | repo_new = MercurialRepository(TEST_HG_REPO_PULL) |
|
77 | repo_new = MercurialRepository(TEST_HG_REPO_PULL) | |
78 | assert len(self.repo.revisions) == len(repo_new.revisions) |
|
78 | assert len(self.repo.revisions) == len(repo_new.revisions) | |
79 |
|
79 | |||
80 | def test_revisions(self): |
|
80 | def test_revisions(self): | |
81 | # there are 21 revisions at bitbucket now |
|
81 | # there are 21 revisions at bitbucket now | |
82 | # so we can assume they will remain available from now on |

82 | # so we can assume they will remain available from now on | 
83 | subset = set(['b986218ba1c9b0d6a259fac9b050b1724ed8e545', |
|
83 | subset = set(['b986218ba1c9b0d6a259fac9b050b1724ed8e545', | |
84 | '3d8f361e72ab303da48d799ff1ac40d5ac37c67e', |
|
84 | '3d8f361e72ab303da48d799ff1ac40d5ac37c67e', | |
85 | '6cba7170863a2411822803fa77a0a264f1310b35', |
|
85 | '6cba7170863a2411822803fa77a0a264f1310b35', | |
86 | '56349e29c2af3ac913b28bde9a2c6154436e615b', |
|
86 | '56349e29c2af3ac913b28bde9a2c6154436e615b', | |
87 | '2dda4e345facb0ccff1a191052dd1606dba6781d', |
|
87 | '2dda4e345facb0ccff1a191052dd1606dba6781d', | |
88 | '6fff84722075f1607a30f436523403845f84cd9e', |
|
88 | '6fff84722075f1607a30f436523403845f84cd9e', | |
89 | '7d4bc8ec6be56c0f10425afb40b6fc315a4c25e7', |
|
89 | '7d4bc8ec6be56c0f10425afb40b6fc315a4c25e7', | |
90 | '3803844fdbd3b711175fc3da9bdacfcd6d29a6fb', |
|
90 | '3803844fdbd3b711175fc3da9bdacfcd6d29a6fb', | |
91 | 'dc5d2c0661b61928834a785d3e64a3f80d3aad9c', |
|
91 | 'dc5d2c0661b61928834a785d3e64a3f80d3aad9c', | |
92 | 'be90031137367893f1c406e0a8683010fd115b79', |
|
92 | 'be90031137367893f1c406e0a8683010fd115b79', | |
93 | 'db8e58be770518cbb2b1cdfa69146e47cd481481', |
|
93 | 'db8e58be770518cbb2b1cdfa69146e47cd481481', | |
94 | '84478366594b424af694a6c784cb991a16b87c21', |
|
94 | '84478366594b424af694a6c784cb991a16b87c21', | |
95 | '17f8e105dddb9f339600389c6dc7175d395a535c', |
|
95 | '17f8e105dddb9f339600389c6dc7175d395a535c', | |
96 | '20a662e756499bde3095ffc9bc0643d1def2d0eb', |
|
96 | '20a662e756499bde3095ffc9bc0643d1def2d0eb', | |
97 | '2e319b85e70a707bba0beff866d9f9de032aa4f9', |
|
97 | '2e319b85e70a707bba0beff866d9f9de032aa4f9', | |
98 | '786facd2c61deb9cf91e9534735124fb8fc11842', |
|
98 | '786facd2c61deb9cf91e9534735124fb8fc11842', | |
99 | '94593d2128d38210a2fcd1aabff6dda0d6d9edf8', |
|
99 | '94593d2128d38210a2fcd1aabff6dda0d6d9edf8', | |
100 | 'aa6a0de05b7612707db567078e130a6cd114a9a7', |
|
100 | 'aa6a0de05b7612707db567078e130a6cd114a9a7', | |
101 | 'eada5a770da98ab0dd7325e29d00e0714f228d09' |
|
101 | 'eada5a770da98ab0dd7325e29d00e0714f228d09' | |
102 | ]) |
|
102 | ]) | |
103 | assert subset.issubset(set(self.repo.revisions)) |
|
103 | assert subset.issubset(set(self.repo.revisions)) | |
104 |
|
104 | |||
105 | # check if we have the proper order of revisions |
|
105 | # check if we have the proper order of revisions | |
106 | org = ['b986218ba1c9b0d6a259fac9b050b1724ed8e545', |
|
106 | org = ['b986218ba1c9b0d6a259fac9b050b1724ed8e545', | |
107 | '3d8f361e72ab303da48d799ff1ac40d5ac37c67e', |
|
107 | '3d8f361e72ab303da48d799ff1ac40d5ac37c67e', | |
108 | '6cba7170863a2411822803fa77a0a264f1310b35', |
|
108 | '6cba7170863a2411822803fa77a0a264f1310b35', | |
109 | '56349e29c2af3ac913b28bde9a2c6154436e615b', |
|
109 | '56349e29c2af3ac913b28bde9a2c6154436e615b', | |
110 | '2dda4e345facb0ccff1a191052dd1606dba6781d', |
|
110 | '2dda4e345facb0ccff1a191052dd1606dba6781d', | |
111 | '6fff84722075f1607a30f436523403845f84cd9e', |
|
111 | '6fff84722075f1607a30f436523403845f84cd9e', | |
112 | '7d4bc8ec6be56c0f10425afb40b6fc315a4c25e7', |
|
112 | '7d4bc8ec6be56c0f10425afb40b6fc315a4c25e7', | |
113 | '3803844fdbd3b711175fc3da9bdacfcd6d29a6fb', |
|
113 | '3803844fdbd3b711175fc3da9bdacfcd6d29a6fb', | |
114 | 'dc5d2c0661b61928834a785d3e64a3f80d3aad9c', |
|
114 | 'dc5d2c0661b61928834a785d3e64a3f80d3aad9c', | |
115 | 'be90031137367893f1c406e0a8683010fd115b79', |
|
115 | 'be90031137367893f1c406e0a8683010fd115b79', | |
116 | 'db8e58be770518cbb2b1cdfa69146e47cd481481', |
|
116 | 'db8e58be770518cbb2b1cdfa69146e47cd481481', | |
117 | '84478366594b424af694a6c784cb991a16b87c21', |
|
117 | '84478366594b424af694a6c784cb991a16b87c21', | |
118 | '17f8e105dddb9f339600389c6dc7175d395a535c', |
|
118 | '17f8e105dddb9f339600389c6dc7175d395a535c', | |
119 | '20a662e756499bde3095ffc9bc0643d1def2d0eb', |
|
119 | '20a662e756499bde3095ffc9bc0643d1def2d0eb', | |
120 | '2e319b85e70a707bba0beff866d9f9de032aa4f9', |
|
120 | '2e319b85e70a707bba0beff866d9f9de032aa4f9', | |
121 | '786facd2c61deb9cf91e9534735124fb8fc11842', |
|
121 | '786facd2c61deb9cf91e9534735124fb8fc11842', | |
122 | '94593d2128d38210a2fcd1aabff6dda0d6d9edf8', |
|
122 | '94593d2128d38210a2fcd1aabff6dda0d6d9edf8', | |
123 | 'aa6a0de05b7612707db567078e130a6cd114a9a7', |
|
123 | 'aa6a0de05b7612707db567078e130a6cd114a9a7', | |
124 | 'eada5a770da98ab0dd7325e29d00e0714f228d09', |
|
124 | 'eada5a770da98ab0dd7325e29d00e0714f228d09', | |
125 | '2c1885c735575ca478bf9e17b0029dca68824458', |
|
125 | '2c1885c735575ca478bf9e17b0029dca68824458', | |
126 | 'd9bcd465040bf869799b09ad732c04e0eea99fe9', |
|
126 | 'd9bcd465040bf869799b09ad732c04e0eea99fe9', | |
127 | '469e9c847fe1f6f7a697b8b25b4bc5b48780c1a7', |
|
127 | '469e9c847fe1f6f7a697b8b25b4bc5b48780c1a7', | |
128 | '4fb8326d78e5120da2c7468dcf7098997be385da', |
|
128 | '4fb8326d78e5120da2c7468dcf7098997be385da', | |
129 | '62b4a097164940bd66030c4db51687f3ec035eed', |
|
129 | '62b4a097164940bd66030c4db51687f3ec035eed', | |
130 | '536c1a19428381cfea92ac44985304f6a8049569', |
|
130 | '536c1a19428381cfea92ac44985304f6a8049569', | |
131 | '965e8ab3c44b070cdaa5bf727ddef0ada980ecc4', |
|
131 | '965e8ab3c44b070cdaa5bf727ddef0ada980ecc4', | |
132 | '9bb326a04ae5d98d437dece54be04f830cf1edd9', |
|
132 | '9bb326a04ae5d98d437dece54be04f830cf1edd9', | |
133 | 'f8940bcb890a98c4702319fbe36db75ea309b475', |
|
133 | 'f8940bcb890a98c4702319fbe36db75ea309b475', | |
134 | 'ff5ab059786ebc7411e559a2cc309dfae3625a3b', |
|
134 | 'ff5ab059786ebc7411e559a2cc309dfae3625a3b', | |
135 | '6b6ad5f82ad5bb6190037671bd254bd4e1f4bf08', |
|
135 | '6b6ad5f82ad5bb6190037671bd254bd4e1f4bf08', | |
136 | 'ee87846a61c12153b51543bf860e1026c6d3dcba', ] |
|
136 | 'ee87846a61c12153b51543bf860e1026c6d3dcba', ] | |
137 | assert org == self.repo.revisions[:31] |
|
137 | assert org == self.repo.revisions[:31] | |
138 |
|
138 | |||
139 | def test_iter_slice(self): |
|
139 | def test_iter_slice(self): | |
140 | sliced = list(self.repo[:10]) |
|
140 | sliced = list(self.repo[:10]) | |
141 | itered = list(self.repo)[:10] |
|
141 | itered = list(self.repo)[:10] | |
142 | assert sliced == itered |
|
142 | assert sliced == itered | |
143 |
|
143 | |||
144 | def test_slicing(self): |
|
144 | def test_slicing(self): | |
145 | # 4 1 5 10 95 |
|
145 | # 4 1 5 10 95 | |
146 | for sfrom, sto, size in [(0, 4, 4), (1, 2, 1), (10, 15, 5), |
|
146 | for sfrom, sto, size in [(0, 4, 4), (1, 2, 1), (10, 15, 5), | |
147 | (10, 20, 10), (5, 100, 95)]: |
|
147 | (10, 20, 10), (5, 100, 95)]: | |
148 | revs = list(self.repo[sfrom:sto]) |
|
148 | revs = list(self.repo[sfrom:sto]) | |
149 | assert len(revs) == size |
|
149 | assert len(revs) == size | |
150 | assert revs[0] == self.repo.get_changeset(sfrom) |
|
150 | assert revs[0] == self.repo.get_changeset(sfrom) | |
151 | assert revs[-1] == self.repo.get_changeset(sto - 1) |
|
151 | assert revs[-1] == self.repo.get_changeset(sto - 1) | |
152 |
|
152 | |||
153 | def test_branches(self): |
|
153 | def test_branches(self): | |
154 | # TODO: Need more tests here |
|
154 | # TODO: Need more tests here | |
155 |
|
155 | |||
156 | # active branches |
|
156 | # active branches | |
157 | assert 'default' in self.repo.branches |
|
157 | assert 'default' in self.repo.branches | |
158 | assert 'stable' in self.repo.branches |
|
158 | assert 'stable' in self.repo.branches | |
159 |
|
159 | |||
160 | # closed |
|
160 | # closed | |
161 | assert 'git' in self.repo._get_branches(closed=True) |
|
161 | assert 'git' in self.repo._get_branches(closed=True) | |
162 | assert 'web' in self.repo._get_branches(closed=True) |
|
162 | assert 'web' in self.repo._get_branches(closed=True) | |
163 |
|
163 | |||
164 | for name, id in self.repo.branches.items(): |
|
164 | for name, id in self.repo.branches.items(): | |
165 | assert isinstance(self.repo.get_changeset(id), MercurialChangeset) |
|
165 | assert isinstance(self.repo.get_changeset(id), MercurialChangeset) | |
166 |
|
166 | |||
167 | def test_tip_in_tags(self): |
|
167 | def test_tip_in_tags(self): | |
168 | # tip is always a tag |
|
168 | # tip is always a tag | |
169 | assert 'tip' in self.repo.tags |
|
169 | assert 'tip' in self.repo.tags | |
170 |
|
170 | |||
171 | def test_tip_changeset_in_tags(self): |
|
171 | def test_tip_changeset_in_tags(self): | |
172 | tip = self.repo.get_changeset() |
|
172 | tip = self.repo.get_changeset() | |
173 | assert self.repo.tags['tip'] == tip.raw_id |
|
173 | assert self.repo.tags['tip'] == tip.raw_id | |
174 |
|
174 | |||
175 | def test_initial_changeset(self): |
|
175 | def test_initial_changeset(self): | |
176 |
|
176 | |||
177 | init_chset = self.repo.get_changeset(0) |
|
177 | init_chset = self.repo.get_changeset(0) | |
178 | assert init_chset.message == 'initial import' |
|
178 | assert init_chset.message == 'initial import' | |
179 | assert init_chset.author == 'Marcin Kuzminski <marcin@python-blog.com>' |
|
179 | assert init_chset.author == 'Marcin Kuzminski <marcin@python-blog.com>' | |
180 | assert sorted(init_chset._file_paths) == sorted([ |
|
180 | assert sorted(init_chset._file_paths) == sorted([ | |
181 | 'vcs/__init__.py', |
|
181 | 'vcs/__init__.py', | |
182 | 'vcs/backends/BaseRepository.py', |
|
182 | 'vcs/backends/BaseRepository.py', | |
183 | 'vcs/backends/__init__.py', |
|
183 | 'vcs/backends/__init__.py', | |
184 | ]) |
|
184 | ]) | |
185 |
|
185 | |||
186 | assert sorted(init_chset._dir_paths) == sorted(['', 'vcs', 'vcs/backends']) |
|
186 | assert sorted(init_chset._dir_paths) == sorted(['', 'vcs', 'vcs/backends']) | |
187 |
|
187 | |||
188 | with pytest.raises(NodeDoesNotExistError): |
|
188 | with pytest.raises(NodeDoesNotExistError): | |
189 | init_chset.get_node(path='foobar') |
|
189 | init_chset.get_node(path='foobar') | |
190 |
|
190 | |||
191 | node = init_chset.get_node('vcs/') |
|
191 | node = init_chset.get_node('vcs/') | |
192 | assert hasattr(node, 'kind') |
|
192 | assert hasattr(node, 'kind') | |
193 | assert node.kind == NodeKind.DIR |
|
193 | assert node.kind == NodeKind.DIR | |
194 |
|
194 | |||
195 | node = init_chset.get_node('vcs') |
|
195 | node = init_chset.get_node('vcs') | |
196 | assert hasattr(node, 'kind') |
|
196 | assert hasattr(node, 'kind') | |
197 | assert node.kind == NodeKind.DIR |
|
197 | assert node.kind == NodeKind.DIR | |
198 |
|
198 | |||
199 | node = init_chset.get_node('vcs/__init__.py') |
|
199 | node = init_chset.get_node('vcs/__init__.py') | |
200 | assert hasattr(node, 'kind') |
|
200 | assert hasattr(node, 'kind') | |
201 | assert node.kind == NodeKind.FILE |
|
201 | assert node.kind == NodeKind.FILE | |
202 |
|
202 | |||
203 | def test_not_existing_changeset(self): |
|
203 | def test_not_existing_changeset(self): | |
204 | # rawid |
|
204 | # rawid | |
205 | with pytest.raises(RepositoryError): |
|
205 | with pytest.raises(RepositoryError): | |
206 | self.repo.get_changeset('abcd' * 10) |
|
206 | self.repo.get_changeset('abcd' * 10) | |
207 | # shortid |
|
207 | # shortid | |
208 | with pytest.raises(RepositoryError): |
|
208 | with pytest.raises(RepositoryError): | |
209 | self.repo.get_changeset('erro' * 4) |
|
209 | self.repo.get_changeset('erro' * 4) | |
210 | # numeric |
|
210 | # numeric | |
211 | with pytest.raises(RepositoryError): |
|
211 | with pytest.raises(RepositoryError): | |
212 | self.repo.get_changeset(self.repo.count() + 1) |
|
212 | self.repo.get_changeset(self.repo.count() + 1) | |
213 |
|
213 | |||
214 | # Small chance we ever get to this one |
|
214 | # Small chance we ever get to this one | |
215 | revision = pow(2, 30) |
|
215 | revision = pow(2, 30) | |
216 | with pytest.raises(RepositoryError): |
|
216 | with pytest.raises(RepositoryError): | |
217 | self.repo.get_changeset(revision) |
|
217 | self.repo.get_changeset(revision) | |
218 |
|
218 | |||
219 | def test_changeset10(self): |
|
219 | def test_changeset10(self): | |
220 |
|
220 | |||
221 | chset10 = self.repo.get_changeset(10) |
|
221 | chset10 = self.repo.get_changeset(10) | |
222 | readme = """=== |
|
222 | readme = """=== | |
223 | VCS |
|
223 | VCS | |
224 | === |
|
224 | === | |
225 |
|
225 | |||
226 | Various Version Control System management abstraction layer for Python. |
|
226 | Various Version Control System management abstraction layer for Python. | |
227 |
|
227 | |||
228 | Introduction |
|
228 | Introduction | |
229 | ------------ |
|
229 | ------------ | |
230 |
|
230 | |||
231 | TODO: To be written... |
|
231 | TODO: To be written... | |
232 |
|
232 | |||
233 | """ |
|
233 | """ | |
234 | node = chset10.get_node('README.rst') |
|
234 | node = chset10.get_node('README.rst') | |
235 | assert node.kind == NodeKind.FILE |
|
235 | assert node.kind == NodeKind.FILE | |
236 | assert node.content == readme |
|
236 | assert node.content == readme | |
237 |
|
237 | |||
238 | @mock.patch('kallithea.lib.vcs.backends.hg.repository.diffopts') |
|
238 | @mock.patch('kallithea.lib.vcs.backends.hg.repository.diffopts') | |
239 | def test_get_diff_does_not_sanitize_zero_context(self, mock_diffopts): |
|
239 | def test_get_diff_does_not_sanitize_zero_context(self, mock_diffopts): | |
240 | zero_context = 0 |
|
240 | zero_context = 0 | |
241 |
|
241 | |||
242 | self.repo.get_diff(0, 1, 'foo', context=zero_context) |
|
242 | self.repo.get_diff(0, 1, 'foo', context=zero_context) | |
243 |
|
243 | |||
244 | mock_diffopts.assert_called_once_with(git=True, showfunc=True, ignorews=False, context=zero_context) |
|
244 | mock_diffopts.assert_called_once_with(git=True, showfunc=True, ignorews=False, context=zero_context) | |
245 |
|
245 | |||
246 | @mock.patch('kallithea.lib.vcs.backends.hg.repository.diffopts') |
|
246 | @mock.patch('kallithea.lib.vcs.backends.hg.repository.diffopts') | |
247 | def test_get_diff_sanitizes_negative_context(self, mock_diffopts): |
|
247 | def test_get_diff_sanitizes_negative_context(self, mock_diffopts): | |
248 | negative_context = -10 |
|
248 | negative_context = -10 | |
249 | zero_context = 0 |
|
249 | zero_context = 0 | |
250 |
|
250 | |||
251 | self.repo.get_diff(0, 1, 'foo', context=negative_context) |
|
251 | self.repo.get_diff(0, 1, 'foo', context=negative_context) | |
252 |
|
252 | |||
253 | mock_diffopts.assert_called_once_with(git=True, showfunc=True, ignorews=False, context=zero_context) |
|
253 | mock_diffopts.assert_called_once_with(git=True, showfunc=True, ignorews=False, context=zero_context) | |
254 |
|
254 | |||
255 |
|
255 | |||
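
Unlike the Git backend, the two Mercurial tests above only check that a negative context is raised to 0 before being handed to mercurial's diffopts; zero and positive values are passed through unchanged. Under that assumption the sanitization amounts to a one-liner (hypothetical helper name, not the backend's actual code):

    def _sanitize_diff_context(context):
        # Mercurial accepts any non-negative context; only clamp negatives to 0.
        return max(0, context)
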
256 | class TestMercurialChangeset(object): |
|
256 | class TestMercurialChangeset(object): | |
257 |
|
257 | |||
258 | def setup_method(self): |
|
258 | def setup_method(self): | |
259 | self.repo = MercurialRepository(safe_str(TEST_HG_REPO)) |
|
259 | self.repo = MercurialRepository(safe_str(TEST_HG_REPO)) | |
260 |
|
260 | |||
261 | def _test_equality(self, changeset): |
|
261 | def _test_equality(self, changeset): | |
262 | revision = changeset.revision |
|
262 | revision = changeset.revision | |
263 | assert changeset == self.repo.get_changeset(revision) |
|
263 | assert changeset == self.repo.get_changeset(revision) | |
264 |
|
264 | |||
265 | def test_equality(self): |
|
265 | def test_equality(self): | |
266 | revs = [0, 10, 20] |
|
266 | revs = [0, 10, 20] | |
267 | changesets = [self.repo.get_changeset(rev) for rev in revs] |
|
267 | changesets = [self.repo.get_changeset(rev) for rev in revs] | |
268 | for changeset in changesets: |
|
268 | for changeset in changesets: | |
269 | self._test_equality(changeset) |
|
269 | self._test_equality(changeset) | |
270 |
|
270 | |||
271 | def test_default_changeset(self): |
|
271 | def test_default_changeset(self): | |
272 | tip = self.repo.get_changeset('tip') |
|
272 | tip = self.repo.get_changeset('tip') | |
273 | assert tip == self.repo.get_changeset() |
|
273 | assert tip == self.repo.get_changeset() | |
274 | assert tip == self.repo.get_changeset(revision=None) |
|
274 | assert tip == self.repo.get_changeset(revision=None) | |
275 | assert tip == list(self.repo[-1:])[0] |
|
275 | assert tip == list(self.repo[-1:])[0] | |
276 |
|
276 | |||
277 | def test_root_node(self): |
|
277 | def test_root_node(self): | |
278 | tip = self.repo.get_changeset('tip') |
|
278 | tip = self.repo.get_changeset('tip') | |
279 | assert tip.root is tip.get_node('') |
|
279 | assert tip.root is tip.get_node('') | |
280 |
|
280 | |||
281 | def test_lazy_fetch(self): |
|
281 | def test_lazy_fetch(self): | |
282 | """ |
|
282 | """ | |
283 | Test that a changeset's nodes expand and are cached as we walk through |

283 | Test that a changeset's nodes expand and are cached as we walk through | 
284 | the revision. This test is somewhat hard to write, as the order of |

284 | the revision. This test is somewhat hard to write, as the order of | 
285 | operations is key here. It was written by running command after command in a shell. |

285 | operations is key here. It was written by running command after command in a shell. | 
286 | """ |
|
286 | """ | |
287 | chset = self.repo.get_changeset(45) |
|
287 | chset = self.repo.get_changeset(45) | |
288 | assert len(chset.nodes) == 0 |
|
288 | assert len(chset.nodes) == 0 | |
289 | root = chset.root |
|
289 | root = chset.root | |
290 | assert len(chset.nodes) == 1 |
|
290 | assert len(chset.nodes) == 1 | |
291 | assert len(root.nodes) == 8 |
|
291 | assert len(root.nodes) == 8 | |
292 | # accessing root.nodes updates chset.nodes |
|
292 | # accessing root.nodes updates chset.nodes | |
293 | assert len(chset.nodes) == 9 |
|
293 | assert len(chset.nodes) == 9 | |
294 |
|
294 | |||
295 | docs = root.get_node('docs') |
|
295 | docs = root.get_node('docs') | |
296 | # we haven't yet accessed anything new as docs dir was already cached |
|
296 | # we haven't yet accessed anything new as docs dir was already cached | |
297 | assert len(chset.nodes) == 9 |
|
297 | assert len(chset.nodes) == 9 | |
298 | assert len(docs.nodes) == 8 |
|
298 | assert len(docs.nodes) == 8 | |
299 | # accessing docs.nodes updates chset.nodes |
|
299 | # accessing docs.nodes updates chset.nodes | |
300 | assert len(chset.nodes) == 17 |
|
300 | assert len(chset.nodes) == 17 | |
301 |
|
301 | |||
302 | assert docs is chset.get_node('docs') |
|
302 | assert docs is chset.get_node('docs') | |
303 | assert docs is root.nodes[0] |
|
303 | assert docs is root.nodes[0] | |
304 | assert docs is root.dirs[0] |
|
304 | assert docs is root.dirs[0] | |
305 | assert docs is chset.get_node('docs') |
|
305 | assert docs is chset.get_node('docs') | |
306 |
|
306 | |||
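
The lazy-fetch test above relies on nodes being loaded on first access and then memoized on the changeset, so that repeated lookups of 'docs' return the very same object. A minimal sketch of that caching pattern, with hypothetical names and not the backend's actual implementation:

    class LazyNodeCache(object):
        def __init__(self, loader):
            self._loader = loader   # callable: path -> freshly built node
            self.nodes = {}         # grows as paths are accessed, like chset.nodes above

        def get_node(self, path):
            if path not in self.nodes:
                self.nodes[path] = self._loader(path)
            return self.nodes[path]

    # Repeated access returns the identical cached object, as the test asserts with `is`.
    cache = LazyNodeCache(loader=lambda path: object())
    assert cache.get_node('docs') is cache.get_node('docs')
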
307 | def test_nodes_with_changeset(self): |
|
307 | def test_nodes_with_changeset(self): | |
308 | chset = self.repo.get_changeset(45) |
|
308 | chset = self.repo.get_changeset(45) | |
309 | root = chset.root |
|
309 | root = chset.root | |
310 | docs = root.get_node('docs') |
|
310 | docs = root.get_node('docs') | |
311 | assert docs is chset.get_node('docs') |
|
311 | assert docs is chset.get_node('docs') | |
312 | api = docs.get_node('api') |
|
312 | api = docs.get_node('api') | |
313 | assert api is chset.get_node('docs/api') |
|
313 | assert api is chset.get_node('docs/api') | |
314 | index = api.get_node('index.rst') |
|
314 | index = api.get_node('index.rst') | |
315 | assert index is chset.get_node('docs/api/index.rst') |
|
315 | assert index is chset.get_node('docs/api/index.rst') | |
316 | assert index is chset.get_node('docs').get_node('api').get_node('index.rst') |
|
316 | assert index is chset.get_node('docs').get_node('api').get_node('index.rst') | |
317 |
|
317 | |||
318 | def test_branch_and_tags(self): |
|
318 | def test_branch_and_tags(self): | |
319 | chset0 = self.repo.get_changeset(0) |
|
319 | chset0 = self.repo.get_changeset(0) | |
320 | assert chset0.branch == 'default' |
|
320 | assert chset0.branch == 'default' | |
321 | assert chset0.branches == ['default'] |
|
321 | assert chset0.branches == ['default'] | |
322 | assert chset0.tags == [] |
|
322 | assert chset0.tags == [] | |
323 |
|
323 | |||
324 | chset10 = self.repo.get_changeset(10) |
|
324 | chset10 = self.repo.get_changeset(10) | |
325 | assert chset10.branch == 'default' |
|
325 | assert chset10.branch == 'default' | |
326 | assert chset10.branches == ['default'] |
|
326 | assert chset10.branches == ['default'] | |
327 | assert chset10.tags == [] |
|
327 | assert chset10.tags == [] | |
328 |
|
328 | |||
329 | chset44 = self.repo.get_changeset(44) |
|
329 | chset44 = self.repo.get_changeset(44) | |
330 | assert chset44.branch == 'web' |
|
330 | assert chset44.branch == 'web' | |
331 | assert chset44.branches == ['web'] |
|
331 | assert chset44.branches == ['web'] | |
332 |
|
332 | |||
333 | tip = self.repo.get_changeset('tip') |
|
333 | tip = self.repo.get_changeset('tip') | |
334 | assert 'tip' in tip.tags |
|
334 | assert 'tip' in tip.tags | |
335 |
|
335 | |||
336 | def _test_file_size(self, revision, path, size): |
|
336 | def _test_file_size(self, revision, path, size): | |
337 | node = self.repo.get_changeset(revision).get_node(path) |
|
337 | node = self.repo.get_changeset(revision).get_node(path) | |
338 | assert node.is_file() |
|
338 | assert node.is_file() | |
339 | assert node.size == size |
|
339 | assert node.size == size | |
340 |
|
340 | |||
341 | def test_file_size(self): |
|
341 | def test_file_size(self): | |
342 | to_check = ( |
|
342 | to_check = ( | |
343 | (10, 'setup.py', 1068), |
|
343 | (10, 'setup.py', 1068), | |
344 | (20, 'setup.py', 1106), |
|
344 | (20, 'setup.py', 1106), | |
345 | (60, 'setup.py', 1074), |
|
345 | (60, 'setup.py', 1074), | |
346 |
|
346 | |||
347 | (10, 'vcs/backends/base.py', 2921), |
|
347 | (10, 'vcs/backends/base.py', 2921), | |
348 | (20, 'vcs/backends/base.py', 3936), |
|
348 | (20, 'vcs/backends/base.py', 3936), | |
349 | (60, 'vcs/backends/base.py', 6189), |
|
349 | (60, 'vcs/backends/base.py', 6189), | |
350 | ) |
|
350 | ) | |
351 | for revision, path, size in to_check: |
|
351 | for revision, path, size in to_check: | |
352 | self._test_file_size(revision, path, size) |
|
352 | self._test_file_size(revision, path, size) | |
353 |
|
353 | |||
354 | def _test_dir_size(self, revision, path, size): |
|
354 | def _test_dir_size(self, revision, path, size): | |
355 | node = self.repo.get_changeset(revision).get_node(path) |
|
355 | node = self.repo.get_changeset(revision).get_node(path) | |
356 | assert not node.is_file() |
|
356 | assert not node.is_file() | |
357 | assert node.size == size |
|
357 | assert node.size == size | |
358 |
|
358 | |||
359 | def test_dir_size(self): |
|
359 | def test_dir_size(self): | |
360 | to_check = ( |
|
360 | to_check = ( | |
361 | ('96507bd11ecc', '/', 682421), |
|
361 | ('96507bd11ecc', '/', 682421), | |
362 | ('a53d9201d4bc', '/', 682410), |
|
362 | ('a53d9201d4bc', '/', 682410), | |
363 | ('90243de06161', '/', 682006), |
|
363 | ('90243de06161', '/', 682006), | |
364 | ) |
|
364 | ) | |
365 | for revision, path, size in to_check: |
|
365 | for revision, path, size in to_check: | |
366 | self._test_dir_size(revision, path, size) |
|
366 | self._test_dir_size(revision, path, size) | |
367 |
|
367 | |||
368 | def test_repo_size(self): |
|
368 | def test_repo_size(self): | |
369 | assert self.repo.size == 682421 |
|
369 | assert self.repo.size == 682421 | |
370 |
|
370 | |||
371 | def test_file_history(self): |
|
371 | def test_file_history(self): | |
372 | # we can only check that those revisions are present in the history, |

372 | # we can only check that those revisions are present in the history, | 
373 | # as we cannot update this test every time the file is changed |

373 | # as we cannot update this test every time the file is changed | 
374 | files = { |
|
374 | files = { | |
375 | 'setup.py': [7, 18, 45, 46, 47, 69, 77], |
|
375 | 'setup.py': [7, 18, 45, 46, 47, 69, 77], | |
376 | 'vcs/nodes.py': [7, 8, 24, 26, 30, 45, 47, 49, 56, 57, 58, 59, 60, |
|
376 | 'vcs/nodes.py': [7, 8, 24, 26, 30, 45, 47, 49, 56, 57, 58, 59, 60, | |
377 | 61, 73, 76], |
|
377 | 61, 73, 76], | |
378 | 'vcs/backends/hg.py': [4, 5, 6, 11, 12, 13, 14, 15, 16, 21, 22, 23, |
|
378 | 'vcs/backends/hg.py': [4, 5, 6, 11, 12, 13, 14, 15, 16, 21, 22, 23, | |
379 | 26, 27, 28, 30, 31, 33, 35, 36, 37, 38, 39, 40, 41, 44, 45, 47, |
|
379 | 26, 27, 28, 30, 31, 33, 35, 36, 37, 38, 39, 40, 41, 44, 45, 47, | |
380 | 48, 49, 53, 54, 55, 58, 60, 61, 67, 68, 69, 70, 73, 77, 78, 79, |
|
380 | 48, 49, 53, 54, 55, 58, 60, 61, 67, 68, 69, 70, 73, 77, 78, 79, | |
381 | 82], |
|
381 | 82], | |
382 | } |
|
382 | } | |
383 | for path, revs in files.items(): |
|
383 | for path, revs in files.items(): | |
384 | tip = self.repo.get_changeset(revs[-1]) |
|
384 | tip = self.repo.get_changeset(revs[-1]) | |
385 | node = tip.get_node(path) |
|
385 | node = tip.get_node(path) | |
386 | node_revs = [chset.revision for chset in node.history] |
|
386 | node_revs = [chset.revision for chset in node.history] | |
387 | assert set(revs).issubset(set(node_revs)), \ |
|
387 | assert set(revs).issubset(set(node_revs)), \ | |
388 | "We assumed that %s is subset of revisions for which file %s " \ |
|
388 | "We assumed that %s is subset of revisions for which file %s " \ | |
389 | "has been changed, and history of that node returned: %s" \ |
|
389 | "has been changed, and history of that node returned: %s" \ | |
390 | % (revs, path, node_revs) |
|
390 | % (revs, path, node_revs) | |
391 |
|
391 | |||
392 | def test_file_annotate(self): |
|
392 | def test_file_annotate(self): | |
393 | files = { |
|
393 | files = { | |
394 | 'vcs/backends/__init__.py': |
|
394 | 'vcs/backends/__init__.py': | |
395 | {89: {'lines_no': 31, |
|
395 | {89: {'lines_no': 31, | |
396 | 'changesets': [32, 32, 61, 32, 32, 37, 32, 32, 32, 44, |
|
396 | 'changesets': [32, 32, 61, 32, 32, 37, 32, 32, 32, 44, | |
397 | 37, 37, 37, 37, 45, 37, 44, 37, 37, 37, |
|
397 | 37, 37, 37, 37, 45, 37, 44, 37, 37, 37, | |
398 | 32, 32, 32, 32, 37, 32, 37, 37, 32, |
|
398 | 32, 32, 32, 32, 37, 32, 37, 37, 32, | |
399 | 32, 32]}, |
|
399 | 32, 32]}, | |
400 | 20: {'lines_no': 1, |
|
400 | 20: {'lines_no': 1, | |
401 | 'changesets': [4]}, |
|
401 | 'changesets': [4]}, | |
402 | 55: {'lines_no': 31, |
|
402 | 55: {'lines_no': 31, | |
403 | 'changesets': [32, 32, 45, 32, 32, 37, 32, 32, 32, 44, |
|
403 | 'changesets': [32, 32, 45, 32, 32, 37, 32, 32, 32, 44, | |
404 | 37, 37, 37, 37, 45, 37, 44, 37, 37, 37, |
|
404 | 37, 37, 37, 37, 45, 37, 44, 37, 37, 37, | |
405 | 32, 32, 32, 32, 37, 32, 37, 37, 32, |
|
405 | 32, 32, 32, 32, 37, 32, 37, 37, 32, | |
406 | 32, 32]}}, |
|
406 | 32, 32]}}, | |
407 | 'vcs/exceptions.py': |
|
407 | 'vcs/exceptions.py': | |
408 | {89: {'lines_no': 18, |
|
408 | {89: {'lines_no': 18, | |
409 | 'changesets': [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, |
|
409 | 'changesets': [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, | |
410 | 16, 16, 17, 16, 16, 18, 18, 18]}, |
|
410 | 16, 16, 17, 16, 16, 18, 18, 18]}, | |
411 | 20: {'lines_no': 18, |
|
411 | 20: {'lines_no': 18, | |
412 | 'changesets': [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, |
|
412 | 'changesets': [16, 16, 16, 16, 16, 16, 16, 16, 16, 16, | |
413 | 16, 16, 17, 16, 16, 18, 18, 18]}, |
|
413 | 16, 16, 17, 16, 16, 18, 18, 18]}, | |
414 | 55: {'lines_no': 18, 'changesets': [16, 16, 16, 16, 16, 16, |
|
414 | 55: {'lines_no': 18, 'changesets': [16, 16, 16, 16, 16, 16, | |
415 | 16, 16, 16, 16, 16, 16, |
|
415 | 16, 16, 16, 16, 16, 16, | |
416 | 17, 16, 16, 18, 18, 18]}}, |
|
416 | 17, 16, 16, 18, 18, 18]}}, | |
417 | 'MANIFEST.in': {89: {'lines_no': 5, |
|
417 | 'MANIFEST.in': {89: {'lines_no': 5, | |
418 | 'changesets': [7, 7, 7, 71, 71]}, |
|
418 | 'changesets': [7, 7, 7, 71, 71]}, | |
419 | 20: {'lines_no': 3, |
|
419 | 20: {'lines_no': 3, | |
420 | 'changesets': [7, 7, 7]}, |
|
420 | 'changesets': [7, 7, 7]}, | |
421 | 55: {'lines_no': 3, |
|
421 | 55: {'lines_no': 3, | |
422 | 'changesets': [7, 7, 7]}}} |
|
422 | 'changesets': [7, 7, 7]}}} | |
423 |
|
423 | |||
424 | for fname, revision_dict in files.items(): |
|
424 | for fname, revision_dict in files.items(): | |
425 | for rev, data in revision_dict.items(): |
|
425 | for rev, data in revision_dict.items(): | |
426 | cs = self.repo.get_changeset(rev) |
|
426 | cs = self.repo.get_changeset(rev) | |
427 | l1_1 = [x[1] for x in cs.get_file_annotate(fname)] |
|
427 | l1_1 = [x[1] for x in cs.get_file_annotate(fname)] | |
428 | l1_2 = [x[2]().raw_id for x in cs.get_file_annotate(fname)] |
|
428 | l1_2 = [x[2]().raw_id for x in cs.get_file_annotate(fname)] | |
429 | assert l1_1 == l1_2 |
|
429 | assert l1_1 == l1_2 | |
430 | l1 = l1_2 = [x[2]().revision for x in cs.get_file_annotate(fname)] |
|
430 | l1 = l1_2 = [x[2]().revision for x in cs.get_file_annotate(fname)] | |
431 | l2 = files[fname][rev]['changesets'] |
|
431 | l2 = files[fname][rev]['changesets'] | |
432 | assert l1 == l2, "The lists of revisions for %s@rev%s " \ |

432 | assert l1 == l2, "The lists of revisions for %s@rev%s " \ | 
433 | "from annotation list should match each other, " \ |

433 | "from annotation list should match each other, " \ | 
434 | "got \n%s \nvs \n%s " % (fname, rev, l1, l2) |

434 | "got \n%s \nvs \n%s " % (fname, rev, l1, l2) | 
435 |
|
435 | |||
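
The annotate test above encodes the shape of get_file_annotate rows: index 1 holds the raw id of the changeset that last touched the line, and index 2 holds a zero-argument callable that lazily resolves to that changeset. A small usage sketch built only on those two indices, using the imports at the top of this file ('MANIFEST.in' is one of the files from the test data above; variable names are illustrative):

    repo = MercurialRepository(safe_str(TEST_HG_REPO))
    cs = repo.get_changeset(20)
    for row in cs.get_file_annotate('MANIFEST.in'):
        raw_id = row[1]        # raw id of the changeset that last changed this line
        changeset = row[2]()   # the changeset itself, resolved lazily
        assert changeset.raw_id == raw_id
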
436 | def test_changeset_state(self): |
|
436 | def test_changeset_state(self): | |
437 | """ |
|
437 | """ | |
438 | Tests which files have been added/changed/removed at particular revision |
|
438 | Tests which files have been added/changed/removed at particular revision | |
439 | """ |
|
439 | """ | |
440 |
|
440 | |||
441 | # rev 46ad32a4f974: |
|
441 | # rev 46ad32a4f974: | |
442 | # hg st --rev 46ad32a4f974 |
|
442 | # hg st --rev 46ad32a4f974 | |
443 | # changed: 13 |
|
443 | # changed: 13 | |
444 | # added: 20 |
|
444 | # added: 20 | |
445 | # removed: 1 |
|
445 | # removed: 1 | |
446 | changed = set(['.hgignore' |
|
446 | changed = set(['.hgignore' | |
447 | , 'README.rst', 'docs/conf.py', 'docs/index.rst', 'setup.py' |
|
447 | , 'README.rst', 'docs/conf.py', 'docs/index.rst', 'setup.py' | |
448 | , 'tests/test_hg.py', 'tests/test_nodes.py', 'vcs/__init__.py' |
|
448 | , 'tests/test_hg.py', 'tests/test_nodes.py', 'vcs/__init__.py' | |
449 | , 'vcs/backends/__init__.py', 'vcs/backends/base.py' |
|
449 | , 'vcs/backends/__init__.py', 'vcs/backends/base.py' | |
450 | , 'vcs/backends/hg.py', 'vcs/nodes.py', 'vcs/utils/__init__.py']) |
|
450 | , 'vcs/backends/hg.py', 'vcs/nodes.py', 'vcs/utils/__init__.py']) | |
451 |
|
451 | |||
452 | added = set(['docs/api/backends/hg.rst' |
|
452 | added = set(['docs/api/backends/hg.rst' | |
453 | , 'docs/api/backends/index.rst', 'docs/api/index.rst' |
|
453 | , 'docs/api/backends/index.rst', 'docs/api/index.rst' | |
454 | , 'docs/api/nodes.rst', 'docs/api/web/index.rst' |
|
454 | , 'docs/api/nodes.rst', 'docs/api/web/index.rst' | |
455 | , 'docs/api/web/simplevcs.rst', 'docs/installation.rst' |
|
455 | , 'docs/api/web/simplevcs.rst', 'docs/installation.rst' | |
456 | , 'docs/quickstart.rst', 'setup.cfg', 'vcs/utils/baseui_config.py' |
|
456 | , 'docs/quickstart.rst', 'setup.cfg', 'vcs/utils/baseui_config.py' | |
457 | , 'vcs/utils/web.py', 'vcs/web/__init__.py', 'vcs/web/exceptions.py' |
|
457 | , 'vcs/utils/web.py', 'vcs/web/__init__.py', 'vcs/web/exceptions.py' | |
458 | , 'vcs/web/simplevcs/__init__.py', 'vcs/web/simplevcs/exceptions.py' |
|
458 | , 'vcs/web/simplevcs/__init__.py', 'vcs/web/simplevcs/exceptions.py' | |
459 | , 'vcs/web/simplevcs/middleware.py', 'vcs/web/simplevcs/models.py' |
|
459 | , 'vcs/web/simplevcs/middleware.py', 'vcs/web/simplevcs/models.py' | |
460 | , 'vcs/web/simplevcs/settings.py', 'vcs/web/simplevcs/utils.py' |
|
460 | , 'vcs/web/simplevcs/settings.py', 'vcs/web/simplevcs/utils.py' | |
461 | , 'vcs/web/simplevcs/views.py']) |
|
461 | , 'vcs/web/simplevcs/views.py']) | |
462 |
|
462 | |||
463 | removed = set(['docs/api.rst']) |
|
463 | removed = set(['docs/api.rst']) | |
464 |
|
464 | |||
465 | chset64 = self.repo.get_changeset('46ad32a4f974') |
|
465 | chset64 = self.repo.get_changeset('46ad32a4f974') | |
466 | assert set((node.path for node in chset64.added)) == added |
|
466 | assert set((node.path for node in chset64.added)) == added | |
467 | assert set((node.path for node in chset64.changed)) == changed |
|
467 | assert set((node.path for node in chset64.changed)) == changed | |
468 | assert set((node.path for node in chset64.removed)) == removed |
|
468 | assert set((node.path for node in chset64.removed)) == removed | |
469 |
|
469 | |||
470 | # rev b090f22d27d6: |
|
470 | # rev b090f22d27d6: | |
471 | # hg st --rev b090f22d27d6 |
|
471 | # hg st --rev b090f22d27d6 | |
472 | # changed: 1 |

472 | # changed: 1 | 
473 | # added: 0 |

473 | # added: 0 | 
474 | # removed: 0 |

474 | # removed: 0 | 
475 | chset88 = self.repo.get_changeset('b090f22d27d6') |
|
475 | chset88 = self.repo.get_changeset('b090f22d27d6') | |
476 | assert set((node.path for node in chset88.added)) == set() |
|
476 | assert set((node.path for node in chset88.added)) == set() | |
477 | assert set((node.path for node in chset88.changed)) == set(['.hgignore']) |
|
477 | assert set((node.path for node in chset88.changed)) == set(['.hgignore']) | |
478 | assert set((node.path for node in chset88.removed)) == set() |
|
478 | assert set((node.path for node in chset88.removed)) == set() | |
479 |
|
479 | |||
480 | # 85: |
|
480 | # 85: | |
481 | # added: 2 ['vcs/utils/diffs.py', 'vcs/web/simplevcs/views/diffs.py'] |
|
481 | # added: 2 ['vcs/utils/diffs.py', 'vcs/web/simplevcs/views/diffs.py'] | |
482 | # changed: 4 ['vcs/web/simplevcs/models.py', ...] |
|
482 | # changed: 4 ['vcs/web/simplevcs/models.py', ...] | |
483 | # removed: 1 ['vcs/utils/web.py'] |
|
483 | # removed: 1 ['vcs/utils/web.py'] | |
484 | chset85 = self.repo.get_changeset(85) |
|
484 | chset85 = self.repo.get_changeset(85) | |
485 | assert set((node.path for node in chset85.added)) == set([ |
|
485 | assert set((node.path for node in chset85.added)) == set([ | |
486 | 'vcs/utils/diffs.py', |
|
486 | 'vcs/utils/diffs.py', | |
487 | 'vcs/web/simplevcs/views/diffs.py' |
|
487 | 'vcs/web/simplevcs/views/diffs.py' | |
488 | ]) |
|
488 | ]) | |
489 |
|
489 | |||
490 | assert set((node.path for node in chset85.changed)) == set([ |
|
490 | assert set((node.path for node in chset85.changed)) == set([ | |
491 | 'vcs/web/simplevcs/models.py', |
|
491 | 'vcs/web/simplevcs/models.py', | |
492 | 'vcs/web/simplevcs/utils.py', |
|
492 | 'vcs/web/simplevcs/utils.py', | |
493 | 'vcs/web/simplevcs/views/__init__.py', |
|
493 | 'vcs/web/simplevcs/views/__init__.py', | |
494 | 'vcs/web/simplevcs/views/repository.py', |
|
494 | 'vcs/web/simplevcs/views/repository.py', | |
495 | ]) |
|
495 | ]) | |
496 |
|
496 | |||
497 | assert set((node.path for node in chset85.removed)) == set([ |
|
497 | assert set((node.path for node in chset85.removed)) == set([ | |
498 | 'vcs/utils/web.py' |
|
498 | 'vcs/utils/web.py' | |
499 | ]) |
|
499 | ]) | |
500 |
|
500 | |||
501 |
|
501 | |||
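
test_changeset_state above relies on each changeset exposing added, changed and removed collections of nodes, where every node carries its path. A short usage sketch built only on those attributes, assuming the same test repository opened in setup_method (variable names are illustrative):

    repo = MercurialRepository(safe_str(TEST_HG_REPO))
    chset = repo.get_changeset(85)
    status = {
        'added':   sorted(node.path for node in chset.added),
        'changed': sorted(node.path for node in chset.changed),
        'removed': sorted(node.path for node in chset.removed),
    }
    assert 'vcs/utils/diffs.py' in status['added']
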
502 | def test_files_state(self): |
|
502 | def test_files_state(self): | |
503 | """ |
|
503 | """ | |
504 | Tests state of FileNodes. |
|
504 | Tests state of FileNodes. | |
505 | """ |
|
505 | """ | |
506 | chset = self.repo.get_changeset(85) |
|
506 | chset = self.repo.get_changeset(85) | |
507 | node = chset.get_node('vcs/utils/diffs.py') |
|
507 | node = chset.get_node('vcs/utils/diffs.py') | |
508 | assert node.state == NodeState.ADDED |

508 | assert node.state == NodeState.ADDED | 
509 | assert node.added |
|
509 | assert node.added | |
510 | assert not node.changed |
|
510 | assert not node.changed | |
511 | assert not node.not_changed |
|
511 | assert not node.not_changed | |
512 | assert not node.removed |
|
512 | assert not node.removed | |
513 |
|
513 | |||
514 | chset = self.repo.get_changeset(88) |
|
514 | chset = self.repo.get_changeset(88) | |
515 | node = chset.get_node('.hgignore') |
|
515 | node = chset.get_node('.hgignore') | |
516 | assert node.state == NodeState.CHANGED |

516 | assert node.state == NodeState.CHANGED | 
517 | assert not node.added |
|
517 | assert not node.added | |
518 | assert node.changed |
|
518 | assert node.changed | |
519 | assert not node.not_changed |
|
519 | assert not node.not_changed | |
520 | assert not node.removed |
|
520 | assert not node.removed | |
521 |
|
521 | |||
522 | chset = self.repo.get_changeset(85) |
|
522 | chset = self.repo.get_changeset(85) | |
523 | node = chset.get_node('setup.py') |
|
523 | node = chset.get_node('setup.py') | |
524 | assert node.state == NodeState.NOT_CHANGED |

524 | assert node.state == NodeState.NOT_CHANGED | 
525 | assert not node.added |
|
525 | assert not node.added | |
526 | assert not node.changed |
|
526 | assert not node.changed | |
527 | assert node.not_changed |
|
527 | assert node.not_changed | |
528 | assert not node.removed |
|
528 | assert not node.removed | |
529 |
|
529 | |||
530 | # If a node has the REMOVED state, trying to fetch it raises |

530 | # If a node has the REMOVED state, trying to fetch it raises | 
531 | # a NodeDoesNotExistError |

531 | # a NodeDoesNotExistError | 
532 | chset = self.repo.get_changeset(2) |
|
532 | chset = self.repo.get_changeset(2) | |
533 | path = 'vcs/backends/BaseRepository.py' |
|
533 | path = 'vcs/backends/BaseRepository.py' | |
534 | with pytest.raises(NodeDoesNotExistError): |
|
534 | with pytest.raises(NodeDoesNotExistError): | |
535 | chset.get_node(path) |
|
535 | chset.get_node(path) | |
536 | # but it will be listed in ``removed`` (the changeset's attribute) |

536 | # but it will be listed in ``removed`` (the changeset's attribute) | 
537 | assert path in [rf.path for rf in chset.removed] |
|
537 | assert path in [rf.path for rf in chset.removed] | |
538 |
|
538 | |||
539 | def test_commit_message_is_unicode(self): |
|
539 | def test_commit_message_is_unicode(self): | |
540 | for cm in self.repo: |
|
540 | for cm in self.repo: | |
541 | assert isinstance(cm.message, unicode) |
|
541 | assert isinstance(cm.message, unicode) | |
542 |
|
542 | |||
543 | def test_changeset_author_is_unicode(self): |
|
543 | def test_changeset_author_is_unicode(self): | |
544 | for cm in self.repo: |
|
544 | for cm in self.repo: | |
545 | assert isinstance(cm.author, unicode) |
|
545 | assert isinstance(cm.author, unicode) | |
546 |
|
546 | |||
547 |
def test_repo_files_content_is_ |
|
547 | def test_repo_files_content_is_bytes(self): | |
548 | test_changeset = self.repo.get_changeset(100) |
|
548 | test_changeset = self.repo.get_changeset(100) | |
549 | for node in test_changeset.get_node('/'): |
|
549 | for node in test_changeset.get_node('/'): | |
550 | if node.is_file(): |
|
550 | if node.is_file(): | |
551 |
assert isinstance(node.content, |
|
551 | assert isinstance(node.content, bytes) | |
552 |
|
552 | |||
553 | def test_wrong_path(self): |
|
553 | def test_wrong_path(self): | |
554 | # There is 'setup.py' in the root dir but not there: |
|
554 | # There is 'setup.py' in the root dir but not there: | |
555 | path = 'foo/bar/setup.py' |
|
555 | path = 'foo/bar/setup.py' | |
556 | with pytest.raises(VCSError): |
|
556 | with pytest.raises(VCSError): | |
557 | self.repo.get_changeset().get_node(path) |
|
557 | self.repo.get_changeset().get_node(path) | |
558 |
|
558 | |||
559 | def test_archival_file(self): |
|
559 | def test_archival_file(self): | |
560 | # TODO: |
|
560 | # TODO: | |
561 | pass |
|
561 | pass | |
562 |
|
562 | |||
563 | def test_archival_as_generator(self): |
|
563 | def test_archival_as_generator(self): | |
564 | # TODO: |
|
564 | # TODO: | |
565 | pass |
|
565 | pass | |
566 |
|
566 | |||
567 | def test_archival_wrong_kind(self): |
|
567 | def test_archival_wrong_kind(self): | |
568 | tip = self.repo.get_changeset() |
|
568 | tip = self.repo.get_changeset() | |
569 | with pytest.raises(VCSError): |
|
569 | with pytest.raises(VCSError): | |
570 | tip.fill_archive(kind='error') |
|
570 | tip.fill_archive(kind='error') | |
571 |
|
571 | |||
572 | def test_archival_empty_prefix(self): |
|
572 | def test_archival_empty_prefix(self): | |
573 | # TODO: |
|
573 | # TODO: | |
574 | pass |
|
574 | pass | |
575 |
|
575 | |||
576 | def test_author_email(self): |
|
576 | def test_author_email(self): | |
577 | assert 'marcin@python-blog.com' == self.repo.get_changeset('b986218ba1c9').author_email |
|
577 | assert 'marcin@python-blog.com' == self.repo.get_changeset('b986218ba1c9').author_email | |
578 | assert 'lukasz.balcerzak@python-center.pl' == self.repo.get_changeset('3803844fdbd3').author_email |
|
578 | assert 'lukasz.balcerzak@python-center.pl' == self.repo.get_changeset('3803844fdbd3').author_email | |
579 | assert '' == self.repo.get_changeset('84478366594b').author_email |
|
579 | assert '' == self.repo.get_changeset('84478366594b').author_email | |
580 |
|
580 | |||
581 | def test_author_username(self): |
|
581 | def test_author_username(self): | |
582 | assert 'Marcin Kuzminski' == self.repo.get_changeset('b986218ba1c9').author_name |
|
582 | assert 'Marcin Kuzminski' == self.repo.get_changeset('b986218ba1c9').author_name | |
583 | assert 'Lukasz Balcerzak' == self.repo.get_changeset('3803844fdbd3').author_name |
|
583 | assert 'Lukasz Balcerzak' == self.repo.get_changeset('3803844fdbd3').author_name | |
584 | assert 'marcink' == self.repo.get_changeset('84478366594b').author_name |
|
584 | assert 'marcink' == self.repo.get_changeset('84478366594b').author_name | |
585 |
|
585 | |||
586 | def test_successors(self): |
|
586 | def test_successors(self): | |
587 | init_chset = self.repo.get_changeset(0) |
|
587 | init_chset = self.repo.get_changeset(0) | |
588 | assert init_chset.successors == [] |
|
588 | assert init_chset.successors == [] | |
589 |
|
589 | |||
590 | def test_predecessors(self): |
|
590 | def test_predecessors(self): | |
591 | init_chset = self.repo.get_changeset(0) |
|
591 | init_chset = self.repo.get_changeset(0) | |
592 | assert len(init_chset.predecessors) == 0 |
|
592 | assert len(init_chset.predecessors) == 0 |
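
The tests above exercise the changeset/node API of the vcs backends: get_changeset(), get_node(), the ``added``/``changed``/``removed`` collections and the ``NodeState`` flags. As a rough, hypothetical sketch of how that API is typically consumed outside the test suite (the helper name and the ``repo`` argument are illustrative and not part of this changeset), one might group a changeset's file paths by state like this:

# Illustrative sketch only: assumes a ``repo`` object exposing the same
# changeset API the tests above rely on (``get_changeset()`` plus the
# ``added``/``changed``/``removed`` node collections on a changeset).
def summarize_changeset(repo, revision):
    """Return the file paths touched by ``revision``, grouped by state."""
    chset = repo.get_changeset(revision)
    return {
        'added': [node.path for node in chset.added],
        'changed': [node.path for node in chset.changed],
        'removed': [node.path for node in chset.removed],
    }

# Hypothetical usage: for the repository used above, summarize_changeset(repo, 85)
# would list 'vcs/utils/web.py' under 'removed', matching test_files_state.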