@@ -1,534 +1,557 @@
 # -*- coding: utf-8 -*-

 # Copyright (C) 2011-2018 RhodeCode GmbH
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License, version 3
 # (only), as published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #
 # This program is dual-licensed. If you wish to learn more about the
 # RhodeCode Enterprise Edition, including its added features, Support services,
 # and proprietary license terms, please see https://rhodecode.com/licenses/


 """
 Renderer for markup languages with ability to parse using rst or markdown
 """

 import re
 import os
 import lxml
 import logging
 import urlparse
 import bleach

 from mako.lookup import TemplateLookup
 from mako.template import Template as MakoTemplate

 from docutils.core import publish_parts
 from docutils.parsers.rst import directives
 from docutils import writers
 from docutils.writers import html4css1
 import markdown

 from rhodecode.lib.markdown_ext import GithubFlavoredMarkdownExtension
-from rhodecode.lib.utils2 import (
-    safe_str, safe_unicode, md5_safe, MENTIONS_REGEX)
+from rhodecode.lib.utils2 import (safe_unicode, md5_safe, MENTIONS_REGEX)

 log = logging.getLogger(__name__)

 # default renderer used to generate automated comments
 DEFAULT_COMMENTS_RENDERER = 'rst'


 class CustomHTMLTranslator(writers.html4css1.HTMLTranslator):
     """
     Custom HTML Translator used for sandboxing potential
     JS injections in ref links
     """

     def visit_reference(self, node):
         if 'refuri' in node.attributes:
             refuri = node['refuri']
             if ':' in refuri:
                 prefix, link = refuri.lstrip().split(':', 1)
                 prefix = prefix or ''

                 if prefix.lower() == 'javascript':
                     # we don't allow javascript type of refs...
                     node['refuri'] = 'javascript:alert("SandBoxedJavascript")'

         # old style class requires this...
         return html4css1.HTMLTranslator.visit_reference(self, node)


 class RhodeCodeWriter(writers.html4css1.Writer):
     def __init__(self):
         writers.Writer.__init__(self)
         self.translator_class = CustomHTMLTranslator


 def relative_links(html_source, server_paths):
     if not html_source:
         return html_source

     try:
         from lxml.html import fromstring
         from lxml.html import tostring
     except ImportError:
         log.exception('Failed to import lxml')
         return html_source

     try:
         doc = lxml.html.fromstring(html_source)
     except Exception:
         return html_source

     for el in doc.cssselect('img, video'):
         src = el.attrib.get('src')
         if src:
             el.attrib['src'] = relative_path(src, server_paths['raw'])

     for el in doc.cssselect('a:not(.gfm)'):
         src = el.attrib.get('href')
         if src:
             raw_mode = el.attrib['href'].endswith('?raw=1')
             if raw_mode:
                 el.attrib['href'] = relative_path(src, server_paths['raw'])
             else:
                 el.attrib['href'] = relative_path(src, server_paths['standard'])

     return lxml.html.tostring(doc)


 def relative_path(path, request_path, is_repo_file=None):
     """
     relative link support, path is a rel path, and request_path is current
     server path (not absolute)

     e.g.

     path = '../logo.png'
     request_path= '/repo/files/path/file.md'
     produces: '/repo/files/logo.png'
     """
     # TODO(marcink): unicode/str support ?
     # maybe=> safe_unicode(urllib.quote(safe_str(final_path), '/:'))

     def dummy_check(p):
         return True  # assume default is a valid file path

     is_repo_file = is_repo_file or dummy_check
     if not path:
         return request_path

     path = safe_unicode(path)
     request_path = safe_unicode(request_path)

     if path.startswith((u'data:', u'javascript:', u'#', u':')):
         # skip data, anchor, invalid links
         return path

     is_absolute = bool(urlparse.urlparse(path).netloc)
     if is_absolute:
         return path

     if not request_path:
         return path

     if path.startswith(u'/'):
         path = path[1:]

     if path.startswith(u'./'):
         path = path[2:]

     parts = request_path.split('/')
     # compute how deep we need to traverse the request_path
     depth = 0

     if is_repo_file(request_path):
         # if request path is a VALID file, we use a relative path with
         # one level up
         depth += 1

     while path.startswith(u'../'):
         depth += 1
         path = path[3:]

     if depth > 0:
         parts = parts[:-depth]

     parts.append(path)
     final_path = u'/'.join(parts).lstrip(u'/')

     return u'/' + final_path


+_cached_markdown_renderer = None
+
+
+def get_markdown_renderer(extensions, output_format):
+    global _cached_markdown_renderer
+
+    if _cached_markdown_renderer is None:
+        _cached_markdown_renderer = markdown.Markdown(
+            extensions=extensions,
+            enable_attributes=False, output_format=output_format)
+    return _cached_markdown_renderer
+
+
+_cached_markdown_renderer_flavored = None
+
+
+def get_markdown_renderer_flavored(extensions, output_format):
+    global _cached_markdown_renderer_flavored
+
+    if _cached_markdown_renderer_flavored is None:
+        _cached_markdown_renderer_flavored = markdown.Markdown(
+            extensions=extensions + [GithubFlavoredMarkdownExtension()],
+            enable_attributes=False, output_format=output_format)
+    return _cached_markdown_renderer_flavored
+
+
 class MarkupRenderer(object):
     RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES = ['include', 'meta', 'raw']

     MARKDOWN_PAT = re.compile(r'\.(md|mkdn?|mdown|markdown)$', re.IGNORECASE)
     RST_PAT = re.compile(r'\.re?st$', re.IGNORECASE)
     JUPYTER_PAT = re.compile(r'\.(ipynb)$', re.IGNORECASE)
     PLAIN_PAT = re.compile(r'^readme$', re.IGNORECASE)

     URL_PAT = re.compile(r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
                          r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')

-    extensions = ['codehilite', 'extra', 'def_list', 'sane_lists']
+    extensions = ['markdown.extensions.codehilite', 'markdown.extensions.extra',
+                  'markdown.extensions.def_list', 'markdown.extensions.sane_lists']
+
     output_format = 'html4'
-    markdown_renderer = markdown.Markdown(
-        extensions, enable_attributes=False, output_format=output_format)
-
-    markdown_renderer_flavored = markdown.Markdown(
-        extensions + [GithubFlavoredMarkdownExtension()],
-        enable_attributes=False, output_format=output_format)

     # extension together with weights. Lower is first means we control how
     # extensions are attached to readme names with those.
     PLAIN_EXTS = [
         # prefer no extension
         ('', 0),  # special case that renders READMES names without extension
         ('.text', 2), ('.TEXT', 2),
         ('.txt', 3), ('.TXT', 3)
     ]

     RST_EXTS = [
         ('.rst', 1), ('.rest', 1),
         ('.RST', 2), ('.REST', 2)
     ]

     MARKDOWN_EXTS = [
         ('.md', 1), ('.MD', 1),
         ('.mkdn', 2), ('.MKDN', 2),
         ('.mdown', 3), ('.MDOWN', 3),
         ('.markdown', 4), ('.MARKDOWN', 4)
     ]

     def _detect_renderer(self, source, filename=None):
         """
         runs detection of what renderer should be used for generating html
         from a markup language

         filename can be also explicitly a renderer name

         :param source:
         :param filename:
         """

         if MarkupRenderer.MARKDOWN_PAT.findall(filename):
             detected_renderer = 'markdown'
         elif MarkupRenderer.RST_PAT.findall(filename):
             detected_renderer = 'rst'
         elif MarkupRenderer.JUPYTER_PAT.findall(filename):
             detected_renderer = 'jupyter'
         elif MarkupRenderer.PLAIN_PAT.findall(filename):
             detected_renderer = 'plain'
         else:
             detected_renderer = 'plain'

         return getattr(MarkupRenderer, detected_renderer)

     @classmethod
     def bleach_clean(cls, text):
         from .bleach_whitelist import markdown_attrs, markdown_tags
         allowed_tags = markdown_tags
         allowed_attrs = markdown_attrs

         try:
             return bleach.clean(text, tags=allowed_tags, attributes=allowed_attrs)
         except Exception:
             return 'UNPARSEABLE TEXT'

     @classmethod
     def renderer_from_filename(cls, filename, exclude):
         """
         Detect renderer markdown/rst from filename and optionally use exclude
         list to remove some options. This is mostly used in helpers.
         Returns None when no renderer can be detected.
         """
         def _filter(elements):
             if isinstance(exclude, (list, tuple)):
                 return [x for x in elements if x not in exclude]
             return elements

         if filename.endswith(
                 tuple(_filter([x[0] for x in cls.MARKDOWN_EXTS if x[0]]))):
             return 'markdown'
         if filename.endswith(tuple(_filter([x[0] for x in cls.RST_EXTS if x[0]]))):
             return 'rst'

         return None

     def render(self, source, filename=None):
         """
         Renders a given filename using detected renderer
         it detects renderers based on file extension or mimetype.
         At last it will just do a simple html replacing new lines with <br/>

         :param file_name:
         :param source:
         """

         renderer = self._detect_renderer(source, filename)
         readme_data = renderer(source)
         return readme_data

     @classmethod
     def _flavored_markdown(cls, text):
         """
         Github style flavored markdown

         :param text:
         """

         # Extract pre blocks.
         extractions = {}

         def pre_extraction_callback(matchobj):
             digest = md5_safe(matchobj.group(0))
             extractions[digest] = matchobj.group(0)
             return "{gfm-extraction-%s}" % digest
         pattern = re.compile(r'<pre>.*?</pre>', re.MULTILINE | re.DOTALL)
         text = re.sub(pattern, pre_extraction_callback, text)

         # Prevent foo_bar_baz from ending up with an italic word in the middle.
         def italic_callback(matchobj):
             s = matchobj.group(0)
             if list(s).count('_') >= 2:
                 return s.replace('_', r'\_')
             return s
         text = re.sub(r'^(?! {4}|\t)\w+_\w+_\w[\w_]*', italic_callback, text)

         # Insert pre block extractions.
         def pre_insert_callback(matchobj):
             return '\n\n' + extractions[matchobj.group(1)]
         text = re.sub(r'\{gfm-extraction-([0-9a-f]{32})\}',
                       pre_insert_callback, text)

         return text

     @classmethod
     def urlify_text(cls, text):
         def url_func(match_obj):
             url_full = match_obj.groups()[0]
             return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})

         return cls.URL_PAT.sub(url_func, text)

     @classmethod
     def plain(cls, source, universal_newline=True, leading_newline=True):
         source = safe_unicode(source)
         if universal_newline:
             newline = '\n'
             source = newline.join(source.splitlines())

         rendered_source = cls.urlify_text(source)
         source = ''
         if leading_newline:
             source += '<br />'
         source += rendered_source.replace("\n", '<br />')
         return source

     @classmethod
     def markdown(cls, source, safe=True, flavored=True, mentions=False,
                  clean_html=True):
         """
         returns markdown rendered code cleaned by the bleach library
         """

         if flavored:
-            markdown_renderer = cls.markdown_renderer_flavored
+            markdown_renderer = get_markdown_renderer_flavored(
+                cls.extensions, cls.output_format)
         else:
-            markdown_renderer = cls.markdown_renderer
+            markdown_renderer = get_markdown_renderer(
+                cls.extensions, cls.output_format)

         if mentions:
             mention_pat = re.compile(MENTIONS_REGEX)

             def wrapp(match_obj):
                 uname = match_obj.groups()[0]
                 return ' **@%(uname)s** ' % {'uname': uname}
             mention_hl = mention_pat.sub(wrapp, source).strip()
             # we extracted mentions render with this using Mentions false
             return cls.markdown(mention_hl, safe=safe, flavored=flavored,
                                 mentions=False)

         source = safe_unicode(source)

         try:
             if flavored:
                 source = cls._flavored_markdown(source)
             rendered = markdown_renderer.convert(source)
         except Exception:
             log.exception('Error when rendering Markdown')
             if safe:
                 log.debug('Fallback to render in plain mode')
                 rendered = cls.plain(source)
             else:
                 raise

         if clean_html:
             rendered = cls.bleach_clean(rendered)
         return rendered

     @classmethod
     def rst(cls, source, safe=True, mentions=False, clean_html=False):
         if mentions:
             mention_pat = re.compile(MENTIONS_REGEX)

             def wrapp(match_obj):
                 uname = match_obj.groups()[0]
                 return ' **@%(uname)s** ' % {'uname': uname}
             mention_hl = mention_pat.sub(wrapp, source).strip()
             # we extracted mentions render with this using Mentions false
             return cls.rst(mention_hl, safe=safe, mentions=False)

         source = safe_unicode(source)
         try:
             docutils_settings = dict(
                 [(alias, None) for alias in
                  cls.RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES])

             docutils_settings.update({
                 'input_encoding': 'unicode', 'report_level': 4})

             for k, v in docutils_settings.iteritems():
                 directives.register_directive(k, v)

             parts = publish_parts(source=source,
                                   writer=RhodeCodeWriter(),
                                   settings_overrides=docutils_settings)
             rendered = parts["fragment"]
             if clean_html:
                 rendered = cls.bleach_clean(rendered)
             return parts['html_title'] + rendered
         except Exception:
             log.exception('Error when rendering RST')
             if safe:
                 log.debug('Fallbacking to render in plain mode')
                 return cls.plain(source)
             else:
                 raise

     @classmethod
     def jupyter(cls, source, safe=True):
         from rhodecode.lib import helpers

         from traitlets.config import Config
         import nbformat
         from nbconvert import HTMLExporter
         from nbconvert.preprocessors import Preprocessor

         class CustomHTMLExporter(HTMLExporter):
             def _template_file_default(self):
                 return 'basic'

         class Sandbox(Preprocessor):

             def preprocess(self, nb, resources):
                 sandbox_text = 'SandBoxed(IPython.core.display.Javascript object)'
                 for cell in nb['cells']:
                     if not safe:
                         continue

                     if 'outputs' in cell:
                         for cell_output in cell['outputs']:
                             if 'data' in cell_output:
                                 if 'application/javascript' in cell_output['data']:
                                     cell_output['data']['text/plain'] = sandbox_text
                                     cell_output['data'].pop('application/javascript', None)

                     if 'source' in cell and cell['cell_type'] == 'markdown':
                         # sanitize similar like in markdown
                         cell['source'] = cls.bleach_clean(cell['source'])

                 return nb, resources

-        def _sanitize_resources(resources):
+        def _sanitize_resources(input_resources):
             """
             Skip/sanitize some of the CSS generated and included in jupyter
             so it doesn't messes up UI so much
             """

             # TODO(marcink): probably we should replace this with whole custom
             # CSS set that doesn't screw up, but jupyter generated html has some
             # special markers, so it requires Custom HTML exporter template with
             # _default_template_path_default, to achieve that

             # strip the reset CSS
-            resources[0] = resources[0][resources[0].find('/*! Source'):]
-            return resources
+            input_resources[0] = input_resources[0][input_resources[0].find('/*! Source'):]
+            return input_resources

         def as_html(notebook):
             conf = Config()
             conf.CustomHTMLExporter.preprocessors = [Sandbox]
             html_exporter = CustomHTMLExporter(config=conf)

             (body, resources) = html_exporter.from_notebook_node(notebook)
             header = '<!-- ## IPYTHON NOTEBOOK RENDERING ## -->'
             js = MakoTemplate(r'''
             <!-- Load mathjax -->
             <!-- MathJax configuration -->
             <script type="text/x-mathjax-config">
             MathJax.Hub.Config({
                 jax: ["input/TeX","output/HTML-CSS", "output/PreviewHTML"],
                 extensions: ["tex2jax.js","MathMenu.js","MathZoom.js", "fast-preview.js", "AssistiveMML.js", "[Contrib]/a11y/accessibility-menu.js"],
                 TeX: {
                     extensions: ["AMSmath.js","AMSsymbols.js","noErrors.js","noUndefined.js"]
                 },
                 tex2jax: {
                     inlineMath: [ ['$','$'], ["\\(","\\)"] ],
                     displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
                     processEscapes: true,
                     processEnvironments: true
                 },
                 // Center justify equations in code and markdown cells. Elsewhere
                 // we use CSS to left justify single line equations in code cells.
                 displayAlign: 'center',
                 "HTML-CSS": {
                     styles: {'.MathJax_Display': {"margin": 0}},
                     linebreaks: { automatic: true },
                     availableFonts: ["STIX", "TeX"]
                 },
                 showMathMenu: false
             });
             </script>
             <!-- End of mathjax configuration -->
             <script src="${h.asset('js/src/math_jax/MathJax.js')}"></script>
             ''').render(h=helpers)

             css = '<style>{}</style>'.format(
                 ''.join(_sanitize_resources(resources['inlining']['css'])))

             body = '\n'.join([header, css, js, body])
             return body, resources

         notebook = nbformat.reads(source, as_version=4)
         (body, resources) = as_html(notebook)
         return body


 class RstTemplateRenderer(object):

     def __init__(self):
         base = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
         rst_template_dirs = [os.path.join(base, 'templates', 'rst_templates')]
         self.template_store = TemplateLookup(
             directories=rst_template_dirs,
             input_encoding='utf-8',
             imports=['from rhodecode.lib import helpers as h'])

     def _get_template(self, templatename):
         return self.template_store.get_template(templatename)

     def render(self, template_name, **kwargs):
         template = self._get_template(template_name)
         return template.render(**kwargs)
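
The substance of this diff is that the two `markdown.Markdown` instances, previously built eagerly as class attributes of `MarkupRenderer`, are now created lazily and memoized at module level through `get_markdown_renderer()` and `get_markdown_renderer_flavored()`. A minimal standalone sketch of that lazy-caching pattern is below; the names are hypothetical and it assumes the Python-Markdown 2.x API this Python 2 code base already uses.

```python
import markdown

_cached_renderer = None


def get_renderer(extensions, output_format):
    """Build the Markdown renderer once per process and reuse it afterwards."""
    global _cached_renderer
    if _cached_renderer is None:
        _cached_renderer = markdown.Markdown(
            extensions=extensions,
            enable_attributes=False, output_format=output_format)
    return _cached_renderer


# The first call constructs the renderer; later calls return the same object,
# so only .convert() runs per rendered document.
renderer = get_renderer(['markdown.extensions.extra'], 'html4')
assert renderer is get_renderer(['markdown.extensions.extra'], 'html4')
html = renderer.convert(u'# Title\n\nSome **bold** text')
```

When a single instance is reused across documents, the Python-Markdown documentation recommends calling `.reset()` between conversions so stateful extensions (footnotes, for example) do not carry data from one document into the next.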