@@ -1,177 +1,177 @@
|
1 | 1 | """Module containing single call export functions.""" |
|
2 | 2 | |
|
3 | 3 | # Copyright (c) IPython Development Team. |
|
4 | 4 | # Distributed under the terms of the Modified BSD License. |
|
5 | 5 | |
|
6 | 6 | from functools import wraps |
|
7 | 7 | |
|
8 | 8 | from IPython.nbformat import NotebookNode |
|
9 | 9 | from IPython.utils.decorators import undoc |
|
10 | 10 | from IPython.utils.py3compat import string_types |
|
11 | 11 | |
|
12 | 12 | from .exporter import Exporter |
|
13 | 13 | from .templateexporter import TemplateExporter |
|
14 | 14 | from .html import HTMLExporter |
|
15 | 15 | from .slides import SlidesExporter |
|
16 | 16 | from .latex import LatexExporter |
|
17 | 17 | from .pdf import PDFExporter |
|
18 | 18 | from .markdown import MarkdownExporter |
|
19 | 19 | from .python import PythonExporter |
|
20 | 20 | from .rst import RSTExporter |
|
21 | 21 | from .notebook import NotebookExporter |
|
22 | 22 | from .script import ScriptExporter |
|
23 | 23 | |
|
24 | 24 | #----------------------------------------------------------------------------- |
|
25 | 25 | # Classes |
|
26 | 26 | #----------------------------------------------------------------------------- |
|
27 | 27 | |
|
28 | 28 | @undoc |
|
29 | 29 | def DocDecorator(f): |
|
30 | 30 | |
|
31 | 31 | #Set docstring of function |
|
32 | 32 | f.__doc__ = f.__doc__ + """ |
|
33 | 33 | nb : :class:`~IPython.nbformat.NotebookNode` |
|
34 | 34 | The notebook to export. |
|
35 | 35 | config : config (optional, keyword arg) |
|
36 | 36 | User configuration instance. |
|
37 | 37 | resources : dict (optional, keyword arg) |
|
38 | 38 | Resources used in the conversion process. |
|
39 | 39 | |
|
40 | 40 | Returns |
|
41 | 41 | ------- |
|
42 | 42 | tuple : (output, resources, exporter_instance) |
|
43 | 43 | output : str |
|
44 | 44 | Jinja 2 output. This is the resulting converted notebook. |
|
45 | 45 | resources : dictionary |
|
46 | 46 | Dictionary of resources used prior to and during the conversion |
|
47 | 47 | process. |
|
48 | 48 | exporter_instance : Exporter |
|
49 | 49 | Instance of the Exporter class used to export the document. Useful |
|
50 | 50 | to the caller because it provides a 'file_extension' property which |
|
51 | 51 | specifies what extension the output should be saved as. |
|
52 | 52 | |
|
53 | 53 | Notes |
|
54 | 54 | ----- |
|
55 | 55 | WARNING: API WILL CHANGE IN FUTURE RELEASES OF NBCONVERT |
|
56 | 56 | """ |
|
57 | 57 | |
|
58 | 58 | @wraps(f) |
|
59 | 59 | def decorator(*args, **kwargs): |
|
60 | 60 | return f(*args, **kwargs) |
|
61 | 61 | |
|
62 | 62 | return decorator |
|
63 | 63 | |
|
64 | 64 | |
|
65 | 65 | #----------------------------------------------------------------------------- |
|
66 | 66 | # Functions |
|
67 | 67 | #----------------------------------------------------------------------------- |
|
68 | 68 | |
|
69 | 69 | __all__ = [ |
|
70 | 70 | 'export', |
|
71 | 71 | 'export_html', |
|
72 | 72 | 'export_custom', |
|
73 | 73 | 'export_slides', |
|
74 | 74 | 'export_latex', |
|
75 | 75 | 'export_pdf', |
|
76 | 76 | 'export_markdown', |
|
77 | 77 | 'export_python', |
|
78 | 78 | 'export_script', |
|
79 | 79 | 'export_rst', |
|
80 | 80 | 'export_by_name', |
|
81 | 81 | 'get_export_names', |
|
82 | 82 | 'ExporterNameError' |
|
83 | 83 | ] |
|
84 | 84 | |
|
85 | 85 | |
|
86 | 86 | class ExporterNameError(NameError): |
|
87 | 87 | pass |
|
88 | 88 | |
|
89 | 89 | @DocDecorator |
|
90 | 90 | def export(exporter, nb, **kw): |
|
91 | 91 | """ |
|
92 | 92 | Export a notebook object using specific exporter class. |
|
93 | 93 | |
|
94 | 94 | Parameters |
|
95 | 95 | ---------- |
|
96 | | exporter : class:`~IPython.nbconvert.exporters.exporter.Exporter` class or instance |
| 96 | exporter : class:`~jupyter_nbconvert.exporters.exporter.Exporter` class or instance |
|
97 | 97 | Class type or instance of the exporter that should be used. If the |
|
98 | 98 | method initializes its own instance of the class, it is ASSUMED that |
|
99 | 99 | the class type provided exposes a constructor (``__init__``) with the same |
|
100 | 100 | signature as the base Exporter class. |
|
101 | 101 | """ |
|
102 | 102 | |
|
103 | 103 | #Check arguments |
|
104 | 104 | if exporter is None: |
|
105 | 105 | raise TypeError("Exporter is None") |
|
106 | 106 | elif not isinstance(exporter, Exporter) and not issubclass(exporter, Exporter): |
|
107 | 107 | raise TypeError("exporter does not inherit from Exporter (base)") |
|
108 | 108 | if nb is None: |
|
109 | 109 | raise TypeError("nb is None") |
|
110 | 110 | |
|
111 | 111 | #Create the exporter |
|
112 | 112 | resources = kw.pop('resources', None) |
|
113 | 113 | if isinstance(exporter, Exporter): |
|
114 | 114 | exporter_instance = exporter |
|
115 | 115 | else: |
|
116 | 116 | exporter_instance = exporter(**kw) |
|
117 | 117 | |
|
118 | 118 | #Try to convert the notebook using the appropriate conversion function. |
|
119 | 119 | if isinstance(nb, NotebookNode): |
|
120 | 120 | output, resources = exporter_instance.from_notebook_node(nb, resources) |
|
121 | 121 | elif isinstance(nb, string_types): |
|
122 | 122 | output, resources = exporter_instance.from_filename(nb, resources) |
|
123 | 123 | else: |
|
124 | 124 | output, resources = exporter_instance.from_file(nb, resources) |
|
125 | 125 | return output, resources |
|
126 | 126 | |
|
127 | 127 | exporter_map = dict( |
|
128 | 128 | custom=TemplateExporter, |
|
129 | 129 | html=HTMLExporter, |
|
130 | 130 | slides=SlidesExporter, |
|
131 | 131 | latex=LatexExporter, |
|
132 | 132 | pdf=PDFExporter, |
|
133 | 133 | markdown=MarkdownExporter, |
|
134 | 134 | python=PythonExporter, |
|
135 | 135 | rst=RSTExporter, |
|
136 | 136 | notebook=NotebookExporter, |
|
137 | 137 | script=ScriptExporter, |
|
138 | 138 | ) |
|
139 | 139 | |
|
140 | 140 | def _make_exporter(name, E): |
|
141 | 141 | """make an export_foo function from a short key and Exporter class E""" |
|
142 | 142 | def _export(nb, **kw): |
|
143 | 143 | return export(E, nb, **kw) |
|
144 | 144 | _export.__doc__ = """Export a notebook object to {0} format""".format(name) |
|
145 | 145 | return _export |
|
146 | 146 | |
|
147 | 147 | g = globals() |
|
148 | 148 | |
|
149 | 149 | for name, E in exporter_map.items(): |
|
150 | 150 | g['export_%s' % name] = DocDecorator(_make_exporter(name, E)) |
|
151 | 151 | |
|
152 | 152 | @DocDecorator |
|
153 | 153 | def export_by_name(format_name, nb, **kw): |
|
154 | 154 | """ |
|
155 | 155 | Export a notebook object to a template type by its name. Reflection |
|
156 | 156 | (Inspect) is used to find the template's corresponding explicit export |
|
157 | 157 | method defined in this module. That method is then called directly. |
|
158 | 158 | |
|
159 | 159 | Parameters |
|
160 | 160 | ---------- |
|
161 | 161 | format_name : str |
|
162 | 162 | Name of the template style to export to. |
|
163 | 163 | """ |
|
164 | 164 | |
|
165 | 165 | function_name = "export_" + format_name.lower() |
|
166 | 166 | |
|
167 | 167 | if function_name in globals(): |
|
168 | 168 | return globals()[function_name](nb, **kw) |
|
169 | 169 | else: |
|
170 | 170 | raise ExporterNameError("template for `%s` not found" % function_name) |
|
171 | 171 | |
|
172 | 172 | |
|
173 | 173 | def get_export_names(): |
|
174 | 174 | """Return a list of the currently supported export targets |
|
175 | 175 | |
|
176 | 176 | WARNING: API WILL CHANGE IN FUTURE RELEASES OF NBCONVERT""" |
|
177 | 177 | return sorted(exporter_map.keys()) |
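Editorial note, not part of the diff: the module above is the single-call export API. A minimal usage sketch follows; the import path (mirroring the old IPython.nbconvert layout) and the notebook filename are assumptions.

    # Usage sketch only -- import path and 'example.ipynb' are assumptions.
    import io
    from jupyter_nbconvert.exporters import export_by_name, get_export_names

    print(get_export_names())          # e.g. ['custom', 'html', 'latex', ...]
    body, resources = export_by_name('html', 'example.ipynb')
    # resources['output_extension'] is set by the exporter (here '.html')
    with io.open('example' + resources['output_extension'], 'w', encoding='utf-8') as f:
        f.write(body)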
@@ -1,280 +1,280 @@
|
1 | 1 | """This module defines a base Exporter class. For Jinja template-based export, |
|
2 | 2 | see templateexporter.py. |
|
3 | 3 | """ |
|
4 | 4 | |
|
5 | 5 | |
|
6 | 6 | from __future__ import print_function, absolute_import |
|
7 | 7 | |
|
8 | 8 | import io |
|
9 | 9 | import os |
|
10 | 10 | import copy |
|
11 | 11 | import collections |
|
12 | 12 | import datetime |
|
13 | 13 | |
|
14 | 14 | from IPython.config.configurable import LoggingConfigurable |
|
15 | 15 | from IPython.config import Config |
|
16 | 16 | from IPython import nbformat |
|
17 | 17 | from IPython.utils.traitlets import MetaHasTraits, Unicode, List, TraitError |
|
18 | 18 | from IPython.utils.importstring import import_item |
|
19 | 19 | from IPython.utils import text, py3compat |
|
20 | 20 | |
|
21 | 21 | |
|
22 | 22 | class ResourcesDict(collections.defaultdict): |
|
23 | 23 | def __missing__(self, key): |
|
24 | 24 | return '' |
|
25 | 25 | |
|
26 | 26 | |
|
27 | 27 | class FilenameExtension(Unicode): |
|
28 | 28 | """A trait for filename extensions.""" |
|
29 | 29 | |
|
30 | 30 | default_value = u'' |
|
31 | 31 | info_text = 'a filename extension, beginning with a dot' |
|
32 | 32 | |
|
33 | 33 | def validate(self, obj, value): |
|
34 | 34 | # cast to proper unicode |
|
35 | 35 | value = super(FilenameExtension, self).validate(obj, value) |
|
36 | 36 | |
|
37 | 37 | # check that it starts with a dot |
|
38 | 38 | if value and not value.startswith('.'): |
|
39 | 39 | msg = "FileExtension trait '{}' does not begin with a dot: {!r}" |
|
40 | 40 | raise TraitError(msg.format(self.name, value)) |
|
41 | 41 | |
|
42 | 42 | return value |
|
43 | 43 | |
|
44 | 44 | |
|
45 | 45 | class Exporter(LoggingConfigurable): |
|
46 | 46 | """ |
|
47 | 47 | Class containing methods that sequentially run a list of preprocessors on a |
|
48 | 48 | NotebookNode object and then return the modified NotebookNode object and |
|
49 | 49 | accompanying resources dict. |
|
50 | 50 | """ |
|
51 | 51 | |
|
52 | 52 | file_extension = FilenameExtension( |
|
53 | 53 | '.txt', config=True, |
|
54 | 54 | help="Extension of the file that should be written to disk" |
|
55 | 55 | ) |
|
56 | 56 | |
|
57 | 57 | # MIME type of the result file, for HTTP response headers. |
|
58 | 58 | # This is *not* a traitlet, because we want to be able to access it from |
|
59 | 59 | # the class, not just on instances. |
|
60 | 60 | output_mimetype = '' |
|
61 | 61 | |
|
62 | 62 | #Configurability, allows the user to easily add filters and preprocessors. |
|
63 | 63 | preprocessors = List(config=True, |
|
64 | 64 | help="""List of preprocessors, by name or namespace, to enable.""") |
|
65 | 65 | |
|
66 | 66 | _preprocessors = List() |
|
67 | 67 | |
|
68 | | default_preprocessors = List(['IPython.nbconvert.preprocessors.coalesce_streams', |
69 | | 'IPython.nbconvert.preprocessors.SVG2PDFPreprocessor', |
70 | | 'IPython.nbconvert.preprocessors.ExtractOutputPreprocessor', |
71 | | 'IPython.nbconvert.preprocessors.CSSHTMLHeaderPreprocessor', |
72 | | 'IPython.nbconvert.preprocessors.RevealHelpPreprocessor', |
73 | | 'IPython.nbconvert.preprocessors.LatexPreprocessor', |
74 | | 'IPython.nbconvert.preprocessors.ClearOutputPreprocessor', |
75 | | 'IPython.nbconvert.preprocessors.ExecutePreprocessor', |
76 | | 'IPython.nbconvert.preprocessors.HighlightMagicsPreprocessor'], |
| 68 | default_preprocessors = List(['jupyter_nbconvert.preprocessors.coalesce_streams', |
| 69 | 'jupyter_nbconvert.preprocessors.SVG2PDFPreprocessor', |
| 70 | 'jupyter_nbconvert.preprocessors.ExtractOutputPreprocessor', |
| 71 | 'jupyter_nbconvert.preprocessors.CSSHTMLHeaderPreprocessor', |
| 72 | 'jupyter_nbconvert.preprocessors.RevealHelpPreprocessor', |
| 73 | 'jupyter_nbconvert.preprocessors.LatexPreprocessor', |
| 74 | 'jupyter_nbconvert.preprocessors.ClearOutputPreprocessor', |
| 75 | 'jupyter_nbconvert.preprocessors.ExecutePreprocessor', |
| 76 | 'jupyter_nbconvert.preprocessors.HighlightMagicsPreprocessor'], |
|
77 | 77 | config=True, |
|
78 | 78 | help="""List of preprocessors available by default, by name, namespace, |
|
79 | 79 | instance, or type.""") |
|
80 | 80 | |
|
81 | 81 | |
|
82 | 82 | def __init__(self, config=None, **kw): |
|
83 | 83 | """ |
|
84 | 84 | Public constructor |
|
85 | 85 | |
|
86 | 86 | Parameters |
|
87 | 87 | ---------- |
|
88 | 88 | config : config |
|
89 | 89 | User configuration instance. |
|
90 | 90 | """ |
|
91 | 91 | with_default_config = self.default_config |
|
92 | 92 | if config: |
|
93 | 93 | with_default_config.merge(config) |
|
94 | 94 | |
|
95 | 95 | super(Exporter, self).__init__(config=with_default_config, **kw) |
|
96 | 96 | |
|
97 | 97 | self._init_preprocessors() |
|
98 | 98 | |
|
99 | 99 | |
|
100 | 100 | @property |
|
101 | 101 | def default_config(self): |
|
102 | 102 | return Config() |
|
103 | 103 | |
|
104 | 104 | def from_notebook_node(self, nb, resources=None, **kw): |
|
105 | 105 | """ |
|
106 | 106 | Convert a notebook from a notebook node instance. |
|
107 | 107 | |
|
108 | 108 | Parameters |
|
109 | 109 | ---------- |
|
110 | 110 | nb : :class:`~IPython.nbformat.NotebookNode` |
|
111 | 111 | Notebook node (dict-like with attr-access) |
|
112 | 112 | resources : dict |
|
113 | 113 | Additional resources that can be accessed read/write by |
|
114 | 114 | preprocessors and filters. |
|
115 | 115 | **kw |
|
116 | 116 | Ignored (?) |
|
117 | 117 | """ |
|
118 | 118 | nb_copy = copy.deepcopy(nb) |
|
119 | 119 | resources = self._init_resources(resources) |
|
120 | 120 | |
|
121 | 121 | if 'language' in nb['metadata']: |
|
122 | 122 | resources['language'] = nb['metadata']['language'].lower() |
|
123 | 123 | |
|
124 | 124 | # Preprocess |
|
125 | 125 | nb_copy, resources = self._preprocess(nb_copy, resources) |
|
126 | 126 | |
|
127 | 127 | return nb_copy, resources |
|
128 | 128 | |
|
129 | 129 | |
|
130 | 130 | def from_filename(self, filename, resources=None, **kw): |
|
131 | 131 | """ |
|
132 | 132 | Convert a notebook from a notebook file. |
|
133 | 133 | |
|
134 | 134 | Parameters |
|
135 | 135 | ---------- |
|
136 | 136 | filename : str |
|
137 | 137 | Full filename of the notebook file to open and convert. |
|
138 | 138 | """ |
|
139 | 139 | |
|
140 | 140 | # Pull the metadata from the filesystem. |
|
141 | 141 | if resources is None: |
|
142 | 142 | resources = ResourcesDict() |
|
143 | 143 | if not 'metadata' in resources or resources['metadata'] == '': |
|
144 | 144 | resources['metadata'] = ResourcesDict() |
|
145 | 145 | path, basename = os.path.split(filename) |
|
146 | 146 | notebook_name = basename[:basename.rfind('.')] |
|
147 | 147 | resources['metadata']['name'] = notebook_name |
|
148 | 148 | resources['metadata']['path'] = path |
|
149 | 149 | |
|
150 | 150 | modified_date = datetime.datetime.fromtimestamp(os.path.getmtime(filename)) |
|
151 | 151 | resources['metadata']['modified_date'] = modified_date.strftime(text.date_format) |
|
152 | 152 | |
|
153 | 153 | with io.open(filename, encoding='utf-8') as f: |
|
154 | 154 | return self.from_notebook_node(nbformat.read(f, as_version=4), resources=resources, **kw) |
|
155 | 155 | |
|
156 | 156 | |
|
157 | 157 | def from_file(self, file_stream, resources=None, **kw): |
|
158 | 158 | """ |
|
159 | 159 | Convert a notebook from a notebook file. |
|
160 | 160 | |
|
161 | 161 | Parameters |
|
162 | 162 | ---------- |
|
163 | 163 | file_stream : file-like object |
|
164 | 164 | Notebook file-like object to convert. |
|
165 | 165 | """ |
|
166 | 166 | return self.from_notebook_node(nbformat.read(file_stream, as_version=4), resources=resources, **kw) |
|
167 | 167 | |
|
168 | 168 | |
|
169 | 169 | def register_preprocessor(self, preprocessor, enabled=False): |
|
170 | 170 | """ |
|
171 | 171 | Register a preprocessor. |
|
172 | 172 | Preprocessors are classes that act upon the notebook before it is |
|
173 | 173 | passed into the Jinja templating engine. Preprocessors are also |
|
174 | 174 | capable of passing additional information to the Jinja |
|
175 | 175 | templating engine. |
|
176 | 176 | |
|
177 | 177 | Parameters |
|
178 | 178 | ---------- |
|
179 | 179 | preprocessor : preprocessor |
|
180 | 180 | """ |
|
181 | 181 | if preprocessor is None: |
|
182 | 182 | raise TypeError('preprocessor') |
|
183 | 183 | isclass = isinstance(preprocessor, type) |
|
184 | 184 | constructed = not isclass |
|
185 | 185 | |
|
186 | 186 | # Handle preprocessor's registration based on its type |
|
187 | 187 | if constructed and isinstance(preprocessor, py3compat.string_types): |
|
188 | 188 | # Preprocessor is a string, import the namespace and recursively call |
|
189 | 189 | # this register_preprocessor method |
|
190 | 190 | preprocessor_cls = import_item(preprocessor) |
|
191 | 191 | return self.register_preprocessor(preprocessor_cls, enabled) |
|
192 | 192 | |
|
193 | 193 | if constructed and hasattr(preprocessor, '__call__'): |
|
194 | 194 | # Preprocessor is a function, no need to construct it. |
|
195 | 195 | # Register and return the preprocessor. |
|
196 | 196 | if enabled: |
|
197 | 197 | preprocessor.enabled = True |
|
198 | 198 | self._preprocessors.append(preprocessor) |
|
199 | 199 | return preprocessor |
|
200 | 200 | |
|
201 | 201 | elif isclass and isinstance(preprocessor, MetaHasTraits): |
|
202 | 202 | # Preprocessor is configurable. Make sure to pass in new default for |
|
203 | 203 | # the enabled flag if one was specified. |
|
204 | 204 | self.register_preprocessor(preprocessor(parent=self), enabled) |
|
205 | 205 | |
|
206 | 206 | elif isclass: |
|
207 | 207 | # Preprocessor is not configurable, construct it |
|
208 | 208 | self.register_preprocessor(preprocessor(), enabled) |
|
209 | 209 | |
|
210 | 210 | else: |
|
211 | 211 | # Preprocessor is an instance of something without a __call__ |
|
212 | 212 | # attribute. |
|
213 | 213 | raise TypeError('preprocessor') |
|
214 | 214 | |
|
215 | 215 | |
|
216 | 216 | def _init_preprocessors(self): |
|
217 | 217 | """ |
|
218 | 218 | Register all of the preprocessors needed for this exporter, disabled |
|
219 | 219 | unless specified explicitly. |
|
220 | 220 | """ |
|
221 | 221 | self._preprocessors = [] |
|
222 | 222 | |
|
223 | 223 | # Load default preprocessors (not necessarily enabled by default). |
|
224 | 224 | for preprocessor in self.default_preprocessors: |
|
225 | 225 | self.register_preprocessor(preprocessor) |
|
226 | 226 | |
|
227 | 227 | # Load user-specified preprocessors. Enable by default. |
|
228 | 228 | for preprocessor in self.preprocessors: |
|
229 | 229 | self.register_preprocessor(preprocessor, enabled=True) |
|
230 | 230 | |
|
231 | 231 | |
|
232 | 232 | def _init_resources(self, resources): |
|
233 | 233 | |
|
234 | 234 | #Make sure the resources dict is of ResourcesDict type. |
|
235 | 235 | if resources is None: |
|
236 | 236 | resources = ResourcesDict() |
|
237 | 237 | if not isinstance(resources, ResourcesDict): |
|
238 | 238 | new_resources = ResourcesDict() |
|
239 | 239 | new_resources.update(resources) |
|
240 | 240 | resources = new_resources |
|
241 | 241 | |
|
242 | 242 | #Make sure the metadata extension exists in resources |
|
243 | 243 | if 'metadata' in resources: |
|
244 | 244 | if not isinstance(resources['metadata'], ResourcesDict): |
|
245 | 245 | new_metadata = ResourcesDict() |
|
246 | 246 | new_metadata.update(resources['metadata']) |
|
247 | 247 | resources['metadata'] = new_metadata |
|
248 | 248 | else: |
|
249 | 249 | resources['metadata'] = ResourcesDict() |
|
250 | 250 | if not resources['metadata']['name']: |
|
251 | 251 | resources['metadata']['name'] = 'Notebook' |
|
252 | 252 | |
|
253 | 253 | #Set the output extension |
|
254 | 254 | resources['output_extension'] = self.file_extension |
|
255 | 255 | return resources |
|
256 | 256 | |
|
257 | 257 | |
|
258 | 258 | def _preprocess(self, nb, resources): |
|
259 | 259 | """ |
|
260 | 260 | Preprocess the notebook before passing it into the Jinja engine. |
|
261 | 261 | To preprocess the notebook is to apply all of the registered preprocessors to it, in order. |
|
262 | 262 | |
|
263 | 263 | Parameters |
|
264 | 264 | ---------- |
|
265 | 265 | nb : notebook node |
|
266 | 266 | notebook that is being exported. |
|
267 | 267 | resources : a dict of additional resources that |
|
268 | 268 | can be accessed read/write by preprocessors |
|
269 | 269 | """ |
|
270 | 270 | |
|
271 | 271 | # Do a copy.deepcopy first, |
|
272 | 272 | # we are never safe enough with what the preprocessors could do. |
|
273 | 273 | nbc = copy.deepcopy(nb) |
|
274 | 274 | resc = copy.deepcopy(resources) |
|
275 | 275 | |
|
276 | 276 | #Run each preprocessor on the notebook. Carry the output along |
|
277 | 277 | #to each preprocessor |
|
278 | 278 | for preprocessor in self._preprocessors: |
|
279 | 279 | nbc, resc = preprocessor(nbc, resc) |
|
280 | 280 | return nbc, resc |
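The register_preprocessor() contract above accepts a dotted name, a class, an instance, or a plain callable. A sketch of the callable case; the exporter choice, import path, and filename are assumptions, not part of the diff.

    # Sketch: registering a plain callable as a preprocessor (path/filename assumed).
    from jupyter_nbconvert.exporters.html import HTMLExporter

    def shout_markdown(nb, resources):
        """Toy preprocessor: upper-case every markdown cell before templating."""
        for cell in nb.cells:
            if cell.cell_type == 'markdown':
                cell.source = cell.source.upper()
        return nb, resources

    exporter = HTMLExporter()
    exporter.register_preprocessor(shout_markdown, enabled=True)
    body, resources = exporter.from_filename('example.ipynb')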
@@ -1,57 +1,57 @@
|
1 | 1 | """HTML Exporter class""" |
|
2 | 2 | |
|
3 | 3 | # Copyright (c) IPython Development Team. |
|
4 | 4 | # Distributed under the terms of the Modified BSD License. |
|
5 | 5 | |
|
6 | 6 | import os |
|
7 | 7 | |
|
8 | | from IPython.nbconvert.filters.highlight import Highlight2HTML |
| 8 | from jupyter_nbconvert.filters.highlight import Highlight2HTML |
|
9 | 9 | from IPython.config import Config |
|
10 | 10 | |
|
11 | 11 | from .templateexporter import TemplateExporter |
|
12 | 12 | |
|
13 | 13 | #----------------------------------------------------------------------------- |
|
14 | 14 | # Classes |
|
15 | 15 | #----------------------------------------------------------------------------- |
|
16 | 16 | |
|
17 | 17 | class HTMLExporter(TemplateExporter): |
|
18 | 18 | """ |
|
19 | 19 | Exports a basic HTML document. This exporter assists with the export of |
|
20 | 20 | HTML. Inherit from it if you are writing your own HTML template and need |
|
21 | 21 | custom preprocessors/filters. If you don't need custom preprocessors/ |
|
22 | 22 | filters, just change the 'template_file' config option. |
|
23 | 23 | """ |
|
24 | 24 | |
|
25 | 25 | def _file_extension_default(self): |
|
26 | 26 | return '.html' |
|
27 | 27 | |
|
28 | 28 | def _default_template_path_default(self): |
|
29 | 29 | return os.path.join("..", "templates", "html") |
|
30 | 30 | |
|
31 | 31 | def _template_file_default(self): |
|
32 | 32 | return 'full' |
|
33 | 33 | |
|
34 | 34 | output_mimetype = 'text/html' |
|
35 | 35 | |
|
36 | 36 | @property |
|
37 | 37 | def default_config(self): |
|
38 | 38 | c = Config({ |
|
39 | 39 | 'NbConvertBase': { |
|
40 | 40 | 'display_data_priority' : ['application/javascript', 'text/html', 'text/markdown', 'application/pdf', 'image/svg+xml', 'text/latex', 'image/png', 'image/jpeg', 'text/plain'] |
|
41 | 41 | }, |
|
42 | 42 | 'CSSHTMLHeaderPreprocessor':{ |
|
43 | 43 | 'enabled':True |
|
44 | 44 | }, |
|
45 | 45 | 'HighlightMagicsPreprocessor': { |
|
46 | 46 | 'enabled':True |
|
47 | 47 | } |
|
48 | 48 | }) |
|
49 | 49 | c.merge(super(HTMLExporter,self).default_config) |
|
50 | 50 | return c |
|
51 | 51 | |
|
52 | 52 | def from_notebook_node(self, nb, resources=None, **kw): |
|
53 | 53 | langinfo = nb.metadata.get('language_info', {}) |
|
54 | 54 | lexer = langinfo.get('pygments_lexer', langinfo.get('name', None)) |
|
55 | 55 | self.register_filter('highlight_code', |
|
56 | 56 | Highlight2HTML(pygments_lexer=lexer, parent=self)) |
|
57 | 57 | return super(HTMLExporter, self).from_notebook_node(nb, resources, **kw) |
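As the HTMLExporter docstring above notes, no subclass is needed just to pick another template; a config override suffices. A sketch, where the import path and the 'basic' template name are assumptions.

    # Sketch: selecting the HTML template via config instead of subclassing.
    from IPython.config import Config
    from jupyter_nbconvert.exporters.html import HTMLExporter   # path assumed

    c = Config({'HTMLExporter': {'template_file': 'basic'}})    # 'basic' assumed to exist
    exporter = HTMLExporter(config=c)
    body, resources = exporter.from_filename('example.ipynb')
    print(resources['output_extension'])                        # '.html'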
@@ -1,96 +1,96 @@
|
1 | 1 | """LaTeX Exporter class""" |
|
2 | 2 | |
|
3 | 3 | #----------------------------------------------------------------------------- |
|
4 | 4 | # Copyright (c) 2013, the IPython Development Team. |
|
5 | 5 | # |
|
6 | 6 | # Distributed under the terms of the Modified BSD License. |
|
7 | 7 | # |
|
8 | 8 | # The full license is in the file COPYING.txt, distributed with this software. |
|
9 | 9 | #----------------------------------------------------------------------------- |
|
10 | 10 | |
|
11 | 11 | #----------------------------------------------------------------------------- |
|
12 | 12 | # Imports |
|
13 | 13 | #----------------------------------------------------------------------------- |
|
14 | 14 | |
|
15 | 15 | # Stdlib imports |
|
16 | 16 | import os |
|
17 | 17 | |
|
18 | 18 | # IPython imports |
|
19 | 19 | from IPython.utils.traitlets import Unicode |
|
20 | 20 | from IPython.config import Config |
|
21 | 21 | |
|
22 | | from IPython.nbconvert.filters.highlight import Highlight2Latex |
| 22 | from jupyter_nbconvert.filters.highlight import Highlight2Latex |
|
23 | 23 | from .templateexporter import TemplateExporter |
|
24 | 24 | |
|
25 | 25 | #----------------------------------------------------------------------------- |
|
26 | 26 | # Classes and functions |
|
27 | 27 | #----------------------------------------------------------------------------- |
|
28 | 28 | |
|
29 | 29 | class LatexExporter(TemplateExporter): |
|
30 | 30 | """ |
|
31 | 31 | Exports to a Latex template. Inherit from this class if your template is |
|
32 | 32 | LaTeX based and you need custom transformers/filters. Inherit from it if |
|
33 | 33 | you are writing your own LaTeX template and need custom transformers/filters. |
|
34 | 34 | If you don't need custom transformers/filters, just change the |
|
35 | 35 | 'template_file' config option. Place your template in the special "/latex" |
|
36 | 36 | subfolder of the "../templates" folder. |
|
37 | 37 | """ |
|
38 | 38 | |
|
39 | 39 | def _file_extension_default(self): |
|
40 | 40 | return '.tex' |
|
41 | 41 | |
|
42 | 42 | def _template_file_default(self): |
|
43 | 43 | return 'article' |
|
44 | 44 | |
|
45 | 45 | #Latex constants |
|
46 | 46 | def _default_template_path_default(self): |
|
47 | 47 | return os.path.join("..", "templates", "latex") |
|
48 | 48 | |
|
49 | 49 | def _template_skeleton_path_default(self): |
|
50 | 50 | return os.path.join("..", "templates", "latex", "skeleton") |
|
51 | 51 | |
|
52 | 52 | #Special Jinja2 syntax that will not conflict when exporting latex. |
|
53 | 53 | jinja_comment_block_start = Unicode("((=", config=True) |
|
54 | 54 | jinja_comment_block_end = Unicode("=))", config=True) |
|
55 | 55 | jinja_variable_block_start = Unicode("(((", config=True) |
|
56 | 56 | jinja_variable_block_end = Unicode(")))", config=True) |
|
57 | 57 | jinja_logic_block_start = Unicode("((*", config=True) |
|
58 | 58 | jinja_logic_block_end = Unicode("*))", config=True) |
|
59 | 59 | |
|
60 | 60 | #Extension that the template files use. |
|
61 | 61 | template_extension = Unicode(".tplx", config=True) |
|
62 | 62 | |
|
63 | 63 | output_mimetype = 'text/latex' |
|
64 | 64 | |
|
65 | 65 | |
|
66 | 66 | @property |
|
67 | 67 | def default_config(self): |
|
68 | 68 | c = Config({ |
|
69 | 69 | 'NbConvertBase': { |
|
70 | 70 | 'display_data_priority' : ['text/latex', 'application/pdf', 'image/png', 'image/jpeg', 'image/svg+xml', 'text/plain'] |
|
71 | 71 | }, |
|
72 | 72 | 'ExtractOutputPreprocessor': { |
|
73 | 73 | 'enabled':True |
|
74 | 74 | }, |
|
75 | 75 | 'SVG2PDFPreprocessor': { |
|
76 | 76 | 'enabled':True |
|
77 | 77 | }, |
|
78 | 78 | 'LatexPreprocessor': { |
|
79 | 79 | 'enabled':True |
|
80 | 80 | }, |
|
81 | 81 | 'SphinxPreprocessor': { |
|
82 | 82 | 'enabled':True |
|
83 | 83 | }, |
|
84 | 84 | 'HighlightMagicsPreprocessor': { |
|
85 | 85 | 'enabled':True |
|
86 | 86 | } |
|
87 | 87 | }) |
|
88 | 88 | c.merge(super(LatexExporter,self).default_config) |
|
89 | 89 | return c |
|
90 | 90 | |
|
91 | 91 | def from_notebook_node(self, nb, resources=None, **kw): |
|
92 | 92 | langinfo = nb.metadata.get('language_info', {}) |
|
93 | 93 | lexer = langinfo.get('pygments_lexer', langinfo.get('name', None)) |
|
94 | 94 | self.register_filter('highlight_code', |
|
95 | 95 | Highlight2Latex(pygments_lexer=lexer, parent=self)) |
|
96 | 96 | return super(LatexExporter, self).from_notebook_node(nb, resources, **kw) |
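The LaTeX-safe Jinja delimiters defined above (logic blocks ((* ... *)), variables ((( ... )))) are what a custom .tplx template uses. A sketch with an in-memory loader; the template content, template name, and import path are illustrative assumptions.

    # Sketch: a tiny standalone .tplx template supplied via extra_loaders,
    # written with the LaTeX-safe delimiters configured above.
    from jinja2 import DictLoader
    from jupyter_nbconvert.exporters.latex import LatexExporter  # path assumed

    mini = DictLoader({'mini.tplx': (
        "\\documentclass{article}\n"
        "\\begin{document}\n"
        "This notebook has ((( nb.cells | length ))) cells.\n"
        "\\end{document}\n")})

    exporter = LatexExporter(extra_loaders=[mini], template_file='mini.tplx')
    body, resources = exporter.from_filename('example.ipynb')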
@@ -1,147 +1,147 @@
|
1 | 1 | """Export to PDF via latex""" |
|
2 | 2 | |
|
3 | 3 | # Copyright (c) IPython Development Team. |
|
4 | 4 | # Distributed under the terms of the Modified BSD License. |
|
5 | 5 | |
|
6 | 6 | import subprocess |
|
7 | 7 | import os |
|
8 | 8 | import sys |
|
9 | 9 | |
|
10 | 10 | from IPython.utils.process import find_cmd |
|
11 | 11 | from IPython.utils.traitlets import Integer, List, Bool, Instance |
|
12 | 12 | from IPython.utils.tempdir import TemporaryWorkingDirectory |
|
13 | 13 | from .latex import LatexExporter |
|
14 | 14 | |
|
15 | 15 | |
|
16 | 16 | class PDFExporter(LatexExporter): |
|
17 | 17 | """Writer designed to write to PDF files""" |
|
18 | 18 | |
|
19 | 19 | latex_count = Integer(3, config=True, |
|
20 | 20 | help="How many times latex will be called." |
|
21 | 21 | ) |
|
22 | 22 | |
|
23 | 23 | latex_command = List([u"pdflatex", u"{filename}"], config=True, |
|
24 | 24 | help="Shell command used to compile latex." |
|
25 | 25 | ) |
|
26 | 26 | |
|
27 | 27 | bib_command = List([u"bibtex", u"{filename}"], config=True, |
|
28 | 28 | help="Shell command used to run bibtex." |
|
29 | 29 | ) |
|
30 | 30 | |
|
31 | 31 | verbose = Bool(False, config=True, |
|
32 | 32 | help="Whether to display the output of latex commands." |
|
33 | 33 | ) |
|
34 | 34 | |
|
35 | 35 | temp_file_exts = List(['.aux', '.bbl', '.blg', '.idx', '.log', '.out'], config=True, |
|
36 | 36 | help="File extensions of temp files to remove after running." |
|
37 | 37 | ) |
|
38 | 38 | |
|
39 | | writer = Instance("IPython.nbconvert.writers.FilesWriter", args=()) |
| 39 | writer = Instance("jupyter_nbconvert.writers.FilesWriter", args=()) |
|
40 | 40 | |
|
41 | 41 | def run_command(self, command_list, filename, count, log_function): |
|
42 | 42 | """Run command_list count times. |
|
43 | 43 | |
|
44 | 44 | Parameters |
|
45 | 45 | ---------- |
|
46 | 46 | command_list : list |
|
47 | 47 | A list of args to provide to Popen. Each element of this |
|
48 | 48 | list will be interpolated with the filename to convert. |
|
49 | 49 | filename : unicode |
|
50 | 50 | The name of the file to convert. |
|
51 | 51 | count : int |
|
52 | 52 | How many times to run the command. |
|
53 | 53 | |
|
54 | 54 | Returns |
|
55 | 55 | ------- |
|
56 | 56 | success : bool |
|
57 | 57 | A boolean indicating if the command was successful (True) |
|
58 | 58 | or failed (False). |
|
59 | 59 | """ |
|
60 | 60 | command = [c.format(filename=filename) for c in command_list] |
|
61 | 61 | |
|
62 | 62 | # On windows with python 2.x there is a bug in subprocess.Popen and |
|
63 | 63 | # unicode commands are not supported |
|
64 | 64 | if sys.platform == 'win32' and sys.version_info < (3,0): |
|
65 | 65 | #We must use cp1252 encoding for calling subprocess.Popen |
|
66 | 66 | #Note that sys.stdin.encoding and encoding.DEFAULT_ENCODING |
|
67 | 67 | # could be different (cp437 in case of dos console) |
|
68 | 68 | command = [c.encode('cp1252') for c in command] |
|
69 | 69 | |
|
70 | 70 | # This will throw a clearer error if the command is not found |
|
71 | 71 | find_cmd(command_list[0]) |
|
72 | 72 | |
|
73 | 73 | times = 'time' if count == 1 else 'times' |
|
74 | 74 | self.log.info("Running %s %i %s: %s", command_list[0], count, times, command) |
|
75 | 75 | with open(os.devnull, 'rb') as null: |
|
76 | 76 | stdout = subprocess.PIPE if not self.verbose else None |
|
77 | 77 | for index in range(count): |
|
78 | 78 | p = subprocess.Popen(command, stdout=stdout, stdin=null) |
|
79 | 79 | out, err = p.communicate() |
|
80 | 80 | if p.returncode: |
|
81 | 81 | if self.verbose: |
|
82 | 82 | # verbose means I didn't capture stdout with PIPE, |
|
83 | 83 | # so it's already been displayed and `out` is None. |
|
84 | 84 | out = u'' |
|
85 | 85 | else: |
|
86 | 86 | out = out.decode('utf-8', 'replace') |
|
87 | 87 | log_function(command, out) |
|
88 | 88 | return False # failure |
|
89 | 89 | return True # success |
|
90 | 90 | |
|
91 | 91 | def run_latex(self, filename): |
|
92 | 92 | """Run pdflatex self.latex_count times.""" |
|
93 | 93 | |
|
94 | 94 | def log_error(command, out): |
|
95 | 95 | self.log.critical(u"%s failed: %s\n%s", command[0], command, out) |
|
96 | 96 | |
|
97 | 97 | return self.run_command(self.latex_command, filename, |
|
98 | 98 | self.latex_count, log_error) |
|
99 | 99 | |
|
100 | 100 | def run_bib(self, filename): |
|
101 | 101 | """Run bibtex self.latex_count times.""" |
|
102 | 102 | filename = os.path.splitext(filename)[0] |
|
103 | 103 | |
|
104 | 104 | def log_error(command, out): |
|
105 | 105 | self.log.warn('%s had problems, most likely because there were no citations', |
|
106 | 106 | command[0]) |
|
107 | 107 | self.log.debug(u"%s output: %s\n%s", command[0], command, out) |
|
108 | 108 | |
|
109 | 109 | return self.run_command(self.bib_command, filename, 1, log_error) |
|
110 | 110 | |
|
111 | 111 | def clean_temp_files(self, filename): |
|
112 | 112 | """Remove temporary files created by pdflatex/bibtex.""" |
|
113 | 113 | self.log.info("Removing temporary LaTeX files") |
|
114 | 114 | filename = os.path.splitext(filename)[0] |
|
115 | 115 | for ext in self.temp_file_exts: |
|
116 | 116 | try: |
|
117 | 117 | os.remove(filename+ext) |
|
118 | 118 | except OSError: |
|
119 | 119 | pass |
|
120 | 120 | |
|
121 | 121 | def from_notebook_node(self, nb, resources=None, **kw): |
|
122 | 122 | latex, resources = super(PDFExporter, self).from_notebook_node( |
|
123 | 123 | nb, resources=resources, **kw |
|
124 | 124 | ) |
|
125 | 125 | with TemporaryWorkingDirectory() as td: |
|
126 | 126 | notebook_name = "notebook" |
|
127 | 127 | tex_file = self.writer.write(latex, resources, notebook_name=notebook_name) |
|
128 | 128 | self.log.info("Building PDF") |
|
129 | 129 | rc = self.run_latex(tex_file) |
|
130 | 130 | if not rc: |
|
131 | 131 | rc = self.run_bib(tex_file) |
|
132 | 132 | if not rc: |
|
133 | 133 | rc = self.run_latex(tex_file) |
|
134 | 134 | |
|
135 | 135 | pdf_file = notebook_name + '.pdf' |
|
136 | 136 | if not os.path.isfile(pdf_file): |
|
137 | 137 | raise RuntimeError("PDF creation failed") |
|
138 | 138 | self.log.info('PDF successfully created') |
|
139 | 139 | with open(pdf_file, 'rb') as f: |
|
140 | 140 | pdf_data = f.read() |
|
141 | 141 | |
|
142 | 142 | # convert output extension to pdf |
|
143 | 143 | # the writer above required it to be tex |
|
144 | 144 | resources['output_extension'] = '.pdf' |
|
145 | 145 | |
|
146 | 146 | return pdf_data, resources |
|
147 | 147 |
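The PDFExporter above reads the generated file with 'rb' and returns raw PDF bytes, so callers must write the result in binary mode. A sketch; the import path and filename are assumptions, and a working pdflatex install is required.

    # Sketch: writing PDFExporter output; the data is bytes, hence 'wb'.
    from jupyter_nbconvert.exporters.pdf import PDFExporter   # path assumed

    exporter = PDFExporter()
    pdf_data, resources = exporter.from_filename('example.ipynb')
    with open('example' + resources['output_extension'], 'wb') as f:   # '.pdf'
        f.write(pdf_data)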
@@ -1,43 +1,43 @@
|
1 | 1 | """HTML slide show Exporter class""" |
|
2 | 2 | |
|
3 | 3 | #----------------------------------------------------------------------------- |
|
4 | 4 | # Copyright (c) 2013, the IPython Development Team. |
|
5 | 5 | # |
|
6 | 6 | # Distributed under the terms of the Modified BSD License. |
|
7 | 7 | # |
|
8 | 8 | # The full license is in the file COPYING.txt, distributed with this software. |
|
9 | 9 | #----------------------------------------------------------------------------- |
|
10 | 10 | |
|
11 | 11 | #----------------------------------------------------------------------------- |
|
12 | 12 | # Imports |
|
13 | 13 | #----------------------------------------------------------------------------- |
|
14 | 14 | |
|
15 | | from IPython.nbconvert import preprocessors |
| 15 | from jupyter_nbconvert import preprocessors |
|
16 | 16 | from IPython.config import Config |
|
17 | 17 | |
|
18 | 18 | from .html import HTMLExporter |
|
19 | 19 | |
|
20 | 20 | #----------------------------------------------------------------------------- |
|
21 | 21 | # Classes |
|
22 | 22 | #----------------------------------------------------------------------------- |
|
23 | 23 | |
|
24 | 24 | class SlidesExporter(HTMLExporter): |
|
25 | 25 | """Exports HTML slides with reveal.js""" |
|
26 | 26 | |
|
27 | 27 | def _file_extension_default(self): |
|
28 | 28 | return '.slides.html' |
|
29 | 29 | |
|
30 | 30 | def _template_file_default(self): |
|
31 | 31 | return 'slides_reveal' |
|
32 | 32 | |
|
33 | 33 | output_mimetype = 'text/html' |
|
34 | 34 | |
|
35 | 35 | @property |
|
36 | 36 | def default_config(self): |
|
37 | 37 | c = Config({ |
|
38 | 38 | 'RevealHelpPreprocessor': { |
|
39 | 39 | 'enabled': True, |
|
40 | 40 | }, |
|
41 | 41 | }) |
|
42 | 42 | c.merge(super(SlidesExporter,self).default_config) |
|
43 | 43 | return c |
@@ -1,321 +1,321 @@
|
1 | 1 | """This module defines TemplateExporter, a highly configurable converter |
|
2 | 2 | that uses Jinja2 to export notebook files into different formats. |
|
3 | 3 | """ |
|
4 | 4 | |
|
5 | 5 | #----------------------------------------------------------------------------- |
|
6 | 6 | # Copyright (c) 2013, the IPython Development Team. |
|
7 | 7 | # |
|
8 | 8 | # Distributed under the terms of the Modified BSD License. |
|
9 | 9 | # |
|
10 | 10 | # The full license is in the file COPYING.txt, distributed with this software. |
|
11 | 11 | #----------------------------------------------------------------------------- |
|
12 | 12 | |
|
13 | 13 | #----------------------------------------------------------------------------- |
|
14 | 14 | # Imports |
|
15 | 15 | #----------------------------------------------------------------------------- |
|
16 | 16 | |
|
17 | 17 | from __future__ import print_function, absolute_import |
|
18 | 18 | |
|
19 | 19 | # Stdlib imports |
|
20 | 20 | import os |
|
21 | 21 | |
|
22 | 22 | # other libs/dependencies are imported at runtime |
|
23 | 23 | # to move ImportErrors to runtime when the requirement is actually needed |
|
24 | 24 | |
|
25 | 25 | # IPython imports |
|
26 | 26 | from IPython.utils.traitlets import MetaHasTraits, Unicode, List, Dict, Any |
|
27 | 27 | from IPython.utils.importstring import import_item |
|
28 | 28 | from IPython.utils import py3compat, text |
|
29 | 29 | |
|
30 | | from IPython.nbconvert import filters |
| 30 | from jupyter_nbconvert import filters |
|
31 | 31 | from .exporter import Exporter |
|
32 | 32 | |
|
33 | 33 | #----------------------------------------------------------------------------- |
|
34 | 34 | # Globals and constants |
|
35 | 35 | #----------------------------------------------------------------------------- |
|
36 | 36 | |
|
37 | 37 | #Jinja2 extensions to load. |
|
38 | 38 | JINJA_EXTENSIONS = ['jinja2.ext.loopcontrols'] |
|
39 | 39 | |
|
40 | 40 | default_filters = { |
|
41 | 41 | 'indent': text.indent, |
|
42 | 42 | 'markdown2html': filters.markdown2html, |
|
43 | 43 | 'ansi2html': filters.ansi2html, |
|
44 | 44 | 'filter_data_type': filters.DataTypeFilter, |
|
45 | 45 | 'get_lines': filters.get_lines, |
|
46 | 46 | 'highlight2html': filters.Highlight2HTML, |
|
47 | 47 | 'highlight2latex': filters.Highlight2Latex, |
|
48 | 48 | 'ipython2python': filters.ipython2python, |
|
49 | 49 | 'posix_path': filters.posix_path, |
|
50 | 50 | 'markdown2latex': filters.markdown2latex, |
|
51 | 51 | 'markdown2rst': filters.markdown2rst, |
|
52 | 52 | 'comment_lines': filters.comment_lines, |
|
53 | 53 | 'strip_ansi': filters.strip_ansi, |
|
54 | 54 | 'strip_dollars': filters.strip_dollars, |
|
55 | 55 | 'strip_files_prefix': filters.strip_files_prefix, |
|
56 | 56 | 'html2text' : filters.html2text, |
|
57 | 57 | 'add_anchor': filters.add_anchor, |
|
58 | 58 | 'ansi2latex': filters.ansi2latex, |
|
59 | 59 | 'wrap_text': filters.wrap_text, |
|
60 | 60 | 'escape_latex': filters.escape_latex, |
|
61 | 61 | 'citation2latex': filters.citation2latex, |
|
62 | 62 | 'path2url': filters.path2url, |
|
63 | 63 | 'add_prompts': filters.add_prompts, |
|
64 | 64 | 'ascii_only': filters.ascii_only, |
|
65 | 65 | 'prevent_list_blocks': filters.prevent_list_blocks, |
|
66 | 66 | } |
|
67 | 67 | |
|
68 | 68 | #----------------------------------------------------------------------------- |
|
69 | 69 | # Class |
|
70 | 70 | #----------------------------------------------------------------------------- |
|
71 | 71 | |
|
72 | 72 | class TemplateExporter(Exporter): |
|
73 | 73 | """ |
|
74 | 74 | Exports notebooks into other file formats. Uses Jinja 2 templating engine |
|
75 | 75 | to output new formats. Inherit from this class if you are creating a new |
|
76 | 76 | template type along with new filters/preprocessors. If the filters/ |
|
77 | 77 | preprocessors provided by default suffice, there is no need to inherit from |
|
78 | 78 | this class. Instead, override the template_file and file_extension |
|
79 | 79 | traits via a config file. |
|
80 | 80 | |
|
81 | 81 | {filters} |
|
82 | 82 | """ |
|
83 | 83 | |
|
84 | 84 | # finish the docstring |
|
85 | 85 | __doc__ = __doc__.format(filters = '- '+'\n - '.join(default_filters.keys())) |
|
86 | 86 | |
|
87 | 87 | |
|
88 | 88 | template_file = Unicode(u'default', |
|
89 | 89 | config=True, |
|
90 | 90 | help="Name of the template file to use") |
|
91 | 91 | def _template_file_changed(self, name, old, new): |
|
92 | 92 | if new == 'default': |
|
93 | 93 | self.template_file = self.default_template |
|
94 | 94 | else: |
|
95 | 95 | self.template_file = new |
|
96 | 96 | self.template = None |
|
97 | 97 | self._load_template() |
|
98 | 98 | |
|
99 | 99 | default_template = Unicode(u'') |
|
100 | 100 | template = Any() |
|
101 | 101 | environment = Any() |
|
102 | 102 | |
|
103 | 103 | template_path = List(['.'], config=True) |
|
104 | 104 | def _template_path_changed(self, name, old, new): |
|
105 | 105 | self._load_template() |
|
106 | 106 | |
|
107 | 107 | default_template_path = Unicode( |
|
108 | 108 | os.path.join("..", "templates"), |
|
109 | 109 | help="Path where the template files are located.") |
|
110 | 110 | |
|
111 | 111 | template_skeleton_path = Unicode( |
|
112 | 112 | os.path.join("..", "templates", "skeleton"), |
|
113 | 113 | help="Path where the template skeleton files are located.") |
|
114 | 114 | |
|
115 | 115 | #Jinja block definitions |
|
116 | 116 | jinja_comment_block_start = Unicode("", config=True) |
|
117 | 117 | jinja_comment_block_end = Unicode("", config=True) |
|
118 | 118 | jinja_variable_block_start = Unicode("", config=True) |
|
119 | 119 | jinja_variable_block_end = Unicode("", config=True) |
|
120 | 120 | jinja_logic_block_start = Unicode("", config=True) |
|
121 | 121 | jinja_logic_block_end = Unicode("", config=True) |
|
122 | 122 | |
|
123 | 123 | #Extension that the template files use. |
|
124 | 124 | template_extension = Unicode(".tpl", config=True) |
|
125 | 125 | |
|
126 | 126 | filters = Dict(config=True, |
|
127 | 127 | help="""Dictionary of filters, by name and namespace, to add to the Jinja |
|
128 | 128 | environment.""") |
|
129 | 129 | |
|
130 | 130 | raw_mimetypes = List(config=True, |
|
131 | 131 | help="""formats of raw cells to be included in this Exporter's output.""" |
|
132 | 132 | ) |
|
133 | 133 | def _raw_mimetypes_default(self): |
|
134 | 134 | return [self.output_mimetype, ''] |
|
135 | 135 | |
|
136 | 136 | |
|
137 | 137 | def __init__(self, config=None, extra_loaders=None, **kw): |
|
138 | 138 | """ |
|
139 | 139 | Public constructor |
|
140 | 140 | |
|
141 | 141 | Parameters |
|
142 | 142 | ---------- |
|
143 | 143 | config : config |
|
144 | 144 | User configuration instance. |
|
145 | 145 | extra_loaders : list[of Jinja Loaders] |
|
146 | 146 | ordered list of Jinja loaders used to find templates. Will be tried in order |
|
147 | 147 | before the default FileSystem ones. |
|
148 | 148 | template : str (optional, kw arg) |
|
149 | 149 | Template to use when exporting. |
|
150 | 150 | """ |
|
151 | 151 | super(TemplateExporter, self).__init__(config=config, **kw) |
|
152 | 152 | |
|
153 | 153 | #Init |
|
154 | 154 | self._init_template() |
|
155 | 155 | self._init_environment(extra_loaders=extra_loaders) |
|
156 | 156 | self._init_filters() |
|
157 | 157 | |
|
158 | 158 | |
|
159 | 159 | def _load_template(self): |
|
160 | 160 | """Load the Jinja template object from the template file |
|
161 | 161 | |
|
162 | 162 | This is a no-op if the template attribute is already defined, |
|
163 | 163 | or the Jinja environment is not setup yet. |
|
164 | 164 | |
|
165 | 165 | This is triggered by various trait changes that would change the template. |
|
166 | 166 | """ |
|
167 | 167 | from jinja2 import TemplateNotFound |
|
168 | 168 | |
|
169 | 169 | if self.template is not None: |
|
170 | 170 | return |
|
171 | 171 | # called too early, do nothing |
|
172 | 172 | if self.environment is None: |
|
173 | 173 | return |
|
174 | 174 | # Try different template names during conversion. First try to load the |
|
175 | 175 | # template by name with extension added, then try loading the template |
|
176 | 176 | # as if the name is explicitly specified, then try the name as a |
|
177 | 177 | # 'flavor', and lastly just try to load the template by module name. |
|
178 | 178 | try_names = [] |
|
179 | 179 | if self.template_file: |
|
180 | 180 | try_names.extend([ |
|
181 | 181 | self.template_file + self.template_extension, |
|
182 | 182 | self.template_file, |
|
183 | 183 | ]) |
|
184 | 184 | for try_name in try_names: |
|
185 | 185 | self.log.debug("Attempting to load template %s", try_name) |
|
186 | 186 | try: |
|
187 | 187 | self.template = self.environment.get_template(try_name) |
|
188 | 188 | except (TemplateNotFound, IOError): |
|
189 | 189 | pass |
|
190 | 190 | except Exception as e: |
|
191 | 191 | self.log.warn("Unexpected exception loading template: %s", try_name, exc_info=True) |
|
192 | 192 | else: |
|
193 | 193 | self.log.debug("Loaded template %s", try_name) |
|
194 | 194 | break |
|
195 | 195 | |
|
196 | 196 | def from_notebook_node(self, nb, resources=None, **kw): |
|
197 | 197 | """ |
|
198 | 198 | Convert a notebook from a notebook node instance. |
|
199 | 199 | |
|
200 | 200 | Parameters |
|
201 | 201 | ---------- |
|
202 | 202 | nb : :class:`~IPython.nbformat.NotebookNode` |
|
203 | 203 | Notebook node |
|
204 | 204 | resources : dict |
|
205 | 205 | Additional resources that can be accessed read/write by |
|
206 | 206 | preprocessors and filters. |
|
207 | 207 | """ |
|
208 | 208 | nb_copy, resources = super(TemplateExporter, self).from_notebook_node(nb, resources, **kw) |
|
209 | 209 | resources.setdefault('raw_mimetypes', self.raw_mimetypes) |
|
210 | 210 | |
|
211 | 211 | self._load_template() |
|
212 | 212 | |
|
213 | 213 | if self.template is not None: |
|
214 | 214 | output = self.template.render(nb=nb_copy, resources=resources) |
|
215 | 215 | else: |
|
216 | 216 | raise IOError('template file "%s" could not be found' % self.template_file) |
|
217 | 217 | return output, resources |
|
218 | 218 | |
|
219 | 219 | |
|
220 | 220 | def register_filter(self, name, jinja_filter): |
|
221 | 221 | """ |
|
222 | 222 | Register a filter. |
|
223 | 223 | A filter is a function that accepts and acts on one string. |
|
224 | 224 | The filters are accessible within the Jinja templating engine. |
|
225 | 225 | |
|
226 | 226 | Parameters |
|
227 | 227 | ---------- |
|
228 | 228 | name : str |
|
229 | 229 | name to give the filter in the Jinja engine |
|
230 | 230 | filter : filter |
|
231 | 231 | """ |
|
232 | 232 | if jinja_filter is None: |
|
233 | 233 | raise TypeError('filter') |
|
234 | 234 | isclass = isinstance(jinja_filter, type) |
|
235 | 235 | constructed = not isclass |
|
236 | 236 | |
|
237 | 237 | #Handle filter's registration based on its type |
|
238 | 238 | if constructed and isinstance(jinja_filter, py3compat.string_types): |
|
239 | 239 | #filter is a string, import the namespace and recursively call |
|
240 | 240 | #this register_filter method |
|
241 | 241 | filter_cls = import_item(jinja_filter) |
|
242 | 242 | return self.register_filter(name, filter_cls) |
|
243 | 243 | |
|
244 | 244 | if constructed and hasattr(jinja_filter, '__call__'): |
|
245 | 245 | #filter is a function, no need to construct it. |
|
246 | 246 | self.environment.filters[name] = jinja_filter |
|
247 | 247 | return jinja_filter |
|
248 | 248 | |
|
249 | 249 | elif isclass and isinstance(jinja_filter, MetaHasTraits): |
|
250 | 250 | #filter is configurable. Make sure to pass in new default for |
|
251 | 251 | #the enabled flag if one was specified. |
|
252 | 252 | filter_instance = jinja_filter(parent=self) |
|
253 | 253 | self.register_filter(name, filter_instance ) |
|
254 | 254 | |
|
255 | 255 | elif isclass: |
|
256 | 256 | #filter is not configurable, construct it |
|
257 | 257 | filter_instance = jinja_filter() |
|
258 | 258 | self.register_filter(name, filter_instance) |
|
259 | 259 | |
|
260 | 260 | else: |
|
261 | 261 | #filter is an instance of something without a __call__ |
|
262 | 262 | #attribute. |
|
263 | 263 | raise TypeError('filter') |
|
264 | 264 | |
|
265 | 265 | |
|
266 | 266 | def _init_template(self): |
|
267 | 267 | """ |
|
268 | 268 | Make sure a template name is specified. If one isn't specified, try to |
|
269 | 269 | build one from the information we know. |
|
270 | 270 | """ |
|
271 | 271 | self._template_file_changed('template_file', self.template_file, self.template_file) |
|
272 | 272 | |
|
273 | 273 | |
|
274 | 274 | def _init_environment(self, extra_loaders=None): |
|
275 | 275 | """ |
|
276 | 276 | Create the Jinja templating environment. |
|
277 | 277 | """ |
|
278 | 278 | from jinja2 import Environment, ChoiceLoader, FileSystemLoader |
|
279 | 279 | here = os.path.dirname(os.path.realpath(__file__)) |
|
280 | 280 | loaders = [] |
|
281 | 281 | if extra_loaders: |
|
282 | 282 | loaders.extend(extra_loaders) |
|
283 | 283 | |
|
284 | 284 | paths = self.template_path |
|
285 | 285 | paths.extend([os.path.join(here, self.default_template_path), |
|
286 | 286 | os.path.join(here, self.template_skeleton_path)]) |
|
287 | 287 | loaders.append(FileSystemLoader(paths)) |
|
288 | 288 | |
|
289 | 289 | self.environment = Environment( |
|
290 | 290 | loader= ChoiceLoader(loaders), |
|
291 | 291 | extensions=JINJA_EXTENSIONS |
|
292 | 292 | ) |
|
293 | 293 | |
|
294 | 294 | #Set special Jinja2 syntax that will not conflict with latex. |
|
295 | 295 | if self.jinja_logic_block_start: |
|
296 | 296 | self.environment.block_start_string = self.jinja_logic_block_start |
|
297 | 297 | if self.jinja_logic_block_end: |
|
298 | 298 | self.environment.block_end_string = self.jinja_logic_block_end |
|
299 | 299 | if self.jinja_variable_block_start: |
|
300 | 300 | self.environment.variable_start_string = self.jinja_variable_block_start |
|
301 | 301 | if self.jinja_variable_block_end: |
|
302 | 302 | self.environment.variable_end_string = self.jinja_variable_block_end |
|
303 | 303 | if self.jinja_comment_block_start: |
|
304 | 304 | self.environment.comment_start_string = self.jinja_comment_block_start |
|
305 | 305 | if self.jinja_comment_block_end: |
|
306 | 306 | self.environment.comment_end_string = self.jinja_comment_block_end |
|
307 | 307 | |
|
308 | 308 | |
|
309 | 309 | def _init_filters(self): |
|
310 | 310 | """ |
|
311 | 311 | Register all of the filters required for the exporter. |
|
312 | 312 | """ |
|
313 | 313 | |
|
314 | 314 | #Add default filters to the Jinja2 environment |
|
315 | 315 | for key, value in default_filters.items(): |
|
316 | 316 | self.register_filter(key, value) |
|
317 | 317 | |
|
318 | 318 | #Load user filters. Overwrite existing filters if need be. |
|
319 | 319 | if self.filters: |
|
320 | 320 | for key, user_filter in self.filters.items(): |
|
321 | 321 | self.register_filter(key, user_filter) |
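A sketch of the register_filter() contract above with a plain callable. The import path is assumed; the 'python' template name mirrors the tests in the next hunk.

    # Sketch: adding a custom Jinja filter by name, per register_filter() above.
    from jupyter_nbconvert.exporters.templateexporter import TemplateExporter  # path assumed

    def shout(text):
        """Toy filter: upper-case one string inside the template."""
        return text.upper()

    exporter = TemplateExporter(template_file='python')  # same template the tests use
    exporter.register_filter('shout', shout)
    # Templates rendered by this exporter may now apply: {{ cell.source | shout }}
    body, resources = exporter.from_filename('example.ipynb')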
@@ -1,108 +1,108 @@
|
1 | 1 | """ |
|
2 | 2 | Module with tests for templateexporter.py |
|
3 | 3 | """ |
|
4 | 4 | |
|
5 | 5 | #----------------------------------------------------------------------------- |
|
6 | 6 | # Copyright (c) 2013, the IPython Development Team. |
|
7 | 7 | # |
|
8 | 8 | # Distributed under the terms of the Modified BSD License. |
|
9 | 9 | # |
|
10 | 10 | # The full license is in the file COPYING.txt, distributed with this software. |
|
11 | 11 | #----------------------------------------------------------------------------- |
|
12 | 12 | |
|
13 | 13 | #----------------------------------------------------------------------------- |
|
14 | 14 | # Imports |
|
15 | 15 | #----------------------------------------------------------------------------- |
|
16 | 16 | |
|
17 | 17 | from IPython.config import Config |
|
18 | 18 | |
|
19 | 19 | from .base import ExportersTestsBase |
|
20 | 20 | from .cheese import CheesePreprocessor |
|
21 | 21 | from ..templateexporter import TemplateExporter |
|
22 | 22 | |
|
23 | 23 | |
|
24 | 24 | #----------------------------------------------------------------------------- |
|
25 | 25 | # Class |
|
26 | 26 | #----------------------------------------------------------------------------- |
|
27 | 27 | |
|
28 | 28 | class TestExporter(ExportersTestsBase): |
|
29 | 29 | """Contains test functions for exporter.py""" |
|
30 | 30 | |
|
31 | 31 | |
|
32 | 32 | def test_constructor(self): |
|
33 | 33 | """ |
|
34 | 34 | Can a TemplateExporter be constructed? |
|
35 | 35 | """ |
|
36 | 36 | TemplateExporter() |
|
37 | 37 | |
|
38 | 38 | |
|
39 | 39 | def test_export(self): |
|
40 | 40 | """ |
|
41 | 41 | Can a TemplateExporter export something? |
|
42 | 42 | """ |
|
43 | 43 | exporter = self._make_exporter() |
|
44 | 44 | (output, resources) = exporter.from_filename(self._get_notebook()) |
|
45 | 45 | assert len(output) > 0 |
|
46 | 46 | |
|
47 | 47 | |
|
48 | 48 | def test_extract_outputs(self): |
|
49 | 49 | """ |
|
50 | 50 | If the ExtractOutputPreprocessor is enabled, are outputs extracted? |
|
51 | 51 | """ |
|
52 | 52 | config = Config({'ExtractOutputPreprocessor': {'enabled': True}}) |
|
53 | 53 | exporter = self._make_exporter(config=config) |
|
54 | 54 | (output, resources) = exporter.from_filename(self._get_notebook()) |
|
55 | 55 | assert resources is not None |
|
56 | 56 | assert isinstance(resources['outputs'], dict) |
|
57 | 57 | assert len(resources['outputs']) > 0 |
|
58 | 58 | |
|
59 | 59 | |
|
60 | 60 | def test_preprocessor_class(self): |
|
61 | 61 | """ |
|
62 | 62 | Can a preprocessor be added to the preprocessors list by class type? |
|
63 | 63 | """ |
|
64 | 64 | config = Config({'Exporter': {'preprocessors': [CheesePreprocessor]}}) |
|
65 | 65 | exporter = self._make_exporter(config=config) |
|
66 | 66 | (output, resources) = exporter.from_filename(self._get_notebook()) |
|
67 | 67 | assert resources is not None |
|
68 | 68 | assert resources['cheese'] == 'real' |
|
69 | 69 | |
|
70 | 70 | |
|
71 | 71 | def test_preprocessor_instance(self): |
|
72 | 72 | """ |
|
73 | 73 | Can a preprocessor be added to the preprocessors list by instance? |
|
74 | 74 | """ |
|
75 | 75 | config = Config({'Exporter': {'preprocessors': [CheesePreprocessor()]}}) |
|
76 | 76 | exporter = self._make_exporter(config=config) |
|
77 | 77 | (output, resources) = exporter.from_filename(self._get_notebook()) |
|
78 | 78 | assert resources is not None |
|
79 | 79 | assert resources['cheese'] == 'real' |
|
80 | 80 | |
|
81 | 81 | |
|
82 | 82 | def test_preprocessor_dottedobjectname(self): |
|
83 | 83 | """ |
|
84 | 84 | Can a preprocessor be added to the preprocessors list by dotted object name? |
|
85 | 85 | """ |
|
86 | | config = Config({'Exporter': {'preprocessors': ['IPython.nbconvert.exporters.tests.cheese.CheesePreprocessor']}}) |
| 86 | config = Config({'Exporter': {'preprocessors': ['jupyter_nbconvert.exporters.tests.cheese.CheesePreprocessor']}}) |
|
87 | 87 | exporter = self._make_exporter(config=config) |
|
88 | 88 | (output, resources) = exporter.from_filename(self._get_notebook()) |
|
89 | 89 | assert resources is not None |
|
90 | 90 | assert resources['cheese'] == 'real' |
|
91 | 91 | |
|
92 | 92 | |
|
93 | 93 | def test_preprocessor_via_method(self): |
|
94 | 94 | """ |
|
95 | 95 | Can a preprocessor be added via the Exporter convenience method? |
|
96 | 96 | """ |
|
97 | 97 | exporter = self._make_exporter() |
|
98 | 98 | exporter.register_preprocessor(CheesePreprocessor, enabled=True) |
|
99 | 99 | (output, resources) = exporter.from_filename(self._get_notebook()) |
|
100 | 100 | assert resources is not None |
|
101 | 101 | assert resources['cheese'] == 'real' |
|
102 | 102 | |
|
103 | 103 | |
|
104 | 104 | def _make_exporter(self, config=None): |
|
105 | 105 | # Create the exporter instance, make sure to set a template name since |
|
106 | 106 | # the base TemplateExporter doesn't have a template associated with it. |
|
107 | 107 | exporter = TemplateExporter(config=config, template_file='python') |
|
108 | 108 | return exporter |
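For context, a reconstruction of what the CheesePreprocessor used by these tests does; the real implementation lives in exporters/tests/cheese.py, and the base-class import path here is an assumption.

    # Illustrative reconstruction only -- not the actual cheese.py from the repo.
    from jupyter_nbconvert.preprocessors import Preprocessor   # import path assumed

    class CheesePreprocessor(Preprocessor):
        """Stamps resources['cheese'] = 'real' so the tests can verify it ran."""
        def preprocess(self, nb, resources):
            resources['cheese'] = 'real'
            return nb, resources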
@@ -1,135 +1,135 @@
|
1 | 1 | """ |
|
2 | 2 | Module containing filter functions that allow code to be highlighted |
|
3 | 3 | from within Jinja templates. |
|
4 | 4 | """ |
|
5 | 5 | |
|
6 | 6 | # Copyright (c) IPython Development Team. |
|
7 | 7 | # Distributed under the terms of the Modified BSD License. |
|
8 | 8 | |
|
9 | 9 | # pygments must not be imported at the module level |
|
10 | 10 | # because errors should be raised at runtime if it's actually needed, |
|
11 | 11 | # not import time, when it may not be needed. |
|
12 | 12 | |
|
13 | | from IPython.nbconvert.utils.base import NbConvertBase |
| 13 | from jupyter_nbconvert.utils.base import NbConvertBase |
|
14 | 14 | from warnings import warn |
|
15 | 15 | |
|
16 | 16 | MULTILINE_OUTPUTS = ['text', 'html', 'svg', 'latex', 'javascript', 'json'] |
|
17 | 17 | |
|
18 | 18 | __all__ = [ |
|
19 | 19 | 'Highlight2HTML', |
|
20 | 20 | 'Highlight2Latex' |
|
21 | 21 | ] |
|
22 | 22 | |
|
23 | 23 | class Highlight2HTML(NbConvertBase): |
|
24 | 24 | def __init__(self, pygments_lexer=None, **kwargs): |
|
25 | 25 | self.pygments_lexer = pygments_lexer or 'ipython3' |
|
26 | 26 | super(Highlight2HTML, self).__init__(**kwargs) |
|
27 | 27 | |
|
28 | 28 | def _default_language_changed(self, name, old, new): |
|
29 | 29 | warn('Setting default_language in config is deprecated, ' |
|
30 | 30 | 'please use language_info metadata instead.') |
|
31 | 31 | self.pygments_lexer = new |
|
32 | 32 | |
|
33 | 33 | def __call__(self, source, language=None, metadata=None): |
|
34 | 34 | """ |
|
35 | 35 | Return a syntax-highlighted version of the input source as html output. |
|
36 | 36 | |
|
37 | 37 | Parameters |
|
38 | 38 | ---------- |
|
39 | 39 | source : str |
|
40 | 40 | source of the cell to highlight |
|
41 | 41 | language : str |
|
42 | 42 | language to highlight the syntax of |
|
43 | 43 | metadata : NotebookNode cell metadata |
|
44 | 44 | metadata of the cell to highlight |
|
45 | 45 | """ |
|
46 | 46 | from pygments.formatters import HtmlFormatter |
|
47 | 47 | |
|
48 | 48 | if not language: |
|
49 | 49 | language=self.pygments_lexer |
|
50 | 50 | |
|
51 | 51 | return _pygments_highlight(source if len(source) > 0 else ' ', |
|
52 | 52 | # needed to help post processors: |
|
53 | 53 | HtmlFormatter(cssclass=" highlight hl-"+language), |
|
54 | 54 | language, metadata) |
|
55 | 55 | |
|
56 | 56 | |
|
57 | 57 | class Highlight2Latex(NbConvertBase): |
|
58 | 58 | def __init__(self, pygments_lexer=None, **kwargs): |
|
59 | 59 | self.pygments_lexer = pygments_lexer or 'ipython3' |
|
60 | 60 | super(Highlight2Latex, self).__init__(**kwargs) |
|
61 | 61 | |
|
62 | 62 | def _default_language_changed(self, name, old, new): |
|
63 | 63 | warn('Setting default_language in config is deprecated, ' |
|
64 | 64 | 'please use language_info metadata instead.') |
|
65 | 65 | self.pygments_lexer = new |
|
66 | 66 | |
|
67 | 67 | def __call__(self, source, language=None, metadata=None, strip_verbatim=False): |
|
68 | 68 | """ |
|
69 | 69 | Return a syntax-highlighted version of the input source as latex output. |
|
70 | 70 | |
|
71 | 71 | Parameters |
|
72 | 72 | ---------- |
|
73 | 73 | source : str |
|
74 | 74 | source of the cell to highlight |
|
75 | 75 | language : str |
|
76 | 76 | language to highlight the syntax of |
|
77 | 77 | metadata : NotebookNode cell metadata |
|
78 | 78 | metadata of the cell to highlight |
|
79 | 79 | strip_verbatim : bool |
|
80 | 80 | remove the Verbatim environment that pygments provides by default |
|
81 | 81 | """ |
|
82 | 82 | from pygments.formatters import LatexFormatter |
|
83 | 83 | if not language: |
|
84 | 84 | language=self.pygments_lexer |
|
85 | 85 | |
|
86 | 86 | latex = _pygments_highlight(source, LatexFormatter(), language, metadata) |
|
87 | 87 | if strip_verbatim: |
|
88 | 88 | latex = latex.replace(r'\begin{Verbatim}[commandchars=\\\{\}]' + '\n', '') |
|
89 | 89 | return latex.replace('\n\\end{Verbatim}\n', '') |
|
90 | 90 | else: |
|
91 | 91 | return latex |
|
92 | 92 | |
|
93 | 93 | |
|
94 | 94 | |
|
95 | 95 | def _pygments_highlight(source, output_formatter, language='ipython', metadata=None): |
|
96 | 96 | """ |
|
97 | 97 | Return a syntax-highlighted version of the input source |
|
98 | 98 | |
|
99 | 99 | Parameters |
|
100 | 100 | ---------- |
|
101 | 101 | source : str |
|
102 | 102 | source of the cell to highlight |
|
103 | 103 | output_formatter : Pygments formatter |
|
104 | 104 | language : str |
|
105 | 105 | language to highlight the syntax of |
|
106 | 106 | metadata : NotebookNode cell metadata |
|
107 | 107 | metadata of the cell to highlight |
|
108 | 108 | """ |
|
109 | 109 | from pygments import highlight |
|
110 | 110 | from pygments.lexers import get_lexer_by_name |
|
111 | 111 | from pygments.util import ClassNotFound |
|
112 | 112 | from IPython.lib.lexers import IPythonLexer, IPython3Lexer |
|
113 | 113 | |
|
114 | 114 | # If the cell uses a magic extension language, |
|
115 | 115 | # use the magic language instead. |
|
116 | 116 | if language.startswith('ipython') \ |
|
117 | 117 | and metadata \ |
|
118 | 118 | and 'magics_language' in metadata: |
|
119 | 119 | |
|
120 | 120 | language = metadata['magics_language'] |
|
121 | 121 | |
|
122 | 122 | if language == 'ipython2': |
|
123 | 123 | lexer = IPythonLexer() |
|
124 | 124 | elif language == 'ipython3': |
|
125 | 125 | lexer = IPython3Lexer() |
|
126 | 126 | else: |
|
127 | 127 | try: |
|
128 | 128 | lexer = get_lexer_by_name(language, stripall=True) |
|
129 | 129 | except ClassNotFound: |
|
130 | 130 | warn("No lexer found for language %r. Treating as plain text." % language) |
|
131 | 131 | from pygments.lexers.special import TextLexer |
|
132 | 132 | lexer = TextLexer() |
|
133 | 133 | |
|
134 | 134 | |
|
135 | 135 | return highlight(source, lexer, output_formatter) |
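Usage sketch (not part of the changeset): both filters are plain callables, so they can be exercised directly; the source fragment and lexer name here are illustrative.

    from jupyter_nbconvert.filters.highlight import Highlight2HTML, Highlight2Latex

    to_html = Highlight2HTML(pygments_lexer='ipython3')
    print(to_html('print("hi")'))          # <div class=" highlight hl-ipython3">...</div>

    to_latex = Highlight2Latex(pygments_lexer='ipython3')
    print(to_latex('print("hi")', strip_verbatim=True))   # Verbatim wrapper stripped
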
@@ -1,140 +1,140 b'' | |||
|
1 | 1 | """Markdown filters |
|
2 | 2 | |
|
3 | 3 | This file contains a collection of utility filters for dealing with |
|
4 | 4 | markdown within Jinja templates. |
|
5 | 5 | """ |
|
6 | 6 | # Copyright (c) IPython Development Team. |
|
7 | 7 | # Distributed under the terms of the Modified BSD License. |
|
8 | 8 | |
|
9 | 9 | from __future__ import print_function |
|
10 | 10 | |
|
11 | 11 | import os |
|
12 | 12 | import subprocess |
|
13 | 13 | from io import TextIOWrapper, BytesIO |
|
14 | 14 | |
|
15 | 15 | try: |
|
16 | 16 | from .markdown_mistune import markdown2html_mistune |
|
17 | 17 | except ImportError as e: |
|
18 | 18 | # store in variable for Python 3 |
|
19 | 19 | _mistune_import_error = e |
|
20 | 20 | def markdown2html_mistune(source): |
|
21 | 21 | """mistune is unavailable, raise ImportError""" |
|
22 | 22 | raise ImportError("markdown2html requires mistune: %s" % _mistune_import_error) |
|
23 | 23 | |
|
24 | from IPython.nbconvert.utils.pandoc import pandoc

25 | from IPython.nbconvert.utils.exceptions import ConversionException

24 | from jupyter_nbconvert.utils.pandoc import pandoc |

25 | from jupyter_nbconvert.utils.exceptions import ConversionException |
|
26 | 26 | from IPython.utils.process import get_output_error_code |
|
27 | 27 | from IPython.utils.py3compat import cast_bytes |
|
28 | 28 | from IPython.utils.version import check_version |
|
29 | 29 | |
|
30 | 30 | |
|
31 | 31 | marked = os.path.join(os.path.dirname(__file__), "marked.js") |
|
32 | 32 | _node = None |
|
33 | 33 | |
|
34 | 34 | __all__ = [ |
|
35 | 35 | 'markdown2html', |
|
36 | 36 | 'markdown2html_pandoc', |
|
37 | 37 | 'markdown2html_marked', |
|
38 | 38 | 'markdown2html_mistune', |
|
39 | 39 | 'markdown2latex', |
|
40 | 40 | 'markdown2rst', |
|
41 | 41 | ] |
|
42 | 42 | |
|
43 | 43 | class NodeJSMissing(ConversionException): |
|
44 | 44 | """Exception raised when node.js is missing.""" |
|
45 | 45 | pass |
|
46 | 46 | |
|
47 | 47 | |
|
48 | 48 | def markdown2latex(source, markup='markdown', extra_args=None): |
|
49 | 49 | """Convert a markdown string to LaTeX via pandoc. |
|
50 | 50 | |
|
51 | 51 | This function will raise an error if pandoc is not installed. |
|
52 | 52 | Any error messages generated by pandoc are printed to stderr. |
|
53 | 53 | |
|
54 | 54 | Parameters |
|
55 | 55 | ---------- |
|
56 | 56 | source : string |
|
57 | 57 | Input string, assumed to be valid markdown. |
|
58 | 58 | markup : string |
|
59 | 59 | Markup used by pandoc's reader |
|
60 | 60 | default : pandoc extended markdown |
|
61 | 61 | (see http://johnmacfarlane.net/pandoc/README.html#pandocs-markdown) |
|
62 | 62 | |
|
63 | 63 | Returns |
|
64 | 64 | ------- |
|
65 | 65 | out : string |
|
66 | 66 | Output as returned by pandoc. |
|
67 | 67 | """ |
|
68 | 68 | return pandoc(source, markup, 'latex', extra_args=extra_args) |
|
69 | 69 | |
|
70 | 70 | |
|
71 | 71 | def markdown2html_pandoc(source, extra_args=None): |
|
72 | 72 | """Convert a markdown string to HTML via pandoc""" |
|
73 | 73 | extra_args = extra_args or ['--mathjax'] |
|
74 | 74 | return pandoc(source, 'markdown', 'html', extra_args=extra_args) |
|
75 | 75 | |
|
76 | 76 | |
|
77 | 77 | def _find_nodejs(): |
|
78 | 78 | global _node |
|
79 | 79 | if _node is None: |
|
80 | 80 | # prefer md2html via marked if node.js >= 0.9.12 is available |
|
81 | 81 | # node is called nodejs on debian, so try that first |
|
82 | 82 | _node = 'nodejs' |
|
83 | 83 | if not _verify_node(_node): |
|
84 | 84 | _node = 'node' |
|
85 | 85 | return _node |
|
86 | 86 | |
|
87 | 87 | def markdown2html_marked(source, encoding='utf-8'): |
|
88 | 88 | """Convert a markdown string to HTML via marked""" |
|
89 | 89 | command = [_find_nodejs(), marked] |
|
90 | 90 | try: |
|
91 | 91 | p = subprocess.Popen(command, |
|
92 | 92 | stdin=subprocess.PIPE, stdout=subprocess.PIPE |
|
93 | 93 | ) |
|
94 | 94 | except OSError as e: |
|
95 | 95 | raise NodeJSMissing( |
|
96 | 96 | "The command '%s' returned an error: %s.\n" % (" ".join(command), e) + |
|
97 | 97 | "Please check that Node.js is installed." |
|
98 | 98 | ) |
|
99 | 99 | out, _ = p.communicate(cast_bytes(source, encoding)) |
|
100 | 100 | out = TextIOWrapper(BytesIO(out), encoding, 'replace').read() |
|
101 | 101 | return out.rstrip('\n') |
|
102 | 102 | |
|
103 | 103 | # The mistune renderer is the default, because it's simple to depend on it |
|
104 | 104 | markdown2html = markdown2html_mistune |
|
105 | 105 | |
|
106 | 106 | def markdown2rst(source, extra_args=None): |
|
107 | 107 | """Convert a markdown string to ReST via pandoc. |
|
108 | 108 | |
|
109 | 109 | This function will raise an error if pandoc is not installed. |
|
110 | 110 | Any error messages generated by pandoc are printed to stderr. |
|
111 | 111 | |
|
112 | 112 | Parameters |
|
113 | 113 | ---------- |
|
114 | 114 | source : string |
|
115 | 115 | Input string, assumed to be valid markdown. |
|
116 | 116 | |
|
117 | 117 | Returns |
|
118 | 118 | ------- |
|
119 | 119 | out : string |
|
120 | 120 | Output as returned by pandoc. |
|
121 | 121 | """ |
|
122 | 122 | return pandoc(source, 'markdown', 'rst', extra_args=extra_args) |
|
123 | 123 | |
|
124 | 124 | def _verify_node(cmd): |
|
125 | 125 | """Verify that the node command exists and is at least the minimum supported |
|
126 | 126 | version of node. |
|
127 | 127 | |
|
128 | 128 | Parameters |
|
129 | 129 | ---------- |
|
130 | 130 | cmd : string |
|
131 | 131 | Node command to verify (i.e. 'node').""" 
|
132 | 132 | try: |
|
133 | 133 | out, err, return_code = get_output_error_code([cmd, '--version']) |
|
134 | 134 | except OSError: |
|
135 | 135 | # Command not found |
|
136 | 136 | return False |
|
137 | 137 | if return_code: |
|
138 | 138 | # Command error |
|
139 | 139 | return False |
|
140 | 140 | return check_version(out.lstrip('v'), '0.9.12') |
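Usage sketch (not part of the changeset): markdown2html defaults to the mistune renderer, while the latex/rst converters shell out to pandoc and raise if it is missing; the sample strings are illustrative.

    from jupyter_nbconvert.filters.markdown import (
        markdown2html, markdown2latex, markdown2rst)

    print(markdown2html('# Heading\n\nSome *emphasis*.'))   # mistune-based default
    print(markdown2latex('Some *emphasis*.'))               # requires pandoc
    print(markdown2rst('Some *emphasis*.'))                 # requires pandoc
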
@@ -1,122 +1,122 b'' | |||
|
1 | 1 | """Markdown filters with mistune |
|
2 | 2 | |
|
3 | 3 | Used from markdown.py |
|
4 | 4 | """ |
|
5 | 5 | # Copyright (c) IPython Development Team. |
|
6 | 6 | # Distributed under the terms of the Modified BSD License. |
|
7 | 7 | |
|
8 | 8 | from __future__ import print_function |
|
9 | 9 | |
|
10 | 10 | import re |
|
11 | 11 | |
|
12 | 12 | import mistune |
|
13 | 13 | |
|
14 | 14 | from pygments import highlight |
|
15 | 15 | from pygments.lexers import get_lexer_by_name |
|
16 | 16 | from pygments.formatters import HtmlFormatter |
|
17 | 17 | from pygments.util import ClassNotFound |
|
18 | 18 | |
|
19 | from IPython.nbconvert.filters.strings import add_anchor

20 | from IPython.nbconvert.utils.exceptions import ConversionException

19 | from jupyter_nbconvert.filters.strings import add_anchor |

20 | from jupyter_nbconvert.utils.exceptions import ConversionException |
|
21 | 21 | from IPython.utils.decorators import undoc |
|
22 | 22 | |
|
23 | 23 | |
|
24 | 24 | @undoc |
|
25 | 25 | class MathBlockGrammar(mistune.BlockGrammar): |
|
26 | 26 | block_math = re.compile(r"^\$\$(.*?)\$\$", re.DOTALL) |
|
27 | 27 | latex_environment = re.compile(r"^\\begin\{([a-z]*\*?)\}(.*?)\\end\{\1\}", |
|
28 | 28 | re.DOTALL) |
|
29 | 29 | |
|
30 | 30 | @undoc |
|
31 | 31 | class MathBlockLexer(mistune.BlockLexer): |
|
32 | 32 | default_rules = ['block_math', 'latex_environment'] + mistune.BlockLexer.default_rules |
|
33 | 33 | |
|
34 | 34 | def __init__(self, rules=None, **kwargs): |
|
35 | 35 | if rules is None: |
|
36 | 36 | rules = MathBlockGrammar() |
|
37 | 37 | super(MathBlockLexer, self).__init__(rules, **kwargs) |
|
38 | 38 | |
|
39 | 39 | def parse_block_math(self, m): |
|
40 | 40 | """Parse a $$math$$ block""" |
|
41 | 41 | self.tokens.append({ |
|
42 | 42 | 'type': 'block_math', |
|
43 | 43 | 'text': m.group(1) |
|
44 | 44 | }) |
|
45 | 45 | |
|
46 | 46 | def parse_latex_environment(self, m): |
|
47 | 47 | self.tokens.append({ |
|
48 | 48 | 'type': 'latex_environment', |
|
49 | 49 | 'name': m.group(1), |
|
50 | 50 | 'text': m.group(2) |
|
51 | 51 | }) |
|
52 | 52 | |
|
53 | 53 | @undoc |
|
54 | 54 | class MathInlineGrammar(mistune.InlineGrammar): |
|
55 | 55 | math = re.compile(r"^\$(.+?)\$") |
|
56 | 56 | block_math = re.compile(r"^\$\$(.+?)\$\$", re.DOTALL) |
|
57 | 57 | text = re.compile(r'^[\s\S]+?(?=[\\<!\[_*`~$]|https?://| {2,}\n|$)') |
|
58 | 58 | |
|
59 | 59 | @undoc |
|
60 | 60 | class MathInlineLexer(mistune.InlineLexer): |
|
61 | 61 | default_rules = ['math', 'block_math'] + mistune.InlineLexer.default_rules |
|
62 | 62 | |
|
63 | 63 | def __init__(self, renderer, rules=None, **kwargs): |
|
64 | 64 | if rules is None: |
|
65 | 65 | rules = MathInlineGrammar() |
|
66 | 66 | super(MathInlineLexer, self).__init__(renderer, rules, **kwargs) |
|
67 | 67 | |
|
68 | 68 | def output_math(self, m): |
|
69 | 69 | return self.renderer.inline_math(m.group(1)) |
|
70 | 70 | |
|
71 | 71 | def output_block_math(self, m): |
|
72 | 72 | return self.renderer.block_math(m.group(1)) |
|
73 | 73 | |
|
74 | 74 | @undoc |
|
75 | 75 | class MarkdownWithMath(mistune.Markdown): |
|
76 | 76 | def __init__(self, renderer, **kwargs): |
|
77 | 77 | if 'inline' not in kwargs: |
|
78 | 78 | kwargs['inline'] = MathInlineLexer |
|
79 | 79 | if 'block' not in kwargs: |
|
80 | 80 | kwargs['block'] = MathBlockLexer |
|
81 | 81 | super(MarkdownWithMath, self).__init__(renderer, **kwargs) |
|
82 | 82 | |
|
83 | 83 | def output_block_math(self): |
|
84 | 84 | return self.renderer.block_math(self.token['text']) |
|
85 | 85 | |
|
86 | 86 | def output_latex_environment(self): |
|
87 | 87 | return self.renderer.latex_environment(self.token['name'], self.token['text']) |
|
88 | 88 | |
|
89 | 89 | @undoc |
|
90 | 90 | class IPythonRenderer(mistune.Renderer): |
|
91 | 91 | def block_code(self, code, lang): |
|
92 | 92 | if lang: |
|
93 | 93 | try: |
|
94 | 94 | lexer = get_lexer_by_name(lang, stripall=True) |
|
95 | 95 | except ClassNotFound: |
|
96 | 96 | code = lang + '\n' + code |
|
97 | 97 | lang = None |
|
98 | 98 | |
|
99 | 99 | if not lang: |
|
100 | 100 | return '\n<pre><code>%s</code></pre>\n' % \ |
|
101 | 101 | mistune.escape(code) |
|
102 | 102 | |
|
103 | 103 | formatter = HtmlFormatter() |
|
104 | 104 | return highlight(code, lexer, formatter) |
|
105 | 105 | |
|
106 | 106 | def header(self, text, level, raw=None): |
|
107 | 107 | html = super(IPythonRenderer, self).header(text, level, raw=raw) |
|
108 | 108 | return add_anchor(html) |
|
109 | 109 | |
|
110 | 110 | # Pass math through unaltered - mathjax does the rendering in the browser |
|
111 | 111 | def block_math(self, text): |
|
112 | 112 | return '$$%s$$' % text |
|
113 | 113 | |
|
114 | 114 | def latex_environment(self, name, text): |
|
115 | 115 | return r'\begin{%s}%s\end{%s}' % (name, text, name) |
|
116 | 116 | |
|
117 | 117 | def inline_math(self, text): |
|
118 | 118 | return '$%s$' % text |
|
119 | 119 | |
|
120 | 120 | def markdown2html_mistune(source): |
|
121 | 121 | """Convert a markdown string to HTML using mistune""" |
|
122 | 122 | return MarkdownWithMath(renderer=IPythonRenderer()).render(source) |
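Usage sketch (not part of the changeset): the math-aware pipeline above leaves $...$ and $$...$$ spans untouched so MathJax can render them client-side; the sample string is illustrative.

    from jupyter_nbconvert.filters.markdown_mistune import markdown2html_mistune

    print(markdown2html_mistune(r'Euler: $e^{i\pi} + 1 = 0$'))
    # roughly: <p>Euler: $e^{i\pi} + 1 = 0$</p>
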
@@ -1,407 +1,407 b'' | |||
|
1 | 1 | #!/usr/bin/env python |
|
2 | 2 | """NbConvert is a utility for conversion of .ipynb files. |
|
3 | 3 | |
|
4 | 4 | Command-line interface for the NbConvert conversion utility. |
|
5 | 5 | """ |
|
6 | 6 | |
|
7 | 7 | # Copyright (c) IPython Development Team. |
|
8 | 8 | # Distributed under the terms of the Modified BSD License. |
|
9 | 9 | |
|
10 | 10 | from __future__ import print_function |
|
11 | 11 | |
|
12 | 12 | import logging |
|
13 | 13 | import sys |
|
14 | 14 | import os |
|
15 | 15 | import glob |
|
16 | 16 | |
|
17 | 17 | from IPython.core.application import BaseIPythonApplication, base_aliases, base_flags |
|
18 | 18 | from IPython.core.profiledir import ProfileDir |
|
19 | 19 | from IPython.config import catch_config_error, Configurable |
|
20 | 20 | from IPython.utils.traitlets import ( |
|
21 | 21 | Unicode, List, Instance, DottedObjectName, Type, CaselessStrEnum, Bool, |
|
22 | 22 | ) |
|
23 | 23 | from IPython.utils.importstring import import_item |
|
24 | 24 | |
|
25 | 25 | from .exporters.export import get_export_names, exporter_map |
|
26 | from IPython.nbconvert import exporters, preprocessors, writers, postprocessors

26 | from jupyter_nbconvert import exporters, preprocessors, writers, postprocessors |
|
27 | 27 | from .utils.base import NbConvertBase |
|
28 | 28 | from .utils.exceptions import ConversionException |
|
29 | 29 | |
|
30 | 30 | #----------------------------------------------------------------------------- |
|
31 | 31 | #Classes and functions |
|
32 | 32 | #----------------------------------------------------------------------------- |
|
33 | 33 | |
|
34 | 34 | class DottedOrNone(DottedObjectName): |
|
35 | 35 | """ |
|
36 | 36 | A string holding a valid dotted object name in Python, such as A.b3._c |
|
37 | 37 | Also allows for None type.""" |
|
38 | 38 | |
|
39 | 39 | default_value = u'' |
|
40 | 40 | |
|
41 | 41 | def validate(self, obj, value): |
|
42 | 42 | if value is not None and len(value) > 0: |
|
43 | 43 | return super(DottedOrNone, self).validate(obj, value) |
|
44 | 44 | else: |
|
45 | 45 | return value |
|
46 | 46 | |
|
47 | 47 | nbconvert_aliases = {} |
|
48 | 48 | nbconvert_aliases.update(base_aliases) |
|
49 | 49 | nbconvert_aliases.update({ |
|
50 | 50 | 'to' : 'NbConvertApp.export_format', |
|
51 | 51 | 'template' : 'TemplateExporter.template_file', |
|
52 | 52 | 'writer' : 'NbConvertApp.writer_class', |
|
53 | 53 | 'post': 'NbConvertApp.postprocessor_class', |
|
54 | 54 | 'output': 'NbConvertApp.output_base', |
|
55 | 55 | 'reveal-prefix': 'RevealHelpPreprocessor.url_prefix', |
|
56 | 56 | 'nbformat': 'NotebookExporter.nbformat_version', |
|
57 | 57 | }) |
|
58 | 58 | |
|
59 | 59 | nbconvert_flags = {} |
|
60 | 60 | nbconvert_flags.update(base_flags) |
|
61 | 61 | nbconvert_flags.update({ |
|
62 | 62 | 'execute' : ( |
|
63 | 63 | {'ExecutePreprocessor' : {'enabled' : True}}, |
|
64 | 64 | "Execute the notebook prior to export." |
|
65 | 65 | ), |
|
66 | 66 | 'stdout' : ( |
|
67 | 67 | {'NbConvertApp' : {'writer_class' : "StdoutWriter"}}, |
|
68 | 68 | "Write notebook output to stdout instead of files." |
|
69 | 69 | ), |
|
70 | 70 | 'inplace' : ( |
|
71 | 71 | { |
|
72 | 72 | 'NbConvertApp' : {'use_output_suffix' : False}, |
|
73 | 73 | 'FilesWriter': {'build_directory': ''} |
|
74 | 74 | }, |
|
75 | 75 | """Run nbconvert in place, overwriting the existing notebook (only |
|
76 | 76 | relevant when converting to notebook format)""" |
|
77 | 77 | ) |
|
78 | 78 | }) |
|
79 | 79 | |
|
80 | 80 | |
|
81 | 81 | class NbConvertApp(BaseIPythonApplication): |
|
82 | 82 | """Application used to convert from notebook file type (``*.ipynb``)""" |
|
83 | 83 | |
|
84 | 84 | name = 'ipython-nbconvert' |
|
85 | 85 | aliases = nbconvert_aliases |
|
86 | 86 | flags = nbconvert_flags |
|
87 | 87 | |
|
88 | 88 | def _log_level_default(self): |
|
89 | 89 | return logging.INFO |
|
90 | 90 | |
|
91 | 91 | def _classes_default(self): |
|
92 | 92 | classes = [NbConvertBase, ProfileDir] |
|
93 | 93 | for pkg in (exporters, preprocessors, writers, postprocessors): |
|
94 | 94 | for name in dir(pkg): |
|
95 | 95 | cls = getattr(pkg, name) |
|
96 | 96 | if isinstance(cls, type) and issubclass(cls, Configurable): |
|
97 | 97 | classes.append(cls) |
|
98 | 98 | |
|
99 | 99 | return classes |
|
100 | 100 | |
|
101 | 101 | description = Unicode( |
|
102 | 102 | u"""This application is used to convert notebook files (*.ipynb) |
|
103 | 103 | to various other formats. |
|
104 | 104 | |
|
105 | 105 | WARNING: THE COMMANDLINE INTERFACE MAY CHANGE IN FUTURE RELEASES.""") |
|
106 | 106 | |
|
107 | 107 | output_base = Unicode('', config=True, help='''overwrite base name used for output files. 
|
108 | 108 | can only be used when converting one notebook at a time. |
|
109 | 109 | ''') |
|
110 | 110 | |
|
111 | 111 | use_output_suffix = Bool( |
|
112 | 112 | True, |
|
113 | 113 | config=True, |
|
114 | 114 | help="""Whether to apply a suffix prior to the extension (only relevant |
|
115 | 115 | when converting to notebook format). The suffix is determined by |
|
116 | 116 | the exporter, and is usually '.nbconvert'.""") |
|
117 | 117 | |
|
118 | 118 | examples = Unicode(u""" |
|
119 | 119 | The simplest way to use nbconvert is |
|
120 | 120 | |
|
121 | 121 | > ipython nbconvert mynotebook.ipynb |
|
122 | 122 | |
|
123 | 123 | which will convert mynotebook.ipynb to the default format (probably HTML). |
|
124 | 124 | |
|
125 | 125 | You can specify the export format with `--to`. |
|
126 | 126 | Options include {0} |
|
127 | 127 | |
|
128 | 128 | > ipython nbconvert --to latex mynotebook.ipynb |
|
129 | 129 | |
|
130 | 130 | Both HTML and LaTeX support multiple output templates. LaTeX includes |
|
131 | 131 | 'base', 'article' and 'report'. HTML includes 'basic' and 'full'. You |
|
132 | 132 | can specify the flavor of the format used. |
|
133 | 133 | |
|
134 | 134 | > ipython nbconvert --to html --template basic mynotebook.ipynb |
|
135 | 135 | |
|
136 | 136 | You can also pipe the output to stdout, rather than a file |
|
137 | 137 | |
|
138 | 138 | > ipython nbconvert mynotebook.ipynb --stdout |
|
139 | 139 | |
|
140 | 140 | PDF is generated via latex |
|
141 | 141 | |
|
142 | 142 | > ipython nbconvert mynotebook.ipynb --to pdf |
|
143 | 143 | |
|
144 | 144 | You can get (and serve) a Reveal.js-powered slideshow |
|
145 | 145 | |
|
146 | 146 | > ipython nbconvert myslides.ipynb --to slides --post serve |
|
147 | 147 | |
|
148 | 148 | Multiple notebooks can be given at the command line in a couple of |
|
149 | 149 | different ways: |
|
150 | 150 | |
|
151 | 151 | > ipython nbconvert notebook*.ipynb |
|
152 | 152 | > ipython nbconvert notebook1.ipynb notebook2.ipynb |
|
153 | 153 | |
|
154 | 154 | or you can specify the notebooks list in a config file, containing:: |
|
155 | 155 | |
|
156 | 156 | c.NbConvertApp.notebooks = ["my_notebook.ipynb"] |
|
157 | 157 | |
|
158 | 158 | > ipython nbconvert --config mycfg.py |
|
159 | 159 | """.format(get_export_names())) |
|
160 | 160 | |
|
161 | 161 | # Writer specific variables |
|
162 | writer = Instance('IPython.nbconvert.writers.base.WriterBase',

162 | writer = Instance('jupyter_nbconvert.writers.base.WriterBase', |
|
163 | 163 | help="""Instance of the writer class used to write the |
|
164 | 164 | results of the conversion.""") |
|
165 | 165 | writer_class = DottedObjectName('FilesWriter', config=True, |
|
166 | 166 | help="""Writer class used to write the |
|
167 | 167 | results of the conversion""") |
|
168 | writer_aliases = {'fileswriter': 'IPython.nbconvert.writers.files.FilesWriter',

169 | 'debugwriter': 'IPython.nbconvert.writers.debug.DebugWriter',

170 | 'stdoutwriter': 'IPython.nbconvert.writers.stdout.StdoutWriter'}

168 | writer_aliases = {'fileswriter': 'jupyter_nbconvert.writers.files.FilesWriter', |

169 | 'debugwriter': 'jupyter_nbconvert.writers.debug.DebugWriter', |

170 | 'stdoutwriter': 'jupyter_nbconvert.writers.stdout.StdoutWriter'} |
|
171 | 171 | writer_factory = Type() |
|
172 | 172 | |
|
173 | 173 | def _writer_class_changed(self, name, old, new): |
|
174 | 174 | if new.lower() in self.writer_aliases: |
|
175 | 175 | new = self.writer_aliases[new.lower()] |
|
176 | 176 | self.writer_factory = import_item(new) |
|
177 | 177 | |
|
178 | 178 | # Post-processor specific variables |
|
179 | postprocessor = Instance('IPython.nbconvert.postprocessors.base.PostProcessorBase',

179 | postprocessor = Instance('jupyter_nbconvert.postprocessors.base.PostProcessorBase', |
|
180 | 180 | help="""Instance of the PostProcessor class used to write the |
|
181 | 181 | results of the conversion.""") |
|
182 | 182 | |
|
183 | 183 | postprocessor_class = DottedOrNone(config=True, |
|
184 | 184 | help="""PostProcessor class used to write the |
|
185 | 185 | results of the conversion""") |
|
186 | postprocessor_aliases = {'serve': 'IPython.nbconvert.postprocessors.serve.ServePostProcessor'}

186 | postprocessor_aliases = {'serve': 'jupyter_nbconvert.postprocessors.serve.ServePostProcessor'} |
|
187 | 187 | postprocessor_factory = Type() |
|
188 | 188 | |
|
189 | 189 | def _postprocessor_class_changed(self, name, old, new): |
|
190 | 190 | if new.lower() in self.postprocessor_aliases: |
|
191 | 191 | new = self.postprocessor_aliases[new.lower()] |
|
192 | 192 | if new: |
|
193 | 193 | self.postprocessor_factory = import_item(new) |
|
194 | 194 | |
|
195 | 195 | |
|
196 | 196 | # Other configurable variables |
|
197 | 197 | export_format = CaselessStrEnum(get_export_names(), |
|
198 | 198 | default_value="html", |
|
199 | 199 | config=True, |
|
200 | 200 | help="""The export format to be used.""" |
|
201 | 201 | ) |
|
202 | 202 | |
|
203 | 203 | notebooks = List([], config=True, help="""List of notebooks to convert. |
|
204 | 204 | Wildcards are supported. |
|
205 | 205 | Filenames passed positionally will be added to the list. |
|
206 | 206 | """) |
|
207 | 207 | |
|
208 | 208 | @catch_config_error |
|
209 | 209 | def initialize(self, argv=None): |
|
210 | 210 | self.init_syspath() |
|
211 | 211 | super(NbConvertApp, self).initialize(argv) |
|
212 | 212 | self.init_notebooks() |
|
213 | 213 | self.init_writer() |
|
214 | 214 | self.init_postprocessor() |
|
215 | 215 | |
|
216 | 216 | |
|
217 | 217 | |
|
218 | 218 | def init_syspath(self): |
|
219 | 219 | """ |
|
220 | 220 | Add the cwd to the sys.path ($PYTHONPATH) |
|
221 | 221 | """ |
|
222 | 222 | sys.path.insert(0, os.getcwd()) |
|
223 | 223 | |
|
224 | 224 | |
|
225 | 225 | def init_notebooks(self): |
|
226 | 226 | """Construct the list of notebooks. |
|
227 | 227 | If notebooks are passed on the command-line, |
|
228 | 228 | they override notebooks specified in config files. |
|
229 | 229 | Glob each notebook to replace notebook patterns with filenames. |
|
230 | 230 | """ |
|
231 | 231 | |
|
232 | 232 | # Specifying notebooks on the command-line overrides (rather than adds) |
|
233 | 233 | # the notebook list |
|
234 | 234 | if self.extra_args: |
|
235 | 235 | patterns = self.extra_args |
|
236 | 236 | else: |
|
237 | 237 | patterns = self.notebooks |
|
238 | 238 | |
|
239 | 239 | # Use glob to replace all the notebook patterns with filenames. |
|
240 | 240 | filenames = [] |
|
241 | 241 | for pattern in patterns: |
|
242 | 242 | |
|
243 | 243 | # Use glob to find matching filenames. Allow the user to convert |
|
244 | 244 | # notebooks without having to type the extension. |
|
245 | 245 | globbed_files = glob.glob(pattern) |
|
246 | 246 | globbed_files.extend(glob.glob(pattern + '.ipynb')) |
|
247 | 247 | if not globbed_files: |
|
248 | 248 | self.log.warn("pattern %r matched no files", pattern) |
|
249 | 249 | |
|
250 | 250 | for filename in globbed_files: |
|
251 | 251 | if not filename in filenames: |
|
252 | 252 | filenames.append(filename) |
|
253 | 253 | self.notebooks = filenames |
|
254 | 254 | |
|
255 | 255 | def init_writer(self): |
|
256 | 256 | """ |
|
257 | 257 | Initialize the writer (which is stateless) |
|
258 | 258 | """ |
|
259 | 259 | self._writer_class_changed(None, self.writer_class, self.writer_class) |
|
260 | 260 | self.writer = self.writer_factory(parent=self) |
|
261 | 261 | if hasattr(self.writer, 'build_directory') and self.writer.build_directory != '': |
|
262 | 262 | self.use_output_suffix = False |
|
263 | 263 | |
|
264 | 264 | def init_postprocessor(self): |
|
265 | 265 | """ |
|
266 | 266 | Initialize the postprocessor (which is stateless) |
|
267 | 267 | """ |
|
268 | 268 | self._postprocessor_class_changed(None, self.postprocessor_class, |
|
269 | 269 | self.postprocessor_class) |
|
270 | 270 | if self.postprocessor_factory: |
|
271 | 271 | self.postprocessor = self.postprocessor_factory(parent=self) |
|
272 | 272 | |
|
273 | 273 | def start(self): |
|
274 | 274 | """ |
|
275 | 275 | Run after initialization has completed 
|
276 | 276 | """ |
|
277 | 277 | super(NbConvertApp, self).start() |
|
278 | 278 | self.convert_notebooks() |
|
279 | 279 | |
|
280 | 280 | def init_single_notebook_resources(self, notebook_filename): |
|
281 | 281 | """Step 1: Initialize resources |
|
282 | 282 | |
|
283 | 283 | This initializes the resources dictionary for a single notebook. This 
|
284 | 284 | method should return the resources dictionary, and MUST include the |
|
285 | 285 | following keys: |
|
286 | 286 | |
|
287 | 287 | - profile_dir: the location of the profile directory |
|
288 | 288 | - unique_key: the notebook name |
|
289 | 289 | - output_files_dir: a directory where output files (not including |
|
290 | 290 | the notebook itself) should be saved |
|
291 | 291 | |
|
292 | 292 | """ |
|
293 | 293 | |
|
294 | 294 | # Get a unique key for the notebook and set it in the resources object. |
|
295 | 295 | basename = os.path.basename(notebook_filename) |
|
296 | 296 | notebook_name = basename[:basename.rfind('.')] |
|
297 | 297 | if self.output_base: |
|
298 | 298 | # strip duplicate extension from output_base, to avoid basename.ext.ext 
|
299 | 299 | if getattr(self.exporter, 'file_extension', False): |
|
300 | 300 | base, ext = os.path.splitext(self.output_base) |
|
301 | 301 | if ext == self.exporter.file_extension: |
|
302 | 302 | self.output_base = base |
|
303 | 303 | notebook_name = self.output_base |
|
304 | 304 | |
|
305 | 305 | self.log.debug("Notebook name is '%s'", notebook_name) |
|
306 | 306 | |
|
307 | 307 | # first initialize the resources we want to use |
|
308 | 308 | resources = {} |
|
309 | 309 | resources['profile_dir'] = self.profile_dir.location |
|
310 | 310 | resources['unique_key'] = notebook_name |
|
311 | 311 | resources['output_files_dir'] = '%s_files' % notebook_name |
|
312 | 312 | |
|
313 | 313 | return resources |
|
314 | 314 | |
|
315 | 315 | def export_single_notebook(self, notebook_filename, resources): |
|
316 | 316 | """Step 2: Export the notebook |
|
317 | 317 | |
|
318 | 318 | Exports the notebook to a particular format according to the specified |
|
319 | 319 | exporter. This function returns the output and (possibly modified) |
|
320 | 320 | resources from the exporter. |
|
321 | 321 | |
|
322 | 322 | """ |
|
323 | 323 | try: |
|
324 | 324 | output, resources = self.exporter.from_filename(notebook_filename, resources=resources) |
|
325 | 325 | except ConversionException: |
|
326 | 326 | self.log.error("Error while converting '%s'", notebook_filename, exc_info=True) |
|
327 | 327 | self.exit(1) |
|
328 | 328 | |
|
329 | 329 | return output, resources |
|
330 | 330 | |
|
331 | 331 | def write_single_notebook(self, output, resources): |
|
332 | 332 | """Step 3: Write the notebook to file |
|
333 | 333 | |
|
334 | 334 | This writes output from the exporter to file using the specified writer. |
|
335 | 335 | It returns the results from the writer. |
|
336 | 336 | |
|
337 | 337 | """ |
|
338 | 338 | if 'unique_key' not in resources: |
|
339 | 339 | raise KeyError("unique_key MUST be specified in the resources, but it is not") |
|
340 | 340 | |
|
341 | 341 | notebook_name = resources['unique_key'] |
|
342 | 342 | if self.use_output_suffix and not self.output_base: |
|
343 | 343 | notebook_name += resources.get('output_suffix', '') |
|
344 | 344 | |
|
345 | 345 | write_results = self.writer.write( |
|
346 | 346 | output, resources, notebook_name=notebook_name) |
|
347 | 347 | return write_results |
|
348 | 348 | |
|
349 | 349 | def postprocess_single_notebook(self, write_results): |
|
350 | 350 | """Step 4: Postprocess the notebook |
|
351 | 351 | |
|
352 | 352 | This postprocesses the notebook after it has been written, taking as an |
|
353 | 353 | argument the results of writing the notebook to file. This only actually |
|
354 | 354 | does anything if a postprocessor has actually been specified. |
|
355 | 355 | |
|
356 | 356 | """ |
|
357 | 357 | # Post-process if post processor has been defined. |
|
358 | 358 | if hasattr(self, 'postprocessor') and self.postprocessor: |
|
359 | 359 | self.postprocessor(write_results) |
|
360 | 360 | |
|
361 | 361 | def convert_single_notebook(self, notebook_filename): |
|
362 | 362 | """Convert a single notebook. Performs the following steps: |
|
363 | 363 | |
|
364 | 364 | 1. Initialize notebook resources |
|
365 | 365 | 2. Export the notebook to a particular format |
|
366 | 366 | 3. Write the exported notebook to file |
|
367 | 367 | 4. (Maybe) postprocess the written file |
|
368 | 368 | |
|
369 | 369 | """ |
|
370 | 370 | self.log.info("Converting notebook %s to %s", notebook_filename, self.export_format) |
|
371 | 371 | resources = self.init_single_notebook_resources(notebook_filename) |
|
372 | 372 | output, resources = self.export_single_notebook(notebook_filename, resources) |
|
373 | 373 | write_results = self.write_single_notebook(output, resources) |
|
374 | 374 | self.postprocess_single_notebook(write_results) |
|
375 | 375 | |
|
376 | 376 | def convert_notebooks(self): |
|
377 | 377 | """ |
|
378 | 378 | Convert the notebooks in the self.notebook traitlet |
|
379 | 379 | """ |
|
380 | 380 | # check that the output base isn't specified if there is more than |
|
381 | 381 | # one notebook to convert |
|
382 | 382 | if self.output_base != '' and len(self.notebooks) > 1: |
|
383 | 383 | self.log.error( |
|
384 | 384 | """ |
|
385 | 385 | UsageError: --output flag or `NbConvertApp.output_base` config option |
|
386 | 386 | cannot be used when converting multiple notebooks. |
|
387 | 387 | """ |
|
388 | 388 | ) |
|
389 | 389 | self.exit(1) |
|
390 | 390 | |
|
391 | 391 | # initialize the exporter |
|
392 | 392 | self.exporter = exporter_map[self.export_format](config=self.config) |
|
393 | 393 | |
|
394 | 394 | # no notebooks to convert! |
|
395 | 395 | if len(self.notebooks) == 0: |
|
396 | 396 | self.print_help() |
|
397 | 397 | sys.exit(-1) |
|
398 | 398 | |
|
399 | 399 | # convert each notebook |
|
400 | 400 | for notebook_filename in self.notebooks: |
|
401 | 401 | self.convert_single_notebook(notebook_filename) |
|
402 | 402 | |
|
403 | 403 | #----------------------------------------------------------------------------- |
|
404 | 404 | # Main entry point |
|
405 | 405 | #----------------------------------------------------------------------------- |
|
406 | 406 | |
|
407 | 407 | launch_new_instance = NbConvertApp.launch_instance |
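Configuration sketch (not part of the changeset): the aliases defined above mean the CLI flags and a config file are interchangeable; get_config() is supplied by the IPython config loader, and the notebook name is a placeholder.

    # mycfg.py -- run with: ipython nbconvert --config mycfg.py
    c = get_config()
    c.NbConvertApp.notebooks = ['my_notebook.ipynb']
    c.NbConvertApp.export_format = 'latex'
    c.TemplateExporter.template_file = 'article'
    c.NbConvertApp.writer_class = 'FilesWriter'   # resolved via writer_aliases
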
@@ -1,151 +1,151 b'' | |||
|
1 | 1 | """ |
|
2 | 2 | Module with tests for the execute preprocessor. |
|
3 | 3 | """ |
|
4 | 4 | |
|
5 | 5 | # Copyright (c) IPython Development Team. |
|
6 | 6 | # Distributed under the terms of the Modified BSD License. |
|
7 | 7 | |
|
8 | 8 | import copy |
|
9 | 9 | import glob |
|
10 | 10 | import io |
|
11 | 11 | import os |
|
12 | 12 | import re |
|
13 | 13 | |
|
14 | 14 | try: |
|
15 | 15 | from queue import Empty # Py 3 |
|
16 | 16 | except ImportError: |
|
17 | 17 | from Queue import Empty # Py 2 |
|
18 | 18 | |
|
19 | 19 | from IPython import nbformat |
|
20 | 20 | |
|
21 | 21 | from .base import PreprocessorTestsBase |
|
22 | 22 | from ..execute import ExecutePreprocessor |
|
23 | 23 | |
|
24 | from IPython.nbconvert.filters import strip_ansi

24 | from jupyter_nbconvert.filters import strip_ansi |
|
25 | 25 | from nose.tools import assert_raises |
|
26 | 26 | |
|
27 | 27 | addr_pat = re.compile(r'0x[0-9a-f]{7,9}') |
|
28 | 28 | |
|
29 | 29 | class TestExecute(PreprocessorTestsBase): |
|
30 | 30 | """Contains test functions for execute.py""" |
|
31 | 31 | |
|
32 | 32 | @staticmethod |
|
33 | 33 | def normalize_output(output): |
|
34 | 34 | """ |
|
35 | 35 | Normalizes outputs for comparison. |
|
36 | 36 | """ |
|
37 | 37 | output = dict(output) |
|
38 | 38 | if 'metadata' in output: |
|
39 | 39 | del output['metadata'] |
|
40 | 40 | if 'text' in output: |
|
41 | 41 | output['text'] = re.sub(addr_pat, '<HEXADDR>', output['text']) |
|
42 | 42 | if 'text/plain' in output.get('data', {}): |
|
43 | 43 | output['data']['text/plain'] = \ |
|
44 | 44 | re.sub(addr_pat, '<HEXADDR>', output['data']['text/plain']) |
|
45 | 45 | if 'traceback' in output: |
|
46 | 46 | tb = [] |
|
47 | 47 | for line in output['traceback']: |
|
48 | 48 | tb.append(strip_ansi(line)) |
|
49 | 49 | output['traceback'] = tb |
|
50 | 50 | |
|
51 | 51 | return output |
|
52 | 52 | |
|
53 | 53 | |
|
54 | 54 | def assert_notebooks_equal(self, expected, actual): |
|
55 | 55 | expected_cells = expected['cells'] |
|
56 | 56 | actual_cells = actual['cells'] |
|
57 | 57 | self.assertEqual(len(expected_cells), len(actual_cells)) |
|
58 | 58 | |
|
59 | 59 | for expected_cell, actual_cell in zip(expected_cells, actual_cells): |
|
60 | 60 | expected_outputs = expected_cell.get('outputs', []) |
|
61 | 61 | actual_outputs = actual_cell.get('outputs', []) |
|
62 | 62 | normalized_expected_outputs = list(map(self.normalize_output, expected_outputs)) |
|
63 | 63 | normalized_actual_outputs = list(map(self.normalize_output, actual_outputs)) |
|
64 | 64 | self.assertEqual(normalized_expected_outputs, normalized_actual_outputs) |
|
65 | 65 | |
|
66 | 66 | expected_execution_count = expected_cell.get('execution_count', None) |
|
67 | 67 | actual_execution_count = actual_cell.get('execution_count', None) |
|
68 | 68 | self.assertEqual(expected_execution_count, actual_execution_count) |
|
69 | 69 | |
|
70 | 70 | |
|
71 | 71 | def build_preprocessor(self, opts): |
|
72 | 72 | """Make an instance of a preprocessor""" |
|
73 | 73 | preprocessor = ExecutePreprocessor() |
|
74 | 74 | preprocessor.enabled = True |
|
75 | 75 | for opt in opts: |
|
76 | 76 | setattr(preprocessor, opt, opts[opt]) |
|
77 | 77 | return preprocessor |
|
78 | 78 | |
|
79 | 79 | |
|
80 | 80 | def test_constructor(self): |
|
81 | 81 | """Can a ExecutePreprocessor be constructed?""" |
|
82 | 82 | self.build_preprocessor({}) |
|
83 | 83 | |
|
84 | 84 | |
|
85 | 85 | def run_notebook(self, filename, opts, resources): |
|
86 | 86 | """Loads and runs a notebook, returning both the version prior to |
|
87 | 87 | running it and the version after running it. |
|
88 | 88 | |
|
89 | 89 | """ |
|
90 | 90 | with io.open(filename) as f: |
|
91 | 91 | input_nb = nbformat.read(f, 4) |
|
92 | 92 | preprocessor = self.build_preprocessor(opts) |
|
93 | 93 | cleaned_input_nb = copy.deepcopy(input_nb) |
|
94 | 94 | for cell in cleaned_input_nb.cells: |
|
95 | 95 | if 'execution_count' in cell: |
|
96 | 96 | del cell['execution_count'] |
|
97 | 97 | cell['outputs'] = [] |
|
98 | 98 | output_nb, _ = preprocessor(cleaned_input_nb, resources) |
|
99 | 99 | return input_nb, output_nb |
|
100 | 100 | |
|
101 | 101 | def test_run_notebooks(self): |
|
102 | 102 | """Runs a series of test notebooks and compares them to their actual output""" |
|
103 | 103 | current_dir = os.path.dirname(__file__) |
|
104 | 104 | input_files = glob.glob(os.path.join(current_dir, 'files', '*.ipynb')) |
|
105 | 105 | for filename in input_files: |
|
106 | 106 | if os.path.basename(filename) == "Disable Stdin.ipynb": |
|
107 | 107 | continue |
|
108 | 108 | elif os.path.basename(filename) == "Interrupt.ipynb": |
|
109 | 109 | opts = dict(timeout=1, interrupt_on_timeout=True) |
|
110 | 110 | else: |
|
111 | 111 | opts = {} |
|
112 | 112 | res = self.build_resources() |
|
113 | 113 | res['metadata']['path'] = os.path.dirname(filename) |
|
114 | 114 | input_nb, output_nb = self.run_notebook(filename, opts, res) |
|
115 | 115 | self.assert_notebooks_equal(input_nb, output_nb) |
|
116 | 116 | |
|
117 | 117 | def test_empty_path(self): |
|
118 | 118 | """Can the kernel be started when the path is empty?""" |
|
119 | 119 | current_dir = os.path.dirname(__file__) |
|
120 | 120 | filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb') |
|
121 | 121 | res = self.build_resources() |
|
122 | 122 | res['metadata']['path'] = '' |
|
123 | 123 | input_nb, output_nb = self.run_notebook(filename, {}, res) |
|
124 | 124 | self.assert_notebooks_equal(input_nb, output_nb) |
|
125 | 125 | |
|
126 | 126 | def test_disable_stdin(self): |
|
127 | 127 | """Test disabling standard input""" |
|
128 | 128 | current_dir = os.path.dirname(__file__) |
|
129 | 129 | filename = os.path.join(current_dir, 'files', 'Disable Stdin.ipynb') |
|
130 | 130 | res = self.build_resources() |
|
131 | 131 | res['metadata']['path'] = os.path.dirname(filename) |
|
132 | 132 | input_nb, output_nb = self.run_notebook(filename, {}, res) |
|
133 | 133 | |
|
134 | 134 | # We need to special-case this particular notebook, because the |
|
135 | 135 | # traceback contains machine-specific stuff like where IPython |
|
136 | 136 | # is installed. It is sufficient here to just check that an error |
|
137 | 137 | # was thrown, and that it was a StdinNotImplementedError |
|
138 | 138 | self.assertEqual(len(output_nb['cells']), 1) |
|
139 | 139 | self.assertEqual(len(output_nb['cells'][0]['outputs']), 1) |
|
140 | 140 | output = output_nb['cells'][0]['outputs'][0] |
|
141 | 141 | self.assertEqual(output['output_type'], 'error') |
|
142 | 142 | self.assertEqual(output['ename'], 'StdinNotImplementedError') |
|
143 | 143 | self.assertEqual(output['evalue'], 'raw_input was called, but this frontend does not support input requests.') |
|
144 | 144 | |
|
145 | 145 | def test_timeout(self): |
|
146 | 146 | """Check that an error is raised when a computation times out""" |
|
147 | 147 | current_dir = os.path.dirname(__file__) |
|
148 | 148 | filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb') |
|
149 | 149 | res = self.build_resources() |
|
150 | 150 | res['metadata']['path'] = os.path.dirname(filename) |
|
151 | 151 | assert_raises(Empty, self.run_notebook, filename, dict(timeout=1), res) |
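Usage sketch (not part of the changeset): the preprocessor these tests exercise is typically driven like this outside the test harness; the filename, timeout, and working path are placeholders, and the resources dict shape mirrors what the tests build.

    import io
    from IPython import nbformat
    from jupyter_nbconvert.preprocessors.execute import ExecutePreprocessor

    with io.open('mynotebook.ipynb') as f:
        nb = nbformat.read(f, 4)
    preprocessor = ExecutePreprocessor(timeout=60)
    preprocessor.enabled = True
    executed_nb, resources = preprocessor(nb, {'metadata': {'path': '.'}})
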
@@ -1,7 +1,7 b'' | |||
|
1 | from IPython.nbconvert.writers.base import WriterBase

1 | from jupyter_nbconvert.writers.base import WriterBase |
|
2 | 2 | |
|
3 | 3 | class HelloWriter(WriterBase): |
|
4 | 4 | |
|
5 | 5 | def write(self, output, resources, notebook_name=None, **kw): |
|
6 | 6 | with open('hello.txt', 'w') as outfile: |
|
7 | 7 | outfile.write('hello world') |
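For reference (not part of the changeset), a writer like this is selected through the dotted-name trait covered by test_cwd_plugin later in this changeset, e.g. ipython nbconvert empty.ipynb --to html --NbConvertApp.writer_class='hello.HelloWriter'. It can also be driven directly, since the writer interface is just write():

    from hello import HelloWriter   # assumes hello.py is importable (e.g. from the cwd)

    writer = HelloWriter()
    writer.write(output=u'', resources={}, notebook_name='empty')
    # -> creates hello.txt containing 'hello world'
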
@@ -1,243 +1,243 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """Test NbConvertApp""" |
|
3 | 3 | |
|
4 | 4 | # Copyright (c) IPython Development Team. |
|
5 | 5 | # Distributed under the terms of the Modified BSD License. |
|
6 | 6 | |
|
7 | 7 | import os |
|
8 | 8 | import glob |
|
9 | 9 | import sys |
|
10 | 10 | |
|
11 | 11 | from .base import TestsBase |
|
12 | 12 | from ..postprocessors import PostProcessorBase |
|
13 | 13 | |
|
14 | 14 | import IPython.testing.tools as tt |
|
15 | 15 | from IPython.testing import decorators as dec |
|
16 | 16 | |
|
17 | 17 | #----------------------------------------------------------------------------- |
|
18 | 18 | # Classes and functions |
|
19 | 19 | #----------------------------------------------------------------------------- |
|
20 | 20 | |
|
21 | 21 | class DummyPost(PostProcessorBase): |
|
22 | 22 | def postprocess(self, filename): |
|
23 | 23 | print("Dummy:%s" % filename) |
|
24 | 24 | |
|
25 | 25 | class TestNbConvertApp(TestsBase): |
|
26 | 26 | """Collection of NbConvertApp tests""" |
|
27 | 27 | |
|
28 | 28 | |
|
29 | 29 | def test_notebook_help(self): |
|
30 | 30 | """Will help show if no notebooks are specified?""" |
|
31 | 31 | with self.create_temp_cwd(): |
|
32 | 32 | out, err = self.call('nbconvert --log-level 0', ignore_return_code=True) |
|
33 | 33 | self.assertIn("see '--help-all'", out) |
|
34 | 34 | |
|
35 | 35 | def test_help_output(self): |
|
36 | 36 | """ipython nbconvert --help-all works""" |
|
37 | 37 | tt.help_all_output_test('nbconvert') |
|
38 | 38 | |
|
39 | 39 | def test_glob(self): |
|
40 | 40 | """ |
|
41 | 41 | Do search patterns work for notebook names? |
|
42 | 42 | """ |
|
43 | 43 | with self.create_temp_cwd(['notebook*.ipynb']): |
|
44 | 44 | self.call('nbconvert --to python *.ipynb --log-level 0') |
|
45 | 45 | assert os.path.isfile('notebook1.py') |
|
46 | 46 | assert os.path.isfile('notebook2.py') |
|
47 | 47 | |
|
48 | 48 | |
|
49 | 49 | def test_glob_subdir(self): |
|
50 | 50 | """ |
|
51 | 51 | Do search patterns work for subdirectory notebook names? |
|
52 | 52 | """ |
|
53 | 53 | with self.create_temp_cwd(): |
|
54 | 54 | self.copy_files_to(['notebook*.ipynb'], 'subdir/') |
|
55 | 55 | self.call('nbconvert --to python --log-level 0 ' + |
|
56 | 56 | os.path.join('subdir', '*.ipynb')) |
|
57 | 57 | assert os.path.isfile('notebook1.py') |
|
58 | 58 | assert os.path.isfile('notebook2.py') |
|
59 | 59 | |
|
60 | 60 | |
|
61 | 61 | def test_explicit(self): |
|
62 | 62 | """ |
|
63 | 63 | Do explicit notebook names work? |
|
64 | 64 | """ |
|
65 | 65 | with self.create_temp_cwd(['notebook*.ipynb']): |
|
66 | 66 | self.call('nbconvert --log-level 0 --to python notebook2') |
|
67 | 67 | assert not os.path.isfile('notebook1.py') |
|
68 | 68 | assert os.path.isfile('notebook2.py') |
|
69 | 69 | |
|
70 | 70 | |
|
71 | 71 | @dec.onlyif_cmds_exist('pdflatex') |
|
72 | 72 | @dec.onlyif_cmds_exist('pandoc') |
|
73 | 73 | def test_filename_spaces(self): |
|
74 | 74 | """ |
|
75 | 75 | Generate PDFs with graphics if notebooks have spaces in the name? |
|
76 | 76 | """ |
|
77 | 77 | with self.create_temp_cwd(['notebook2.ipynb']): |
|
78 | 78 | os.rename('notebook2.ipynb', 'notebook with spaces.ipynb') |
|
79 | 79 | self.call('nbconvert --log-level 0 --to pdf' |
|
80 | 80 | ' "notebook with spaces"' |
|
81 | 81 | ' --PDFExporter.latex_count=1' |
|
82 | 82 | ' --PDFExporter.verbose=True' |
|
83 | 83 | ) |
|
84 | 84 | assert os.path.isfile('notebook with spaces.pdf') |
|
85 | 85 | |
|
86 | 86 | def test_post_processor(self): |
|
87 | 87 | """Do post processors work?""" |
|
88 | 88 | with self.create_temp_cwd(['notebook1.ipynb']): |
|
89 | 89 | out, err = self.call('nbconvert --log-level 0 --to python notebook1 ' |
|
90 | '--post IPython.nbconvert.tests.test_nbconvertapp.DummyPost')

90 | '--post jupyter_nbconvert.tests.test_nbconvertapp.DummyPost') |
|
91 | 91 | self.assertIn('Dummy:notebook1.py', out) |
|
92 | 92 | |
|
93 | 93 | @dec.onlyif_cmds_exist('pandoc') |
|
94 | 94 | def test_spurious_cr(self): |
|
95 | 95 | """Check for extra CR characters""" |
|
96 | 96 | with self.create_temp_cwd(['notebook2.ipynb']): |
|
97 | 97 | self.call('nbconvert --log-level 0 --to latex notebook2') |
|
98 | 98 | assert os.path.isfile('notebook2.tex') |
|
99 | 99 | with open('notebook2.tex') as f: |
|
100 | 100 | tex = f.read() |
|
101 | 101 | self.call('nbconvert --log-level 0 --to html notebook2') |
|
102 | 102 | assert os.path.isfile('notebook2.html') |
|
103 | 103 | with open('notebook2.html') as f: |
|
104 | 104 | html = f.read() |
|
105 | 105 | self.assertEqual(tex.count('\r'), tex.count('\r\n')) |
|
106 | 106 | self.assertEqual(html.count('\r'), html.count('\r\n')) |
|
107 | 107 | |
|
108 | 108 | @dec.onlyif_cmds_exist('pandoc') |
|
109 | 109 | def test_png_base64_html_ok(self): |
|
110 | 110 | """Is embedded png data well formed in HTML?""" |
|
111 | 111 | with self.create_temp_cwd(['notebook2.ipynb']): |
|
112 | 112 | self.call('nbconvert --log-level 0 --to HTML ' |
|
113 | 113 | 'notebook2.ipynb --template full') |
|
114 | 114 | assert os.path.isfile('notebook2.html') |
|
115 | 115 | with open('notebook2.html') as f: |
|
116 | 116 | assert "data:image/png;base64,b'" not in f.read() |
|
117 | 117 | |
|
118 | 118 | @dec.onlyif_cmds_exist('pandoc') |
|
119 | 119 | def test_template(self): |
|
120 | 120 | """ |
|
121 | 121 | Do export templates work? |
|
122 | 122 | """ |
|
123 | 123 | with self.create_temp_cwd(['notebook2.ipynb']): |
|
124 | 124 | self.call('nbconvert --log-level 0 --to slides ' |
|
125 | 125 | 'notebook2.ipynb') |
|
126 | 126 | assert os.path.isfile('notebook2.slides.html') |
|
127 | 127 | with open('notebook2.slides.html') as f: |
|
128 | 128 | assert '/reveal.css' in f.read() |
|
129 | 129 | |
|
130 | 130 | def test_output_ext(self): |
|
131 | 131 | """test --output=outputfile[.ext]""" |
|
132 | 132 | with self.create_temp_cwd(['notebook1.ipynb']): |
|
133 | 133 | self.call('nbconvert --log-level 0 --to python ' |
|
134 | 134 | 'notebook1.ipynb --output nb.py') |
|
135 | 135 | assert os.path.exists('nb.py') |
|
136 | 136 | |
|
137 | 137 | self.call('nbconvert --log-level 0 --to python ' |
|
138 | 138 | 'notebook1.ipynb --output nb2') |
|
139 | 139 | assert os.path.exists('nb2.py') |
|
140 | 140 | |
|
141 | 141 | def test_glob_explicit(self): |
|
142 | 142 | """ |
|
143 | 143 | Can a search pattern be used along with matching explicit notebook names? |
|
144 | 144 | """ |
|
145 | 145 | with self.create_temp_cwd(['notebook*.ipynb']): |
|
146 | 146 | self.call('nbconvert --log-level 0 --to python ' |
|
147 | 147 | '*.ipynb notebook1.ipynb notebook2.ipynb') |
|
148 | 148 | assert os.path.isfile('notebook1.py') |
|
149 | 149 | assert os.path.isfile('notebook2.py') |
|
150 | 150 | |
|
151 | 151 | |
|
152 | 152 | def test_explicit_glob(self): |
|
153 | 153 | """ |
|
154 | 154 | Can explicit notebook names be used and then a matching search pattern? |
|
155 | 155 | """ |
|
156 | 156 | with self.create_temp_cwd(['notebook*.ipynb']): |
|
157 | 157 | self.call('nbconvert --log-level 0 --to=python ' |
|
158 | 158 | 'notebook1.ipynb notebook2.ipynb *.ipynb') |
|
159 | 159 | assert os.path.isfile('notebook1.py') |
|
160 | 160 | assert os.path.isfile('notebook2.py') |
|
161 | 161 | |
|
162 | 162 | |
|
163 | 163 | def test_default_config(self): |
|
164 | 164 | """ |
|
165 | 165 | Does the default config work? |
|
166 | 166 | """ |
|
167 | 167 | with self.create_temp_cwd(['notebook*.ipynb', 'ipython_nbconvert_config.py']): |
|
168 | 168 | self.call('nbconvert --log-level 0') |
|
169 | 169 | assert os.path.isfile('notebook1.py') |
|
170 | 170 | assert not os.path.isfile('notebook2.py') |
|
171 | 171 | |
|
172 | 172 | |
|
173 | 173 | def test_override_config(self): |
|
174 | 174 | """ |
|
175 | 175 | Can the default config be overridden? 
|
176 | 176 | """ |
|
177 | 177 | with self.create_temp_cwd(['notebook*.ipynb', |
|
178 | 178 | 'ipython_nbconvert_config.py', |
|
179 | 179 | 'override.py']): |
|
180 | 180 | self.call('nbconvert --log-level 0 --config="override.py"') |
|
181 | 181 | assert not os.path.isfile('notebook1.py') |
|
182 | 182 | assert os.path.isfile('notebook2.py') |
|
183 | 183 | |
|
184 | 184 | def test_accents_in_filename(self): |
|
185 | 185 | """ |
|
186 | 186 | Can notebook names include accents? |
|
187 | 187 | """ |
|
188 | 188 | with self.create_temp_cwd(): |
|
189 | 189 | self.create_empty_notebook(u'nb1_anΓ‘lisis.ipynb') |
|
190 | 190 | self.call('nbconvert --log-level 0 --to python nb1_*') |
|
191 | 191 | assert os.path.isfile(u'nb1_anΓ‘lisis.py') |
|
192 | 192 | |
|
193 | 193 | @dec.onlyif_cmds_exist('pdflatex', 'pandoc') |
|
194 | 194 | def test_filename_accent_pdf(self): |
|
195 | 195 | """ |
|
196 | 196 | Generate PDFs if notebooks have an accent in their name? |
|
197 | 197 | """ |
|
198 | 198 | with self.create_temp_cwd(): |
|
199 | 199 | self.create_empty_notebook(u'nb1_anΓ‘lisis.ipynb') |
|
200 | 200 | self.call('nbconvert --log-level 0 --to pdf "nb1_*"' |
|
201 | 201 | ' --PDFExporter.latex_count=1' |
|
202 | 202 | ' --PDFExporter.verbose=True') |
|
203 | 203 | assert os.path.isfile(u'nb1_anΓ‘lisis.pdf') |
|
204 | 204 | |
|
205 | 205 | def test_cwd_plugin(self): |
|
206 | 206 | """ |
|
207 | 207 | Verify that an extension in the cwd can be imported. |
|
208 | 208 | """ |
|
209 | 209 | with self.create_temp_cwd(['hello.py']): |
|
210 | 210 | self.create_empty_notebook(u'empty.ipynb') |
|
211 | 211 | self.call('nbconvert empty --to html --NbConvertApp.writer_class=\'hello.HelloWriter\'') |
|
212 | 212 | assert os.path.isfile(u'hello.txt') |
|
213 | 213 | |
|
214 | 214 | def test_output_suffix(self): |
|
215 | 215 | """ |
|
216 | 216 | Verify that the output suffix is applied |
|
217 | 217 | """ |
|
218 | 218 | with self.create_temp_cwd(): |
|
219 | 219 | self.create_empty_notebook('empty.ipynb') |
|
220 | 220 | self.call('nbconvert empty.ipynb --to notebook') |
|
221 | 221 | assert os.path.isfile('empty.nbconvert.ipynb') |
|
222 | 222 | |
|
223 | 223 | def test_different_build_dir(self): |
|
224 | 224 | """ |
|
225 | 225 | Verify that the output suffix is not applied |
|
226 | 226 | """ |
|
227 | 227 | with self.create_temp_cwd(): |
|
228 | 228 | self.create_empty_notebook('empty.ipynb') |
|
229 | 229 | os.mkdir('output') |
|
230 | 230 | self.call( |
|
231 | 231 | 'nbconvert empty.ipynb --to notebook ' |
|
232 | 232 | '--FilesWriter.build_directory=output') |
|
233 | 233 | assert os.path.isfile('output/empty.ipynb') |
|
234 | 234 | |
|
235 | 235 | def test_inplace(self): |
|
236 | 236 | """ |
|
237 | 237 | Verify that the notebook is converted in place |
|
238 | 238 | """ |
|
239 | 239 | with self.create_temp_cwd(): |
|
240 | 240 | self.create_empty_notebook('empty.ipynb') |
|
241 | 241 | self.call('nbconvert empty.ipynb --to notebook --inplace') |
|
242 | 242 | assert os.path.isfile('empty.ipynb') |
|
243 | 243 | assert not os.path.isfile('empty.nbconvert.ipynb') |
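Usage sketch (not part of the changeset): the --post hook exercised by test_post_processor above is invoked as a callable, exactly as NbConvertApp does in postprocess_single_notebook; the subclass name and filename below are illustrative.

    from jupyter_nbconvert.postprocessors import PostProcessorBase

    class PrintPost(PostProcessorBase):
        def postprocess(self, input):
            print("Post-processed: %s" % input)

    PrintPost()('notebook1.py')   # __call__ forwards to postprocess()
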
@@ -1,131 +1,131 b'' | |||
|
1 | 1 | """Test lexers module""" |
|
2 | 2 | #----------------------------------------------------------------------------- |
|
3 | 3 | # Copyright (C) 2014 The IPython Development Team |
|
4 | 4 | # |
|
5 | 5 | # Distributed under the terms of the BSD License. The full license is in |
|
6 | 6 | # the file COPYING, distributed as part of this software. |
|
7 | 7 | #----------------------------------------------------------------------------- |
|
8 | 8 | |
|
9 | 9 | #----------------------------------------------------------------------------- |
|
10 | 10 | # Imports |
|
11 | 11 | #----------------------------------------------------------------------------- |
|
12 | 12 | from pygments.token import Token |
|
13 | 13 | |
|
14 | from IPython.nbconvert.tests.base import TestsBase

14 | from jupyter_nbconvert.tests.base import TestsBase |
|
15 | 15 | from .. import lexers |
|
16 | 16 | |
|
17 | 17 | |
|
18 | 18 | #----------------------------------------------------------------------------- |
|
19 | 19 | # Classes and functions |
|
20 | 20 | #----------------------------------------------------------------------------- |
|
21 | 21 | class TestLexers(TestsBase): |
|
22 | 22 | """Collection of lexers tests""" |
|
23 | 23 | def setUp(self): |
|
24 | 24 | self.lexer = lexers.IPythonLexer() |
|
25 | 25 | |
|
26 | 26 | def testIPythonLexer(self): |
|
27 | 27 | fragment = '!echo $HOME\n' |
|
28 | 28 | tokens = [ |
|
29 | 29 | (Token.Operator, '!'), |
|
30 | 30 | (Token.Name.Builtin, 'echo'), |
|
31 | 31 | (Token.Text, ' '), |
|
32 | 32 | (Token.Name.Variable, '$HOME'), |
|
33 | 33 | (Token.Text, '\n'), |
|
34 | 34 | ] |
|
35 | 35 | self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) |
|
36 | 36 | |
|
37 | 37 | fragment_2 = '!' + fragment |
|
38 | 38 | tokens_2 = [ |
|
39 | 39 | (Token.Operator, '!!'), |
|
40 | 40 | ] + tokens[1:] |
|
41 | 41 | self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2))) |
|
42 | 42 | |
|
43 | 43 | fragment_2 = '\t %%!\n' + fragment[1:] |
|
44 | 44 | tokens_2 = [ |
|
45 | 45 | (Token.Text, '\t '), |
|
46 | 46 | (Token.Operator, '%%!'), |
|
47 | 47 | (Token.Text, '\n'), |
|
48 | 48 | ] + tokens[1:] |
|
49 | 49 | self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2))) |
|
50 | 50 | |
|
51 | 51 | fragment_2 = 'x = ' + fragment |
|
52 | 52 | tokens_2 = [ |
|
53 | 53 | (Token.Name, 'x'), |
|
54 | 54 | (Token.Text, ' '), |
|
55 | 55 | (Token.Operator, '='), |
|
56 | 56 | (Token.Text, ' '), |
|
57 | 57 | ] + tokens |
|
58 | 58 | self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2))) |
|
59 | 59 | |
|
60 | 60 | fragment_2 = 'x, = ' + fragment |
|
61 | 61 | tokens_2 = [ |
|
62 | 62 | (Token.Name, 'x'), |
|
63 | 63 | (Token.Punctuation, ','), |
|
64 | 64 | (Token.Text, ' '), |
|
65 | 65 | (Token.Operator, '='), |
|
66 | 66 | (Token.Text, ' '), |
|
67 | 67 | ] + tokens |
|
68 | 68 | self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2))) |
|
69 | 69 | |
|
70 | 70 | fragment_2 = 'x, = %sx ' + fragment[1:] |
|
71 | 71 | tokens_2 = [ |
|
72 | 72 | (Token.Name, 'x'), |
|
73 | 73 | (Token.Punctuation, ','), |
|
74 | 74 | (Token.Text, ' '), |
|
75 | 75 | (Token.Operator, '='), |
|
76 | 76 | (Token.Text, ' '), |
|
77 | 77 | (Token.Operator, '%'), |
|
78 | 78 | (Token.Keyword, 'sx'), |
|
79 | 79 | (Token.Text, ' '), |
|
80 | 80 | ] + tokens[1:] |
|
81 | 81 | self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2))) |
|
82 | 82 | |
|
83 | 83 | fragment_2 = 'f = %R function () {}\n' |
|
84 | 84 | tokens_2 = [ |
|
85 | 85 | (Token.Name, 'f'), |
|
86 | 86 | (Token.Text, ' '), |
|
87 | 87 | (Token.Operator, '='), |
|
88 | 88 | (Token.Text, ' '), |
|
89 | 89 | (Token.Operator, '%'), |
|
90 | 90 | (Token.Keyword, 'R'), |
|
91 | 91 | (Token.Text, ' function () {}\n'), |
|
92 | 92 | ] |
|
93 | 93 | self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2))) |
|
94 | 94 | |
|
95 | 95 | fragment_2 = '\t%%xyz\n$foo\n' |
|
96 | 96 | tokens_2 = [ |
|
97 | 97 | (Token.Text, '\t'), |
|
98 | 98 | (Token.Operator, '%%'), |
|
99 | 99 | (Token.Keyword, 'xyz'), |
|
100 | 100 | (Token.Text, '\n$foo\n'), |
|
101 | 101 | ] |
|
102 | 102 | self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2))) |
|
103 | 103 | |
|
104 | 104 | fragment_2 = '%system?\n' |
|
105 | 105 | tokens_2 = [ |
|
106 | 106 | (Token.Operator, '%'), |
|
107 | 107 | (Token.Keyword, 'system'), |
|
108 | 108 | (Token.Operator, '?'), |
|
109 | 109 | (Token.Text, '\n'), |
|
110 | 110 | ] |
|
111 | 111 | self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2))) |
|
112 | 112 | |
|
113 | 113 | fragment_2 = 'x != y\n' |
|
114 | 114 | tokens_2 = [ |
|
115 | 115 | (Token.Name, 'x'), |
|
116 | 116 | (Token.Text, ' '), |
|
117 | 117 | (Token.Operator, '!='), |
|
118 | 118 | (Token.Text, ' '), |
|
119 | 119 | (Token.Name, 'y'), |
|
120 | 120 | (Token.Text, '\n'), |
|
121 | 121 | ] |
|
122 | 122 | self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2))) |
|
123 | 123 | |
|
124 | 124 | fragment_2 = ' ?math.sin\n' |
|
125 | 125 | tokens_2 = [ |
|
126 | 126 | (Token.Text, ' '), |
|
127 | 127 | (Token.Operator, '?'), |
|
128 | 128 | (Token.Text, 'math.sin'), |
|
129 | 129 | (Token.Text, '\n'), |
|
130 | 130 | ] |
|
131 | 131 | self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2))) |
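
The test above pins down how the IPythonLexer tokenizes shell escapes (`!`, `!!`), cell and line magics, and help queries. As a quick illustration outside the test harness, the lexer can be driven directly through the standard Pygments API. This is a minimal sketch; the import path `IPython.lib.lexers` is an assumption and may differ between IPython/nbconvert versions.

# Assumed import path for the lexer under test; adjust to your install.
from IPython.lib.lexers import IPythonLexer

lexer = IPythonLexer()
# get_tokens() yields (token_type, value) pairs, exactly what the
# assertions above compare against.
for token_type, value in lexer.get_tokens('!echo $HOME\n'):
    print(token_type, repr(value))
# Per the first assertion: Operator '!', Name.Builtin 'echo',
# Text ' ', Name.Variable '$HOME', Text '\n'
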
@@ -1,70 +1,70 b'' | |||
|
1 | 1 | """Test Pandoc module""" |
|
2 | 2 | #----------------------------------------------------------------------------- |
|
3 | 3 | # Copyright (C) 2014 The IPython Development Team |
|
4 | 4 | # |
|
5 | 5 | # Distributed under the terms of the BSD License. The full license is in |
|
6 | 6 | # the file COPYING, distributed as part of this software. |
|
7 | 7 | #----------------------------------------------------------------------------- |
|
8 | 8 | |
|
9 | 9 | #----------------------------------------------------------------------------- |
|
10 | 10 | # Imports |
|
11 | 11 | #----------------------------------------------------------------------------- |
|
12 | 12 | import os |
|
13 | 13 | import warnings |
|
14 | 14 | |
|
15 | 15 | from IPython.testing import decorators as dec |
|
16 | 16 | |
|
17 |    | from …

   | 17 | from jupyter_nbconvert.tests.base import TestsBase |
|
18 | 18 | from .. import pandoc |
|
19 | 19 | |
|
20 | 20 | #----------------------------------------------------------------------------- |
|
21 | 21 | # Classes and functions |
|
22 | 22 | #----------------------------------------------------------------------------- |
|
23 | 23 | class TestPandoc(TestsBase): |
|
24 | 24 | """Collection of Pandoc tests""" |
|
25 | 25 | |
|
26 | 26 | def __init__(self, *args, **kwargs): |
|
27 | 27 | super(TestPandoc, self).__init__(*args, **kwargs) |
|
28 | 28 | self.original_env = os.environ.copy() |
|
29 | 29 | |
|
30 | 30 | @dec.onlyif_cmds_exist('pandoc') |
|
31 | 31 | def test_pandoc_available(self): |
|
32 | 32 | """ Test behaviour that pandoc functions raise PandocMissing as documented """ |
|
33 | 33 | pandoc.clean_cache() |
|
34 | 34 | |
|
35 | 35 | os.environ["PATH"] = "" |
|
36 | 36 | with self.assertRaises(pandoc.PandocMissing): |
|
37 | 37 | pandoc.get_pandoc_version() |
|
38 | 38 | with self.assertRaises(pandoc.PandocMissing): |
|
39 | 39 | pandoc.check_pandoc_version() |
|
40 | 40 | with self.assertRaises(pandoc.PandocMissing): |
|
41 | 41 | pandoc.pandoc("", "markdown", "html") |
|
42 | 42 | |
|
43 | 43 | # original_env["PATH"] should contain pandoc |
|
44 | 44 | os.environ["PATH"] = self.original_env["PATH"] |
|
45 | 45 | with warnings.catch_warnings(record=True) as w: |
|
46 | 46 | pandoc.get_pandoc_version() |
|
47 | 47 | pandoc.check_pandoc_version() |
|
48 | 48 | pandoc.pandoc("", "markdown", "html") |
|
49 | 49 | self.assertEqual(w, []) |
|
50 | 50 | |
|
51 | 51 | @dec.onlyif_cmds_exist('pandoc') |
|
52 | 52 | def test_minimal_version(self): |
|
53 | 53 | original_minversion = pandoc._minimal_version |
|
54 | 54 | |
|
55 | 55 | pandoc._minimal_version = "120.0" |
|
56 | 56 | with warnings.catch_warnings(record=True) as w: |
|
57 | 57 | assert not pandoc.check_pandoc_version() |
|
58 | 58 | self.assertEqual(len(w), 1) |
|
59 | 59 | |
|
60 | 60 | pandoc._minimal_version = pandoc.get_pandoc_version() |
|
61 | 61 | assert pandoc.check_pandoc_version() |
|
62 | 62 | |
|
63 | 63 | |
|
64 | 64 | def pandoc_function_raised_missing(f, *args, **kwargs): |
|
65 | 65 | try: |
|
66 | 66 | f(*args, **kwargs) |
|
67 | 67 | except pandoc.PandocMissing: |
|
68 | 68 | return True |
|
69 | 69 | else: |
|
70 | 70 | return False |
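
The pandoc tests above rely on three module-level helpers, get_pandoc_version(), check_pandoc_version() and pandoc(source, fmt, to), all of which raise PandocMissing when no pandoc binary is found on PATH. A minimal usage sketch follows, assuming pandoc is installed and that the module lives at jupyter_nbconvert.utils.pandoc, as the relative import in the test suggests.

# Assumed absolute import path; the test itself uses 'from .. import pandoc'.
from jupyter_nbconvert.utils import pandoc

# Warns (and returns False) if the installed pandoc is older than the
# minimal supported version; raises PandocMissing if pandoc is absent.
if pandoc.check_pandoc_version():
    # Convert a small Markdown snippet to HTML through the pandoc CLI.
    html = pandoc.pandoc('# Hello, pandoc\n', 'markdown', 'html')
    print(html)
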