Revert "Cleanup Python 2 compact from Lexers"
Matthias Bussonnier
r28494:56f48b87 revert-14190-1402...
@@ -1,549 +1,540
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2 """
2 """
3 Defines a variety of Pygments lexers for highlighting IPython code.
3 Defines a variety of Pygments lexers for highlighting IPython code.
4
4
5 This includes:
5 This includes:
6
6
7 IPython3Lexer
7 IPythonLexer, IPython3Lexer
8 Lexer for pure IPython (python + magic/shell commands)
8 Lexers for pure IPython (python + magic/shell commands)
9
9
10 IPythonPartialTracebackLexer, IPythonTracebackLexer
10 IPythonPartialTracebackLexer, IPythonTracebackLexer
11 The partial traceback lexer reads everything but the Python code
11 Supports 2.x and 3.x via keyword `python3`. The partial traceback
12 appearing in a traceback.
12 lexer reads everything but the Python code appearing in a traceback.
13 The full lexer combines the partial lexer with the IPython3Lexer.
13 The full lexer combines the partial lexer with an IPython lexer.
14
14
15 IPythonConsoleLexer
15 IPythonConsoleLexer
16 A lexer for IPython console sessions, with support for tracebacks.
16 A lexer for IPython console sessions, with support for tracebacks.
17
17
18 IPyLexer
18 IPyLexer
19 A friendly lexer which examines the first line of text and from it,
19 A friendly lexer which examines the first line of text and from it,
20 decides whether to use an IPython lexer or an IPython console lexer.
20 decides whether to use an IPython lexer or an IPython console lexer.
21 This is probably the only lexer that needs to be explicitly added
21 This is probably the only lexer that needs to be explicitly added
22 to Pygments.
22 to Pygments.
23
23
24 """
24 """
25 #-----------------------------------------------------------------------------
25 #-----------------------------------------------------------------------------
26 # Copyright (c) 2013, the IPython Development Team.
26 # Copyright (c) 2013, the IPython Development Team.
27 #
27 #
28 # Distributed under the terms of the Modified BSD License.
28 # Distributed under the terms of the Modified BSD License.
29 #
29 #
30 # The full license is in the file COPYING.txt, distributed with this software.
30 # The full license is in the file COPYING.txt, distributed with this software.
31 #-----------------------------------------------------------------------------
31 #-----------------------------------------------------------------------------
32
32
33 # Standard library
33 # Standard library
34 import re
34 import re
35
35
36 # Third party
36 # Third party
37 from pygments.lexers import (
37 from pygments.lexers import (
38 BashLexer,
38 BashLexer, HtmlLexer, JavascriptLexer, RubyLexer, PerlLexer, PythonLexer,
39 HtmlLexer,
39 Python3Lexer, TexLexer)
40 JavascriptLexer,
41 RubyLexer,
42 PerlLexer,
43 Python3Lexer,
44 TexLexer,
45 )
46 from pygments.lexer import (
40 from pygments.lexer import (
47 Lexer,
41 Lexer, DelegatingLexer, RegexLexer, do_insertions, bygroups, using,
48 DelegatingLexer,
49 RegexLexer,
50 do_insertions,
51 bygroups,
52 using,
53 inherit,
54 )
42 )
55 from pygments.token import (
43 from pygments.token import (
56 Generic, Keyword, Literal, Name, Operator, Other, Text, Error,
44 Generic, Keyword, Literal, Name, Operator, Other, Text, Error,
57 )
45 )
58 from pygments.util import get_bool_opt
46 from pygments.util import get_bool_opt
59
47
60 # Local
48 # Local
61
49
62 line_re = re.compile('.*?\n')
50 line_re = re.compile('.*?\n')
63
51
64 __all__ = [
52 __all__ = ['build_ipy_lexer', 'IPython3Lexer', 'IPythonLexer',
65 "IPython3Lexer",
53 'IPythonPartialTracebackLexer', 'IPythonTracebackLexer',
66 "IPythonPartialTracebackLexer",
54 'IPythonConsoleLexer', 'IPyLexer']
67 "IPythonTracebackLexer",
68 "IPythonConsoleLexer",
69 "IPyLexer",
70 ]
71
55
72
56
73 class IPython3Lexer(Python3Lexer):
57 def build_ipy_lexer(python3):
74 """IPython3 Lexer"""
58 """Builds IPython lexers depending on the value of `python3`.
75
59
76 name = "IPython3"
60 The lexer inherits from an appropriate Python lexer and then adds
77 aliases = ["ipython3"]
61 information about IPython specific keywords (i.e. magic commands,
62 shell commands, etc.)
78
63
79 tokens = {
64 Parameters
80 "root": [
65 ----------
81 (
66 python3 : bool
82 r"(?s)(\s*)(%%capture)([^\n]*\n)(.*)",
67 If `True`, then build an IPython lexer from a Python 3 lexer.
83 bygroups(Text, Operator, Text, using(Python3Lexer)),
68
84 ),
69 """
85 (
70 # It would be nice to have a single IPython lexer class which takes
86 r"(?s)(\s*)(%%debug)([^\n]*\n)(.*)",
71 # a boolean `python3`. But since there are two Python lexer classes,
87 bygroups(Text, Operator, Text, using(Python3Lexer)),
72 # we will also have two IPython lexer classes.
88 ),
73 if python3:
89 (
74 PyLexer = Python3Lexer
90 r"(?is)(\s*)(%%html)([^\n]*\n)(.*)",
75 name = 'IPython3'
91 bygroups(Text, Operator, Text, using(HtmlLexer)),
76 aliases = ['ipython3']
92 ),
77 doc = """IPython3 Lexer"""
93 (
78 else:
94 r"(?s)(\s*)(%%javascript)([^\n]*\n)(.*)",
79 PyLexer = PythonLexer
95 bygroups(Text, Operator, Text, using(JavascriptLexer)),
80 name = 'IPython'
96 ),
81 aliases = ['ipython2', 'ipython']
97 (
82 doc = """IPython Lexer"""
98 r"(?s)(\s*)(%%js)([^\n]*\n)(.*)",
83
99 bygroups(Text, Operator, Text, using(JavascriptLexer)),
84 ipython_tokens = [
100 ),
85 (r'(?s)(\s*)(%%capture)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
101 (
86 (r'(?s)(\s*)(%%debug)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
102 r"(?s)(\s*)(%%latex)([^\n]*\n)(.*)",
87 (r'(?is)(\s*)(%%html)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(HtmlLexer))),
103 bygroups(Text, Operator, Text, using(TexLexer)),
88 (r'(?s)(\s*)(%%javascript)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))),
104 ),
89 (r'(?s)(\s*)(%%js)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))),
105 (
90 (r'(?s)(\s*)(%%latex)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(TexLexer))),
106 r"(?s)(\s*)(%%perl)([^\n]*\n)(.*)",
91 (r'(?s)(\s*)(%%perl)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PerlLexer))),
107 bygroups(Text, Operator, Text, using(PerlLexer)),
92 (r'(?s)(\s*)(%%prun)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
108 ),
93 (r'(?s)(\s*)(%%pypy)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
109 (
94 (r'(?s)(\s*)(%%python)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
110 r"(?s)(\s*)(%%prun)([^\n]*\n)(.*)",
95 (r'(?s)(\s*)(%%python2)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PythonLexer))),
111 bygroups(Text, Operator, Text, using(Python3Lexer)),
96 (r'(?s)(\s*)(%%python3)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(Python3Lexer))),
112 ),
97 (r'(?s)(\s*)(%%ruby)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(RubyLexer))),
113 (
98 (r'(?s)(\s*)(%%time)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
114 r"(?s)(\s*)(%%pypy)([^\n]*\n)(.*)",
99 (r'(?s)(\s*)(%%timeit)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
115 bygroups(Text, Operator, Text, using(Python3Lexer)),
100 (r'(?s)(\s*)(%%writefile)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
116 ),
101 (r'(?s)(\s*)(%%file)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
117 (
118 r"(?s)(\s*)(%%python)([^\n]*\n)(.*)",
119 bygroups(Text, Operator, Text, using(Python3Lexer)),
120 ),
121 (
122 r"(?s)(\s*)(%%python3)([^\n]*\n)(.*)",
123 bygroups(Text, Operator, Text, using(Python3Lexer)),
124 ),
125 (
126 r"(?s)(\s*)(%%ruby)([^\n]*\n)(.*)",
127 bygroups(Text, Operator, Text, using(RubyLexer)),
128 ),
129 (
130 r"(?s)(\s*)(%%time)([^\n]*\n)(.*)",
131 bygroups(Text, Operator, Text, using(Python3Lexer)),
132 ),
133 (
134 r"(?s)(\s*)(%%timeit)([^\n]*\n)(.*)",
135 bygroups(Text, Operator, Text, using(Python3Lexer)),
136 ),
137 (
138 r"(?s)(\s*)(%%writefile)([^\n]*\n)(.*)",
139 bygroups(Text, Operator, Text, using(Python3Lexer)),
140 ),
141 (
142 r"(?s)(\s*)(%%file)([^\n]*\n)(.*)",
143 bygroups(Text, Operator, Text, using(Python3Lexer)),
144 ),
145 (r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)),
102 (r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)),
146 (
103 (r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))),
147 r"(?s)(^\s*)(%%!)([^\n]*\n)(.*)",
148 bygroups(Text, Operator, Text, using(BashLexer)),
149 ),
150 (r"(%%?)(\w+)(\?\??)$", bygroups(Operator, Keyword, Operator)),
104 (r"(%%?)(\w+)(\?\??)$", bygroups(Operator, Keyword, Operator)),
151 (r"\b(\?\??)(\s*)$", bygroups(Operator, Text)),
105 (r"\b(\?\??)(\s*)$", bygroups(Operator, Text)),
152 (
106 (r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword,
153 r"(%)(sx|sc|system)(.*)(\n)",
107 using(BashLexer), Text)),
154 bygroups(Operator, Keyword, using(BashLexer), Text),
108 (r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)),
155 ),
109 (r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
156 (r"(%)(\w+)(.*\n)", bygroups(Operator, Keyword, Text)),
110 (r'(!)(?!=)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
157 (r"^(!!)(.+)(\n)", bygroups(Operator, using(BashLexer), Text)),
111 (r'^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)', bygroups(Text, Operator, Text)),
158 (r"(!)(?!=)(.+)(\n)", bygroups(Operator, using(BashLexer), Text)),
112 (r'(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$', bygroups(Text, Operator, Text)),
159 (r"^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)", bygroups(Text, Operator, Text)),
160 (r"(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$", bygroups(Text, Operator, Text)),
161 inherit,
162 ]
113 ]
163 }
114
115 tokens = PyLexer.tokens.copy()
116 tokens['root'] = ipython_tokens + tokens['root']
117
118 attrs = {'name': name, 'aliases': aliases, 'filenames': [],
119 '__doc__': doc, 'tokens': tokens}
120
121 return type(name, (PyLexer,), attrs)
122
123
124 IPython3Lexer = build_ipy_lexer(python3=True)
125 IPythonLexer = build_ipy_lexer(python3=False)
164
126
165
127
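
For orientation (a usage sketch, not part of this diff): the lexer above plugs into Pygments like any other lexer. A minimal example, assuming IPython and Pygments are installed:

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from IPython.lib.lexers import IPython3Lexer

    # Cell magics, line magics and shell escapes are tokenized on top of
    # the regular Python 3 highlighting.
    code = "%timeit sum(range(10))\n!echo $HOME\n"
    print(highlight(code, IPython3Lexer(), TerminalFormatter()))
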
166 class IPythonPartialTracebackLexer(RegexLexer):
128 class IPythonPartialTracebackLexer(RegexLexer):
167 """
129 """
168 Partial lexer for IPython tracebacks.
130 Partial lexer for IPython tracebacks.
169
131
170 Handles all the non-python output.
132 Handles all the non-python output.
171
133
172 """
134 """
173 name = 'IPython Partial Traceback'
135 name = 'IPython Partial Traceback'
174
136
175 tokens = {
137 tokens = {
176 'root': [
138 'root': [
177 # Tracebacks for syntax errors have a different style.
139 # Tracebacks for syntax errors have a different style.
178 # For both types of tracebacks, we mark the first line with
140 # For both types of tracebacks, we mark the first line with
179 # Generic.Traceback. For syntax errors, we mark the filename
141 # Generic.Traceback. For syntax errors, we mark the filename
180             # just as we mark the filenames for non-syntax tracebacks.
142             # just as we mark the filenames for non-syntax tracebacks.
181 #
143 #
182 # These two regexps define how IPythonConsoleLexer finds a
144 # These two regexps define how IPythonConsoleLexer finds a
183 # traceback.
145 # traceback.
184 #
146 #
185 ## Non-syntax traceback
147 ## Non-syntax traceback
186 (r'^(\^C)?(-+\n)', bygroups(Error, Generic.Traceback)),
148 (r'^(\^C)?(-+\n)', bygroups(Error, Generic.Traceback)),
187 ## Syntax traceback
149 ## Syntax traceback
188 (r'^( File)(.*)(, line )(\d+\n)',
150 (r'^( File)(.*)(, line )(\d+\n)',
189 bygroups(Generic.Traceback, Name.Namespace,
151 bygroups(Generic.Traceback, Name.Namespace,
190 Generic.Traceback, Literal.Number.Integer)),
152 Generic.Traceback, Literal.Number.Integer)),
191
153
192 # (Exception Identifier)(Whitespace)(Traceback Message)
154 # (Exception Identifier)(Whitespace)(Traceback Message)
193 (r'(?u)(^[^\d\W]\w*)(\s*)(Traceback.*?\n)',
155 (r'(?u)(^[^\d\W]\w*)(\s*)(Traceback.*?\n)',
194 bygroups(Name.Exception, Generic.Whitespace, Text)),
156 bygroups(Name.Exception, Generic.Whitespace, Text)),
195 # (Module/Filename)(Text)(Callee)(Function Signature)
157 # (Module/Filename)(Text)(Callee)(Function Signature)
196 # Better options for callee and function signature?
158 # Better options for callee and function signature?
197 (r'(.*)( in )(.*)(\(.*\)\n)',
159 (r'(.*)( in )(.*)(\(.*\)\n)',
198 bygroups(Name.Namespace, Text, Name.Entity, Name.Tag)),
160 bygroups(Name.Namespace, Text, Name.Entity, Name.Tag)),
199 # Regular line: (Whitespace)(Line Number)(Python Code)
161 # Regular line: (Whitespace)(Line Number)(Python Code)
200 (r'(\s*?)(\d+)(.*?\n)',
162 (r'(\s*?)(\d+)(.*?\n)',
201 bygroups(Generic.Whitespace, Literal.Number.Integer, Other)),
163 bygroups(Generic.Whitespace, Literal.Number.Integer, Other)),
202 # Emphasized line: (Arrow)(Line Number)(Python Code)
164 # Emphasized line: (Arrow)(Line Number)(Python Code)
203 # Using Exception token so arrow color matches the Exception.
165 # Using Exception token so arrow color matches the Exception.
204 (r'(-*>?\s?)(\d+)(.*?\n)',
166 (r'(-*>?\s?)(\d+)(.*?\n)',
205 bygroups(Name.Exception, Literal.Number.Integer, Other)),
167 bygroups(Name.Exception, Literal.Number.Integer, Other)),
206 # (Exception Identifier)(Message)
168 # (Exception Identifier)(Message)
207 (r'(?u)(^[^\d\W]\w*)(:.*?\n)',
169 (r'(?u)(^[^\d\W]\w*)(:.*?\n)',
208 bygroups(Name.Exception, Text)),
170 bygroups(Name.Exception, Text)),
209 # Tag everything else as Other, will be handled later.
171 # Tag everything else as Other, will be handled later.
210 (r'.*\n', Other),
172 (r'.*\n', Other),
211 ],
173 ],
212 }
174 }
213
175
214
176
215 class IPythonTracebackLexer(DelegatingLexer):
177 class IPythonTracebackLexer(DelegatingLexer):
216 """
178 """
217 IPython traceback lexer.
179 IPython traceback lexer.
218
180
219 For doctests, the tracebacks can be snipped as much as desired with the
181 For doctests, the tracebacks can be snipped as much as desired with the
220     exception of the lines that designate a traceback. For non-syntax error
182     exception of the lines that designate a traceback. For non-syntax error
221 tracebacks, this is the line of hyphens. For syntax error tracebacks,
183 tracebacks, this is the line of hyphens. For syntax error tracebacks,
222 this is the line which lists the File and line number.
184 this is the line which lists the File and line number.
223
185
224 """
186 """
225
187 # The lexer inherits from DelegatingLexer. The "root" lexer is an
226 # The lexer inherits from DelegatingLexer. The "root" lexer is the
188 # appropriate IPython lexer, which depends on the value of the boolean
227 # IPython3 lexer. First, we parse with the partial IPython traceback lexer.
189 # `python3`. First, we parse with the partial IPython traceback lexer.
228 # Then, any code marked with the "Other" token is delegated to the root
190 # Then, any code marked with the "Other" token is delegated to the root
229 # lexer.
191 # lexer.
230 #
192 #
231 name = 'IPython Traceback'
193 name = 'IPython Traceback'
232 aliases = ['ipythontb']
194 aliases = ['ipythontb']
233
195
234 def __init__(self, **options):
196 def __init__(self, **options):
235 """
197 """
236         A subclass of `DelegatingLexer` which delegates to either the IPython lexer
198         A subclass of `DelegatingLexer` which delegates to either the IPython lexer
237         or the IPythonPartialTracebackLexer.
199         or the IPythonPartialTracebackLexer.
238 """
200 """
239         # note we need an __init__ docstring, as otherwise it inherits the doc from the superclass,
201         # note we need an __init__ docstring, as otherwise it inherits the doc from the superclass,
240         # which fails the documentation build because it references a section of the pygments docs
202         # which fails the documentation build because it references a section of the pygments docs
241         # that does not exist when building IPython's docs.
203         # that does not exist when building IPython's docs.
204 self.python3 = get_bool_opt(options, 'python3', False)
205 if self.python3:
206 self.aliases = ['ipython3tb']
207 else:
208 self.aliases = ['ipython2tb', 'ipythontb']
242
209
243 super().__init__(IPython3Lexer, IPythonPartialTracebackLexer, **options)
210 if self.python3:
211 IPyLexer = IPython3Lexer
212 else:
213 IPyLexer = IPythonLexer
244
214
215 DelegatingLexer.__init__(self, IPyLexer,
216 IPythonPartialTracebackLexer, **options)
245
217
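
A brief, hedged sketch of the combined traceback lexer in action; the exact token stream depends on the Pygments version:

    from IPython.lib.lexers import IPythonTracebackLexer

    tb = (
        "---------------------------------------------------------------------------\n"
        "ZeroDivisionError                         Traceback (most recent call last)\n"
        "----> 1 1/0\n"
        "\n"
        "ZeroDivisionError: division by zero\n"
    )
    # The partial lexer claims the divider/arrow/exception lines; everything
    # tagged Other is delegated to the IPython lexer.
    for token_type, value in IPythonTracebackLexer().get_tokens(tb):
        print(token_type, repr(value))
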
246 class IPythonConsoleLexer(Lexer):
218 class IPythonConsoleLexer(Lexer):
247 """
219 """
248 An IPython console lexer for IPython code-blocks and doctests, such as:
220 An IPython console lexer for IPython code-blocks and doctests, such as:
249
221
250 .. code-block:: rst
222 .. code-block:: rst
251
223
252 .. code-block:: ipythonconsole
224 .. code-block:: ipythonconsole
253
225
254 In [1]: a = 'foo'
226 In [1]: a = 'foo'
255
227
256 In [2]: a
228 In [2]: a
257 Out[2]: 'foo'
229 Out[2]: 'foo'
258
230
259 In [3]: print(a)
231 In [3]: print(a)
260 foo
232 foo
261
233
262
234
263 Support is also provided for IPython exceptions:
235 Support is also provided for IPython exceptions:
264
236
265 .. code-block:: rst
237 .. code-block:: rst
266
238
267 .. code-block:: ipythonconsole
239 .. code-block:: ipythonconsole
268
240
269 In [1]: raise Exception
241 In [1]: raise Exception
270 Traceback (most recent call last):
242 Traceback (most recent call last):
271 ...
243 ...
272 Exception
244 Exception
273
245
274 """
246 """
275 name = 'IPython console session'
247 name = 'IPython console session'
276 aliases = ['ipythonconsole']
248 aliases = ['ipythonconsole']
277 mimetypes = ['text/x-ipython-console']
249 mimetypes = ['text/x-ipython-console']
278
250
279 # The regexps used to determine what is input and what is output.
251 # The regexps used to determine what is input and what is output.
280 # The default prompts for IPython are:
252 # The default prompts for IPython are:
281 #
253 #
282 # in = 'In [#]: '
254 # in = 'In [#]: '
283 # continuation = ' .D.: '
255 # continuation = ' .D.: '
284 # template = 'Out[#]: '
256 # template = 'Out[#]: '
285 #
257 #
286     # Where '#' is the 'prompt number' or 'execution count' and 'D'
258     # Where '#' is the 'prompt number' or 'execution count' and 'D'
287     # is a number of dots matching the width of the execution count
259     # is a number of dots matching the width of the execution count
288 #
260 #
289 in1_regex = r'In \[[0-9]+\]: '
261 in1_regex = r'In \[[0-9]+\]: '
290 in2_regex = r' \.\.+\.: '
262 in2_regex = r' \.\.+\.: '
291 out_regex = r'Out\[[0-9]+\]: '
263 out_regex = r'Out\[[0-9]+\]: '
292
264
293 #: The regex to determine when a traceback starts.
265 #: The regex to determine when a traceback starts.
294 ipytb_start = re.compile(r'^(\^C)?(-+\n)|^( File)(.*)(, line )(\d+\n)')
266 ipytb_start = re.compile(r'^(\^C)?(-+\n)|^( File)(.*)(, line )(\d+\n)')
295
267
296 def __init__(self, **options):
268 def __init__(self, **options):
297 """Initialize the IPython console lexer.
269 """Initialize the IPython console lexer.
298
270
299 Parameters
271 Parameters
300 ----------
272 ----------
273 python3 : bool
274 If `True`, then the console inputs are parsed using a Python 3
275 lexer. Otherwise, they are parsed using a Python 2 lexer.
301 in1_regex : RegexObject
276 in1_regex : RegexObject
302 The compiled regular expression used to detect the start
277 The compiled regular expression used to detect the start
303 of inputs. Although the IPython configuration setting may have a
278 of inputs. Although the IPython configuration setting may have a
304 trailing whitespace, do not include it in the regex. If `None`,
279 trailing whitespace, do not include it in the regex. If `None`,
305 then the default input prompt is assumed.
280 then the default input prompt is assumed.
306 in2_regex : RegexObject
281 in2_regex : RegexObject
307 The compiled regular expression used to detect the continuation
282 The compiled regular expression used to detect the continuation
308 of inputs. Although the IPython configuration setting may have a
283 of inputs. Although the IPython configuration setting may have a
309 trailing whitespace, do not include it in the regex. If `None`,
284 trailing whitespace, do not include it in the regex. If `None`,
310 then the default input prompt is assumed.
285 then the default input prompt is assumed.
311 out_regex : RegexObject
286 out_regex : RegexObject
312 The compiled regular expression used to detect outputs. If `None`,
287 The compiled regular expression used to detect outputs. If `None`,
313 then the default output prompt is assumed.
288 then the default output prompt is assumed.
314
289
315 """
290 """
316 self.aliases = ["ipython3console"]
291 self.python3 = get_bool_opt(options, 'python3', False)
292 if self.python3:
293 self.aliases = ['ipython3console']
294 else:
295 self.aliases = ['ipython2console', 'ipythonconsole']
317
296
318 in1_regex = options.get('in1_regex', self.in1_regex)
297 in1_regex = options.get('in1_regex', self.in1_regex)
319 in2_regex = options.get('in2_regex', self.in2_regex)
298 in2_regex = options.get('in2_regex', self.in2_regex)
320 out_regex = options.get('out_regex', self.out_regex)
299 out_regex = options.get('out_regex', self.out_regex)
321
300
322 # So that we can work with input and output prompts which have been
301 # So that we can work with input and output prompts which have been
323 # rstrip'd (possibly by editors) we also need rstrip'd variants. If
302 # rstrip'd (possibly by editors) we also need rstrip'd variants. If
324 # we do not do this, then such prompts will be tagged as 'output'.
303 # we do not do this, then such prompts will be tagged as 'output'.
325 # The reason can't just use the rstrip'd variants instead is because
304 # The reason can't just use the rstrip'd variants instead is because
326 # we want any whitespace associated with the prompt to be inserted
305 # we want any whitespace associated with the prompt to be inserted
327         # with the token. This allows formatted code to be modified so as to hide
306         # with the token. This allows formatted code to be modified so as to hide
328 # the appearance of prompts, with the whitespace included. One example
307 # the appearance of prompts, with the whitespace included. One example
329 # use of this is in copybutton.js from the standard lib Python docs.
308 # use of this is in copybutton.js from the standard lib Python docs.
330 in1_regex_rstrip = in1_regex.rstrip() + '\n'
309 in1_regex_rstrip = in1_regex.rstrip() + '\n'
331 in2_regex_rstrip = in2_regex.rstrip() + '\n'
310 in2_regex_rstrip = in2_regex.rstrip() + '\n'
332 out_regex_rstrip = out_regex.rstrip() + '\n'
311 out_regex_rstrip = out_regex.rstrip() + '\n'
333
312
334 # Compile and save them all.
313 # Compile and save them all.
335 attrs = ['in1_regex', 'in2_regex', 'out_regex',
314 attrs = ['in1_regex', 'in2_regex', 'out_regex',
336 'in1_regex_rstrip', 'in2_regex_rstrip', 'out_regex_rstrip']
315 'in1_regex_rstrip', 'in2_regex_rstrip', 'out_regex_rstrip']
337 for attr in attrs:
316 for attr in attrs:
338 self.__setattr__(attr, re.compile(locals()[attr]))
317 self.__setattr__(attr, re.compile(locals()[attr]))
339
318
340 Lexer.__init__(self, **options)
319 Lexer.__init__(self, **options)
341
320
342 self.pylexer = IPython3Lexer(**options)
321 if self.python3:
343 self.tblexer = IPythonTracebackLexer(**options)
322 pylexer = IPython3Lexer
323 tblexer = IPythonTracebackLexer
324 else:
325 pylexer = IPythonLexer
326 tblexer = IPythonTracebackLexer
327
328 self.pylexer = pylexer(**options)
329 self.tblexer = tblexer(**options)
344
330
345 self.reset()
331 self.reset()
346
332
347 def reset(self):
333 def reset(self):
348 self.mode = 'output'
334 self.mode = 'output'
349 self.index = 0
335 self.index = 0
350 self.buffer = u''
336 self.buffer = u''
351 self.insertions = []
337 self.insertions = []
352
338
353 def buffered_tokens(self):
339 def buffered_tokens(self):
354 """
340 """
355 Generator of unprocessed tokens after doing insertions and before
341 Generator of unprocessed tokens after doing insertions and before
356 changing to a new state.
342 changing to a new state.
357
343
358 """
344 """
359 if self.mode == 'output':
345 if self.mode == 'output':
360 tokens = [(0, Generic.Output, self.buffer)]
346 tokens = [(0, Generic.Output, self.buffer)]
361 elif self.mode == 'input':
347 elif self.mode == 'input':
362 tokens = self.pylexer.get_tokens_unprocessed(self.buffer)
348 tokens = self.pylexer.get_tokens_unprocessed(self.buffer)
363 else: # traceback
349 else: # traceback
364 tokens = self.tblexer.get_tokens_unprocessed(self.buffer)
350 tokens = self.tblexer.get_tokens_unprocessed(self.buffer)
365
351
366 for i, t, v in do_insertions(self.insertions, tokens):
352 for i, t, v in do_insertions(self.insertions, tokens):
367 # All token indexes are relative to the buffer.
353 # All token indexes are relative to the buffer.
368 yield self.index + i, t, v
354 yield self.index + i, t, v
369
355
370 # Clear it all
356 # Clear it all
371 self.index += len(self.buffer)
357 self.index += len(self.buffer)
372 self.buffer = u''
358 self.buffer = u''
373 self.insertions = []
359 self.insertions = []
374
360
375 def get_mci(self, line):
361 def get_mci(self, line):
376 """
362 """
377 Parses the line and returns a 3-tuple: (mode, code, insertion).
363 Parses the line and returns a 3-tuple: (mode, code, insertion).
378
364
379 `mode` is the next mode (or state) of the lexer, and is always equal
365 `mode` is the next mode (or state) of the lexer, and is always equal
380 to 'input', 'output', or 'tb'.
366 to 'input', 'output', or 'tb'.
381
367
382 `code` is a portion of the line that should be added to the buffer
368 `code` is a portion of the line that should be added to the buffer
383 corresponding to the next mode and eventually lexed by another lexer.
369 corresponding to the next mode and eventually lexed by another lexer.
384 For example, `code` could be Python code if `mode` were 'input'.
370 For example, `code` could be Python code if `mode` were 'input'.
385
371
386 `insertion` is a 3-tuple (index, token, text) representing an
372 `insertion` is a 3-tuple (index, token, text) representing an
387 unprocessed "token" that will be inserted into the stream of tokens
373 unprocessed "token" that will be inserted into the stream of tokens
388 that are created from the buffer once we change modes. This is usually
374 that are created from the buffer once we change modes. This is usually
389 the input or output prompt.
375 the input or output prompt.
390
376
391 In general, the next mode depends on current mode and on the contents
377 In general, the next mode depends on current mode and on the contents
392 of `line`.
378 of `line`.
393
379
394 """
380 """
395 # To reduce the number of regex match checks, we have multiple
381 # To reduce the number of regex match checks, we have multiple
396 # 'if' blocks instead of 'if-elif' blocks.
382 # 'if' blocks instead of 'if-elif' blocks.
397
383
398 # Check for possible end of input
384 # Check for possible end of input
399 in2_match = self.in2_regex.match(line)
385 in2_match = self.in2_regex.match(line)
400 in2_match_rstrip = self.in2_regex_rstrip.match(line)
386 in2_match_rstrip = self.in2_regex_rstrip.match(line)
401 if (in2_match and in2_match.group().rstrip() == line.rstrip()) or \
387 if (in2_match and in2_match.group().rstrip() == line.rstrip()) or \
402 in2_match_rstrip:
388 in2_match_rstrip:
403 end_input = True
389 end_input = True
404 else:
390 else:
405 end_input = False
391 end_input = False
406 if end_input and self.mode != 'tb':
392 if end_input and self.mode != 'tb':
407 # Only look for an end of input when not in tb mode.
393 # Only look for an end of input when not in tb mode.
408 # An ellipsis could appear within the traceback.
394 # An ellipsis could appear within the traceback.
409 mode = 'output'
395 mode = 'output'
410 code = u''
396 code = u''
411 insertion = (0, Generic.Prompt, line)
397 insertion = (0, Generic.Prompt, line)
412 return mode, code, insertion
398 return mode, code, insertion
413
399
414 # Check for output prompt
400 # Check for output prompt
415 out_match = self.out_regex.match(line)
401 out_match = self.out_regex.match(line)
416 out_match_rstrip = self.out_regex_rstrip.match(line)
402 out_match_rstrip = self.out_regex_rstrip.match(line)
417 if out_match or out_match_rstrip:
403 if out_match or out_match_rstrip:
418 mode = 'output'
404 mode = 'output'
419 if out_match:
405 if out_match:
420 idx = out_match.end()
406 idx = out_match.end()
421 else:
407 else:
422 idx = out_match_rstrip.end()
408 idx = out_match_rstrip.end()
423 code = line[idx:]
409 code = line[idx:]
424 # Use the 'heading' token for output. We cannot use Generic.Error
410 # Use the 'heading' token for output. We cannot use Generic.Error
425 # since it would conflict with exceptions.
411 # since it would conflict with exceptions.
426 insertion = (0, Generic.Heading, line[:idx])
412 insertion = (0, Generic.Heading, line[:idx])
427 return mode, code, insertion
413 return mode, code, insertion
428
414
429
415
430 # Check for input or continuation prompt (non stripped version)
416 # Check for input or continuation prompt (non stripped version)
431 in1_match = self.in1_regex.match(line)
417 in1_match = self.in1_regex.match(line)
432 if in1_match or (in2_match and self.mode != 'tb'):
418 if in1_match or (in2_match and self.mode != 'tb'):
433 # New input or when not in tb, continued input.
419 # New input or when not in tb, continued input.
434 # We do not check for continued input when in tb since it is
420 # We do not check for continued input when in tb since it is
435 # allowable to replace a long stack with an ellipsis.
421 # allowable to replace a long stack with an ellipsis.
436 mode = 'input'
422 mode = 'input'
437 if in1_match:
423 if in1_match:
438 idx = in1_match.end()
424 idx = in1_match.end()
439 else: # in2_match
425 else: # in2_match
440 idx = in2_match.end()
426 idx = in2_match.end()
441 code = line[idx:]
427 code = line[idx:]
442 insertion = (0, Generic.Prompt, line[:idx])
428 insertion = (0, Generic.Prompt, line[:idx])
443 return mode, code, insertion
429 return mode, code, insertion
444
430
445 # Check for input or continuation prompt (stripped version)
431 # Check for input or continuation prompt (stripped version)
446 in1_match_rstrip = self.in1_regex_rstrip.match(line)
432 in1_match_rstrip = self.in1_regex_rstrip.match(line)
447 if in1_match_rstrip or (in2_match_rstrip and self.mode != 'tb'):
433 if in1_match_rstrip or (in2_match_rstrip and self.mode != 'tb'):
448 # New input or when not in tb, continued input.
434 # New input or when not in tb, continued input.
449 # We do not check for continued input when in tb since it is
435 # We do not check for continued input when in tb since it is
450 # allowable to replace a long stack with an ellipsis.
436 # allowable to replace a long stack with an ellipsis.
451 mode = 'input'
437 mode = 'input'
452 if in1_match_rstrip:
438 if in1_match_rstrip:
453 idx = in1_match_rstrip.end()
439 idx = in1_match_rstrip.end()
454 else: # in2_match
440 else: # in2_match
455 idx = in2_match_rstrip.end()
441 idx = in2_match_rstrip.end()
456 code = line[idx:]
442 code = line[idx:]
457 insertion = (0, Generic.Prompt, line[:idx])
443 insertion = (0, Generic.Prompt, line[:idx])
458 return mode, code, insertion
444 return mode, code, insertion
459
445
460 # Check for traceback
446 # Check for traceback
461 if self.ipytb_start.match(line):
447 if self.ipytb_start.match(line):
462 mode = 'tb'
448 mode = 'tb'
463 code = line
449 code = line
464 insertion = None
450 insertion = None
465 return mode, code, insertion
451 return mode, code, insertion
466
452
467 # All other stuff...
453 # All other stuff...
468 if self.mode in ('input', 'output'):
454 if self.mode in ('input', 'output'):
469 # We assume all other text is output. Multiline input that
455 # We assume all other text is output. Multiline input that
470 # does not use the continuation marker cannot be detected.
456 # does not use the continuation marker cannot be detected.
471 # For example, the 3 in the following is clearly output:
457 # For example, the 3 in the following is clearly output:
472 #
458 #
473 # In [1]: print 3
459 # In [1]: print 3
474 # 3
460 # 3
475 #
461 #
476 # But the following second line is part of the input:
462 # But the following second line is part of the input:
477 #
463 #
478 # In [2]: while True:
464 # In [2]: while True:
479 # print True
465 # print True
480 #
466 #
481 # In both cases, the 2nd line will be 'output'.
467 # In both cases, the 2nd line will be 'output'.
482 #
468 #
483 mode = 'output'
469 mode = 'output'
484 else:
470 else:
485 mode = 'tb'
471 mode = 'tb'
486
472
487 code = line
473 code = line
488 insertion = None
474 insertion = None
489
475
490 return mode, code, insertion
476 return mode, code, insertion
491
477
492 def get_tokens_unprocessed(self, text):
478 def get_tokens_unprocessed(self, text):
493 self.reset()
479 self.reset()
494 for match in line_re.finditer(text):
480 for match in line_re.finditer(text):
495 line = match.group()
481 line = match.group()
496 mode, code, insertion = self.get_mci(line)
482 mode, code, insertion = self.get_mci(line)
497
483
498 if mode != self.mode:
484 if mode != self.mode:
499 # Yield buffered tokens before transitioning to new mode.
485 # Yield buffered tokens before transitioning to new mode.
500 for token in self.buffered_tokens():
486 for token in self.buffered_tokens():
501 yield token
487 yield token
502 self.mode = mode
488 self.mode = mode
503
489
504 if insertion:
490 if insertion:
505 self.insertions.append((len(self.buffer), [insertion]))
491 self.insertions.append((len(self.buffer), [insertion]))
506 self.buffer += code
492 self.buffer += code
507
493
508 for token in self.buffered_tokens():
494 for token in self.buffered_tokens():
509 yield token
495 yield token
510
496
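
A hedged usage sketch for the console lexer: default prompts work out of the box, and the `in1_regex`/`out_regex` options accept regex source strings for non-default prompts (the custom prompt styles below are purely illustrative):

    from IPython.lib.lexers import IPythonConsoleLexer

    session = "In [1]: a = 'foo'\n\nIn [2]: a\nOut[2]: 'foo'\n"
    tokens = list(IPythonConsoleLexer().get_tokens(session))

    # Non-default prompts: pass regex *source strings*; they are rstrip'd
    # and compiled internally (hypothetical prompt styles, for illustration).
    custom = IPythonConsoleLexer(in1_regex=r'>>> ', out_regex=r'=> ')
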
511 class IPyLexer(Lexer):
497 class IPyLexer(Lexer):
512 r"""
498 r"""
513 Primary lexer for all IPython-like code.
499 Primary lexer for all IPython-like code.
514
500
515 This is a simple helper lexer. If the first line of the text begins with
501 This is a simple helper lexer. If the first line of the text begins with
516 "In \[[0-9]+\]:", then the entire text is parsed with an IPython console
502 "In \[[0-9]+\]:", then the entire text is parsed with an IPython console
517 lexer. If not, then the entire text is parsed with an IPython lexer.
503 lexer. If not, then the entire text is parsed with an IPython lexer.
518
504
519 The goal is to reduce the number of lexers that are registered
505 The goal is to reduce the number of lexers that are registered
520 with Pygments.
506 with Pygments.
521
507
522 """
508 """
523 name = 'IPy session'
509 name = 'IPy session'
524 aliases = ['ipy']
510 aliases = ['ipy']
525
511
526 def __init__(self, **options):
512 def __init__(self, **options):
527 """
513 """
528         Create a new IPyLexer instance which dispatches to either an
514         Create a new IPyLexer instance which dispatches to either an
529         IPythonConsoleLexer (if In prompts are present) or an IPython3Lexer (if
515         IPythonConsoleLexer (if In prompts are present) or an IPythonLexer (if
530         In prompts are not present).
516         In prompts are not present).
531 """
517 """
532         # init docstring is necessary for docs not to fail to build due to the parent
518         # init docstring is necessary for docs not to fail to build due to the parent
533         # docs referencing a section in the pygments docs.
519         # docs referencing a section in the pygments docs.
534 self.aliases = ["ipy3"]
520 self.python3 = get_bool_opt(options, 'python3', False)
521 if self.python3:
522 self.aliases = ['ipy3']
523 else:
524 self.aliases = ['ipy2', 'ipy']
535
525
536 Lexer.__init__(self, **options)
526 Lexer.__init__(self, **options)
537
527
538 self.IPythonLexer = IPython3Lexer(**options)
528 self.IPythonLexer = IPythonLexer(**options)
539 self.IPythonConsoleLexer = IPythonConsoleLexer(**options)
529 self.IPythonConsoleLexer = IPythonConsoleLexer(**options)
540
530
541 def get_tokens_unprocessed(self, text):
531 def get_tokens_unprocessed(self, text):
542 # Search for the input prompt anywhere...this allows code blocks to
532 # Search for the input prompt anywhere...this allows code blocks to
543 # begin with comments as well.
533 # begin with comments as well.
544 if re.match(r'.*(In \[[0-9]+\]:)', text.strip(), re.DOTALL):
534 if re.match(r'.*(In \[[0-9]+\]:)', text.strip(), re.DOTALL):
545 lex = self.IPythonConsoleLexer
535 lex = self.IPythonConsoleLexer
546 else:
536 else:
547 lex = self.IPythonLexer
537 lex = self.IPythonLexer
548 for token in lex.get_tokens_unprocessed(text):
538 for token in lex.get_tokens_unprocessed(text):
549 yield token
539 yield token
540
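
To illustrate the dispatch described above (a sketch, assuming a standard install): the first fragment starts with a console prompt and takes the console path, the second does not and takes the plain IPython path:

    from IPython.lib.lexers import IPyLexer

    lexer = IPyLexer()
    console_text = "In [1]: 2**2\nOut[1]: 4\n"          # has a prompt -> console lexer
    plain_text = "x = ''.join(map(str, range(10)))\n"   # no prompt -> IPython lexer
    for fragment in (console_text, plain_text):
        print(list(lexer.get_tokens(fragment))[:3])
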
@@ -1,184 +1,184
1 """Test lexers module"""
1 """Test lexers module"""
2
2
3 # Copyright (c) IPython Development Team.
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the Modified BSD License.
4 # Distributed under the terms of the Modified BSD License.
5
5
6 from unittest import TestCase
6 from unittest import TestCase
7 from pygments import __version__ as pygments_version
7 from pygments import __version__ as pygments_version
8 from pygments.token import Token
8 from pygments.token import Token
9 from pygments.lexers import BashLexer
9 from pygments.lexers import BashLexer
10
10
11 from .. import lexers
11 from .. import lexers
12
12
13 pyg214 = tuple(int(x) for x in pygments_version.split(".")[:2]) >= (2, 14)
13 pyg214 = tuple(int(x) for x in pygments_version.split(".")[:2]) >= (2, 14)
14
14
15
15
16 class TestLexers(TestCase):
16 class TestLexers(TestCase):
17 """Collection of lexers tests"""
17 """Collection of lexers tests"""
18 def setUp(self):
18 def setUp(self):
19 self.lexer = lexers.IPython3Lexer()
19 self.lexer = lexers.IPythonLexer()
20 self.bash_lexer = BashLexer()
20 self.bash_lexer = BashLexer()
21
21
22 def testIPython3Lexer(self):
22 def testIPythonLexer(self):
23 fragment = '!echo $HOME\n'
23 fragment = '!echo $HOME\n'
24 bash_tokens = [
24 bash_tokens = [
25 (Token.Operator, '!'),
25 (Token.Operator, '!'),
26 ]
26 ]
27 bash_tokens.extend(self.bash_lexer.get_tokens(fragment[1:]))
27 bash_tokens.extend(self.bash_lexer.get_tokens(fragment[1:]))
28 ipylex_token = list(self.lexer.get_tokens(fragment))
28 ipylex_token = list(self.lexer.get_tokens(fragment))
29 assert bash_tokens[:-1] == ipylex_token[:-1]
29 assert bash_tokens[:-1] == ipylex_token[:-1]
30
30
31 fragment_2 = "!" + fragment
31 fragment_2 = "!" + fragment
32 tokens_2 = [
32 tokens_2 = [
33 (Token.Operator, '!!'),
33 (Token.Operator, '!!'),
34 ] + bash_tokens[1:]
34 ] + bash_tokens[1:]
35 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
35 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
36
36
37 fragment_2 = '\t %%!\n' + fragment[1:]
37 fragment_2 = '\t %%!\n' + fragment[1:]
38 tokens_2 = [
38 tokens_2 = [
39 (Token.Text, '\t '),
39 (Token.Text, '\t '),
40 (Token.Operator, '%%!'),
40 (Token.Operator, '%%!'),
41 (Token.Text, '\n'),
41 (Token.Text, '\n'),
42 ] + bash_tokens[1:]
42 ] + bash_tokens[1:]
43 assert tokens_2 == list(self.lexer.get_tokens(fragment_2))
43 assert tokens_2 == list(self.lexer.get_tokens(fragment_2))
44
44
45 fragment_2 = 'x = ' + fragment
45 fragment_2 = 'x = ' + fragment
46 tokens_2 = [
46 tokens_2 = [
47 (Token.Name, 'x'),
47 (Token.Name, 'x'),
48 (Token.Text, ' '),
48 (Token.Text, ' '),
49 (Token.Operator, '='),
49 (Token.Operator, '='),
50 (Token.Text, ' '),
50 (Token.Text, ' '),
51 ] + bash_tokens
51 ] + bash_tokens
52 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
52 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
53
53
54 fragment_2 = 'x, = ' + fragment
54 fragment_2 = 'x, = ' + fragment
55 tokens_2 = [
55 tokens_2 = [
56 (Token.Name, 'x'),
56 (Token.Name, 'x'),
57 (Token.Punctuation, ','),
57 (Token.Punctuation, ','),
58 (Token.Text, ' '),
58 (Token.Text, ' '),
59 (Token.Operator, '='),
59 (Token.Operator, '='),
60 (Token.Text, ' '),
60 (Token.Text, ' '),
61 ] + bash_tokens
61 ] + bash_tokens
62 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
62 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
63
63
64 fragment_2 = 'x, = %sx ' + fragment[1:]
64 fragment_2 = 'x, = %sx ' + fragment[1:]
65 tokens_2 = [
65 tokens_2 = [
66 (Token.Name, 'x'),
66 (Token.Name, 'x'),
67 (Token.Punctuation, ','),
67 (Token.Punctuation, ','),
68 (Token.Text, ' '),
68 (Token.Text, ' '),
69 (Token.Operator, '='),
69 (Token.Operator, '='),
70 (Token.Text, ' '),
70 (Token.Text, ' '),
71 (Token.Operator, '%'),
71 (Token.Operator, '%'),
72 (Token.Keyword, 'sx'),
72 (Token.Keyword, 'sx'),
73 (Token.Text, ' '),
73 (Token.Text, ' '),
74 ] + bash_tokens[1:]
74 ] + bash_tokens[1:]
75 if tokens_2[7] == (Token.Text, " ") and pyg214: # pygments 2.14+
75 if tokens_2[7] == (Token.Text, " ") and pyg214: # pygments 2.14+
76 tokens_2[7] = (Token.Text.Whitespace, " ")
76 tokens_2[7] = (Token.Text.Whitespace, " ")
77 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
77 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
78
78
79 fragment_2 = 'f = %R function () {}\n'
79 fragment_2 = 'f = %R function () {}\n'
80 tokens_2 = [
80 tokens_2 = [
81 (Token.Name, 'f'),
81 (Token.Name, 'f'),
82 (Token.Text, ' '),
82 (Token.Text, ' '),
83 (Token.Operator, '='),
83 (Token.Operator, '='),
84 (Token.Text, ' '),
84 (Token.Text, ' '),
85 (Token.Operator, '%'),
85 (Token.Operator, '%'),
86 (Token.Keyword, 'R'),
86 (Token.Keyword, 'R'),
87 (Token.Text, ' function () {}\n'),
87 (Token.Text, ' function () {}\n'),
88 ]
88 ]
89 assert tokens_2 == list(self.lexer.get_tokens(fragment_2))
89 assert tokens_2 == list(self.lexer.get_tokens(fragment_2))
90
90
91 fragment_2 = '\t%%xyz\n$foo\n'
91 fragment_2 = '\t%%xyz\n$foo\n'
92 tokens_2 = [
92 tokens_2 = [
93 (Token.Text, '\t'),
93 (Token.Text, '\t'),
94 (Token.Operator, '%%'),
94 (Token.Operator, '%%'),
95 (Token.Keyword, 'xyz'),
95 (Token.Keyword, 'xyz'),
96 (Token.Text, '\n$foo\n'),
96 (Token.Text, '\n$foo\n'),
97 ]
97 ]
98 assert tokens_2 == list(self.lexer.get_tokens(fragment_2))
98 assert tokens_2 == list(self.lexer.get_tokens(fragment_2))
99
99
100 fragment_2 = '%system?\n'
100 fragment_2 = '%system?\n'
101 tokens_2 = [
101 tokens_2 = [
102 (Token.Operator, '%'),
102 (Token.Operator, '%'),
103 (Token.Keyword, 'system'),
103 (Token.Keyword, 'system'),
104 (Token.Operator, '?'),
104 (Token.Operator, '?'),
105 (Token.Text, '\n'),
105 (Token.Text, '\n'),
106 ]
106 ]
107 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
107 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
108
108
109 fragment_2 = 'x != y\n'
109 fragment_2 = 'x != y\n'
110 tokens_2 = [
110 tokens_2 = [
111 (Token.Name, 'x'),
111 (Token.Name, 'x'),
112 (Token.Text, ' '),
112 (Token.Text, ' '),
113 (Token.Operator, '!='),
113 (Token.Operator, '!='),
114 (Token.Text, ' '),
114 (Token.Text, ' '),
115 (Token.Name, 'y'),
115 (Token.Name, 'y'),
116 (Token.Text, '\n'),
116 (Token.Text, '\n'),
117 ]
117 ]
118 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
118 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
119
119
120 fragment_2 = ' ?math.sin\n'
120 fragment_2 = ' ?math.sin\n'
121 tokens_2 = [
121 tokens_2 = [
122 (Token.Text, ' '),
122 (Token.Text, ' '),
123 (Token.Operator, '?'),
123 (Token.Operator, '?'),
124 (Token.Text, 'math.sin'),
124 (Token.Text, 'math.sin'),
125 (Token.Text, '\n'),
125 (Token.Text, '\n'),
126 ]
126 ]
127 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
127 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
128
128
129 fragment = ' *int*?\n'
129 fragment = ' *int*?\n'
130 tokens = [
130 tokens = [
131 (Token.Text, ' *int*'),
131 (Token.Text, ' *int*'),
132 (Token.Operator, '?'),
132 (Token.Operator, '?'),
133 (Token.Text, '\n'),
133 (Token.Text, '\n'),
134 ]
134 ]
135 assert tokens == list(self.lexer.get_tokens(fragment))
135 assert tokens == list(self.lexer.get_tokens(fragment))
136
136
137 fragment = '%%writefile -a foo.py\nif a == b:\n pass'
137 fragment = '%%writefile -a foo.py\nif a == b:\n pass'
138 tokens = [
138 tokens = [
139 (Token.Operator, '%%writefile'),
139 (Token.Operator, '%%writefile'),
140 (Token.Text, ' -a foo.py\n'),
140 (Token.Text, ' -a foo.py\n'),
141 (Token.Keyword, 'if'),
141 (Token.Keyword, 'if'),
142 (Token.Text, ' '),
142 (Token.Text, ' '),
143 (Token.Name, 'a'),
143 (Token.Name, 'a'),
144 (Token.Text, ' '),
144 (Token.Text, ' '),
145 (Token.Operator, '=='),
145 (Token.Operator, '=='),
146 (Token.Text, ' '),
146 (Token.Text, ' '),
147 (Token.Name, 'b'),
147 (Token.Name, 'b'),
148 (Token.Punctuation, ':'),
148 (Token.Punctuation, ':'),
149 (Token.Text, '\n'),
149 (Token.Text, '\n'),
150 (Token.Text, ' '),
150 (Token.Text, ' '),
151 (Token.Keyword, 'pass'),
151 (Token.Keyword, 'pass'),
152 (Token.Text, '\n'),
152 (Token.Text, '\n'),
153 ]
153 ]
154 if tokens[10] == (Token.Text, "\n") and pyg214: # pygments 2.14+
154 if tokens[10] == (Token.Text, "\n") and pyg214: # pygments 2.14+
155 tokens[10] = (Token.Text.Whitespace, "\n")
155 tokens[10] = (Token.Text.Whitespace, "\n")
156 assert tokens[:-1] == list(self.lexer.get_tokens(fragment))[:-1]
156 assert tokens[:-1] == list(self.lexer.get_tokens(fragment))[:-1]
157
157
158 fragment = '%%timeit\nmath.sin(0)'
158 fragment = '%%timeit\nmath.sin(0)'
159 tokens = [
159 tokens = [
160 (Token.Operator, '%%timeit\n'),
160 (Token.Operator, '%%timeit\n'),
161 (Token.Name, 'math'),
161 (Token.Name, 'math'),
162 (Token.Operator, '.'),
162 (Token.Operator, '.'),
163 (Token.Name, 'sin'),
163 (Token.Name, 'sin'),
164 (Token.Punctuation, '('),
164 (Token.Punctuation, '('),
165 (Token.Literal.Number.Integer, '0'),
165 (Token.Literal.Number.Integer, '0'),
166 (Token.Punctuation, ')'),
166 (Token.Punctuation, ')'),
167 (Token.Text, '\n'),
167 (Token.Text, '\n'),
168 ]
168 ]
169
169
170 fragment = '%%HTML\n<div>foo</div>'
170 fragment = '%%HTML\n<div>foo</div>'
171 tokens = [
171 tokens = [
172 (Token.Operator, '%%HTML'),
172 (Token.Operator, '%%HTML'),
173 (Token.Text, '\n'),
173 (Token.Text, '\n'),
174 (Token.Punctuation, '<'),
174 (Token.Punctuation, '<'),
175 (Token.Name.Tag, 'div'),
175 (Token.Name.Tag, 'div'),
176 (Token.Punctuation, '>'),
176 (Token.Punctuation, '>'),
177 (Token.Text, 'foo'),
177 (Token.Text, 'foo'),
178 (Token.Punctuation, '<'),
178 (Token.Punctuation, '<'),
179 (Token.Punctuation, '/'),
179 (Token.Punctuation, '/'),
180 (Token.Name.Tag, 'div'),
180 (Token.Name.Tag, 'div'),
181 (Token.Punctuation, '>'),
181 (Token.Punctuation, '>'),
182 (Token.Text, '\n'),
182 (Token.Text, '\n'),
183 ]
183 ]
184 assert tokens == list(self.lexer.get_tokens(fragment))
184 assert tokens == list(self.lexer.get_tokens(fragment))
@@ -1,24 +1,26
1 from typing import List
1 from typing import List
2
2
3 import pytest
3 import pytest
4 import pygments.lexers
4 import pygments.lexers
5 import pygments.lexer
5 import pygments.lexer
6
6
7 from IPython.lib.lexers import IPythonConsoleLexer, IPython3Lexer
7 from IPython.lib.lexers import IPythonConsoleLexer, IPythonLexer, IPython3Lexer
8
8
9 #: the human-readable names of the IPython lexers with ``entry_points``
9 #: the human-readable names of the IPython lexers with ``entry_points``
10 EXPECTED_LEXER_NAMES = [cls.name for cls in [IPythonConsoleLexer, IPython3Lexer]]
10 EXPECTED_LEXER_NAMES = [
11 cls.name for cls in [IPythonConsoleLexer, IPythonLexer, IPython3Lexer]
12 ]
11
13
12
14
13 @pytest.fixture
15 @pytest.fixture
14 def all_pygments_lexer_names() -> List[str]:
16 def all_pygments_lexer_names() -> List[str]:
15 """Get all lexer names registered in pygments."""
17 """Get all lexer names registered in pygments."""
16 return {l[0] for l in pygments.lexers.get_all_lexers()}
18 return {l[0] for l in pygments.lexers.get_all_lexers()}
17
19
18
20
19 @pytest.mark.parametrize("expected_lexer", EXPECTED_LEXER_NAMES)
21 @pytest.mark.parametrize("expected_lexer", EXPECTED_LEXER_NAMES)
20 def test_pygments_entry_points(
22 def test_pygments_entry_points(
21 expected_lexer: str, all_pygments_lexer_names: List[str]
23 expected_lexer: str, all_pygments_lexer_names: List[str]
22 ) -> None:
24 ) -> None:
23 """Check whether the ``entry_points`` for ``pygments.lexers`` are correct."""
25 """Check whether the ``entry_points`` for ``pygments.lexers`` are correct."""
24 assert expected_lexer in all_pygments_lexer_names
26 assert expected_lexer in all_pygments_lexer_names
@@ -1,24 +1,28
1 """
1 """
2 reST directive for syntax-highlighting ipython interactive sessions.
2 reST directive for syntax-highlighting ipython interactive sessions.
3
3
4 """
4 """
5
5
6 from sphinx import highlighting
6 from sphinx import highlighting
7 from IPython.lib.lexers import IPyLexer
7 from IPython.lib.lexers import IPyLexer
8
8
9 def setup(app):
9 def setup(app):
10 """Setup as a sphinx extension."""
10 """Setup as a sphinx extension."""
11
11
12 # This is only a lexer, so adding it below to pygments appears sufficient.
12 # This is only a lexer, so adding it below to pygments appears sufficient.
13 # But if somebody knows what the right API usage should be to do that via
13 # But if somebody knows what the right API usage should be to do that via
14 # sphinx, by all means fix it here. At least having this setup.py
14 # sphinx, by all means fix it here. At least having this setup.py
15 # suppresses the sphinx warning we'd get without it.
15 # suppresses the sphinx warning we'd get without it.
16 metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
16 metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
17 return metadata
17 return metadata
18
18
19 # Register the extension as a valid pygments lexer.
19 # Register the extension as a valid pygments lexer.
20 # Alternatively, we could register the lexer with pygments instead. This would
20 # Alternatively, we could register the lexer with pygments instead. This would
21 # require using setuptools entrypoints: http://pygments.org/docs/plugins
21 # require using setuptools entrypoints: http://pygments.org/docs/plugins
22
22
23 highlighting.lexers["ipython"] = IPyLexer()
23 ipy2 = IPyLexer(python3=False)
24 highlighting.lexers["ipython3"] = IPyLexer()
24 ipy3 = IPyLexer(python3=True)
25
26 highlighting.lexers['ipython'] = ipy2
27 highlighting.lexers['ipython2'] = ipy2
28 highlighting.lexers['ipython3'] = ipy3
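
For context, a minimal sketch of how a Sphinx project would pick up these registrations; assumes a standard Sphinx setup:

    # conf.py (illustrative sketch)
    extensions = [
        "IPython.sphinxext.ipython_console_highlighting",
    ]
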
@@ -1,62 +1,64
1 .. _console_lexer:
1 .. _console_lexer:
2
2
3 New IPython Console Lexer
3 New IPython Console Lexer
4 -------------------------
4 -------------------------
5
5
6 .. versionadded:: 2.0.0
6 .. versionadded:: 2.0.0
7
7
8 The IPython console lexer has been rewritten and now supports tracebacks
8 The IPython console lexer has been rewritten and now supports tracebacks
9 and customized input/output prompts. An entire suite of lexers is now
9 and customized input/output prompts. An entire suite of lexers is now
10 available at :mod:`IPython.lib.lexers`. These include:
10 available at :mod:`IPython.lib.lexers`. These include:
11
11
12 IPython3Lexer
12 IPythonLexer & IPython3Lexer
13 Lexer for pure IPython (python 3 + magic/shell commands)
13 Lexers for pure IPython (python + magic/shell commands)
14
14
15 IPythonPartialTracebackLexer & IPythonTracebackLexer
15 IPythonPartialTracebackLexer & IPythonTracebackLexer
16 The partial traceback lexer reads everything but the Python code
16 Supports 2.x and 3.x via the keyword `python3`. The partial traceback
17 appearing in a traceback. The full lexer combines the partial lexer
17 lexer reads everything but the Python code appearing in a traceback.
18 with the IPython3Lexer.
18 The full lexer combines the partial lexer with an IPython lexer.
19
19
20 IPythonConsoleLexer
20 IPythonConsoleLexer
21 A lexer for python 3 IPython console sessions, with support for tracebacks.
21 A lexer for IPython console sessions, with support for tracebacks.
22 Supports 2.x and 3.x via the keyword `python3`.
22
23
23 IPyLexer
24 IPyLexer
24 A friendly lexer which examines the first line of text and from it,
25 A friendly lexer which examines the first line of text and from it,
25 decides whether to use an IPython lexer or an IPython console lexer.
26 decides whether to use an IPython lexer or an IPython console lexer.
27 Supports 2.x and 3.x via the keyword `python3`.
26
28
27 Previously, the :class:`IPythonConsoleLexer` class was available at
29 Previously, the :class:`IPythonConsoleLexer` class was available at
28 :mod:`IPython.sphinxext.ipython_console_highlighting`. It was inserted
30 :mod:`IPython.sphinxext.ipython_console_highlighting`. It was inserted
29 into Pygments' list of available lexers under the name `ipython`. It should
31 into Pygments' list of available lexers under the name `ipython`. It should
30 be mentioned that this name is inaccurate, since an IPython console session
32 be mentioned that this name is inaccurate, since an IPython console session
31 is not the same as IPython code (which itself is a superset of the Python
33 is not the same as IPython code (which itself is a superset of the Python
32 language).
34 language).
33
35
34 Now, the Sphinx extension inserts two console lexers into Pygments' list of
36 Now, the Sphinx extension inserts two console lexers into Pygments' list of
35 available lexers. Both are IPyLexer instances under the names: `ipython` and
37 available lexers. Both are IPyLexer instances under the names: `ipython` and
36 `ipython3`. Although the names can be confusing (as mentioned above), their
38 `ipython3`. Although the names can be confusing (as mentioned above), their
37 continued use is, in part, to maintain backwards compatibility and to
39 continued use is, in part, to maintain backwards compatibility and to
38 aid typical usage. If a project needs to make Pygments aware of more than just
40 aid typical usage. If a project needs to make Pygments aware of more than just
39 the IPyLexer class, then one should not make the IPyLexer class available under
41 the IPyLexer class, then one should not make the IPyLexer class available under
40 the name `ipython`, and should instead use `ipy` or some other non-conflicting value.
42 the name `ipython`, and should instead use `ipy` or some other non-conflicting value.
41
43
42 Code blocks such as:
44 Code blocks such as:
43
45
44 .. code-block:: rst
46 .. code-block:: rst
45
47
46 .. code-block:: ipython
48 .. code-block:: ipython
47
49
48 In [1]: 2**2
50 In [1]: 2**2
49 Out[1]: 4
51 Out[1]: 4
50
52
51 will continue to work as before, but now, they will also properly highlight
53 will continue to work as before, but now, they will also properly highlight
52 tracebacks. For pure IPython code, the same lexer will also work:
54 tracebacks. For pure IPython code, the same lexer will also work:
53
55
54 .. code-block:: rst
56 .. code-block:: rst
55
57
56 .. code-block:: ipython
58 .. code-block:: ipython
57
59
58 x = ''.join(map(str, range(10)))
60 x = ''.join(map(str, range(10)))
59 !echo $x
61 !echo $x
60
62
61 Since the first line of the block did not begin with a standard IPython console
63 Since the first line of the block did not begin with a standard IPython console
62 prompt, the entire block is assumed to consist of IPython code instead.
64 prompt, the entire block is assumed to consist of IPython code instead.
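
Once registered (via the entry points tested above, or via the Sphinx extension), the lexers can be looked up by alias; a brief sketch, with the assumption spelled out in the comment:

    from pygments.lexers import get_lexer_by_name

    # Assumes IPython's pygments entry points are installed; via the Sphinx
    # extension the same aliases map to IPyLexer instances instead.
    lexer = get_lexer_by_name("ipython3")
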
@@ -1,157 +1,158
# -*- coding: utf-8 -*-
"""Setup script for IPython.

Under Posix environments it works like a typical setup.py script.
Under Windows, the command sdist is not supported, since IPython
requires utilities which are not available under Windows."""

#-----------------------------------------------------------------------------
# Copyright (c) 2008-2011, IPython Development Team.
# Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.rst, distributed with this software.
#-----------------------------------------------------------------------------

import os
import sys

# **Python version check**
#
# This check is also made in IPython/__init__, don't forget to update both when
# changing Python version requirements.
if sys.version_info < (3, 9):
    pip_message = 'This may be due to an out of date pip. Make sure you have pip >= 9.0.1.'
    try:
        import pip
        pip_version = tuple([int(x) for x in pip.__version__.split('.')[:3]])
        if pip_version < (9, 0, 1) :
            pip_message = 'Your pip version is out of date, please install pip >= 9.0.1. '\
            'pip {} detected.'.format(pip.__version__)
        else:
            # pip is new enough - it must be something else
            pip_message = ''
    except Exception:
        pass


    error = """
IPython 8.13+ supports Python 3.9 and above, following NEP 29.
IPython 8.0-8.12 supports Python 3.8 and above, following NEP 29.
When using Python 2.7, please install IPython 5.x LTS Long Term Support version.
Python 3.3 and 3.4 were supported up to IPython 6.x.
Python 3.5 was supported with IPython 7.0 to 7.9.
Python 3.6 was supported with IPython up to 7.16.
Python 3.7 was still supported with the 7.x branch.

See IPython `README.rst` file for more information:

    https://github.com/ipython/ipython/blob/main/README.rst

Python {py} detected.
{pip}
""".format(
        py=sys.version_info, pip=pip_message
    )

    print(error, file=sys.stderr)
    sys.exit(1)

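# Editorial aside (not part of the file): the guard above relies on Python's
# elementwise tuple comparison -- sys.version_info behaves like a tuple such
# as (3, 8, 10, 'final', 0). A quick illustration:
#
#     assert (3, 8, 10) < (3, 9)        # any 3.8.x sorts below (3, 9)
#     assert not ((3, 9, 0) < (3, 9))   # 3.9.0 does not trip the guard
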
# At least we're on the python version we need, move on.

from setuptools import setup

# Our own imports
sys.path.insert(0, ".")

from setupbase import target_update, find_entry_points

from setupbase import (
    setup_args,
    check_package_data_first,
    find_data_files,
    git_prebuild,
    install_symlinked,
    install_lib_symlink,
    install_scripts_for_symlink,
    unsymlink,
)

#-------------------------------------------------------------------------------
# Handle OS specific things
#-------------------------------------------------------------------------------

if os.name in ('nt','dos'):
    os_name = 'windows'
else:
    os_name = os.name

# Under Windows, 'sdist' has not been supported. Now that the docs build with
# Sphinx it might work, but let's not turn it on until someone confirms that it
# actually works.
if os_name == 'windows' and 'sdist' in sys.argv:
    print('The sdist command is not available under Windows. Exiting.')
    sys.exit(1)


#-------------------------------------------------------------------------------
# Things related to the IPython documentation
#-------------------------------------------------------------------------------

# update the manuals when building a source dist
if len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):

    # List of things to be updated. Each entry is a triplet of args for
    # target_update()
    to_update = [
        (
            "docs/man/ipython.1.gz",
            ["docs/man/ipython.1"],
            "cd docs/man && python -m gzip --best ipython.1",
        ),
    ]


    [ target_update(*t) for t in to_update ]

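# Editorial aside (not part of the file): target_update comes from IPython's
# setupbase module and behaves like a tiny `make` rule -- each triplet above
# is (target, dependencies, command), and the command runs only when the
# target is missing or older than a dependency. Roughly (a sketch, not the
# actual setupbase code):
#
#     def target_update(target, deps, cmd):
#         if (not os.path.exists(target) or
#                 any(os.path.getmtime(d) > os.path.getmtime(target)
#                     for d in deps)):
#             os.system(cmd)
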
#---------------------------------------------------------------------------
# Find all the packages, package data, and data_files
#---------------------------------------------------------------------------

data_files = find_data_files()

setup_args['data_files'] = data_files

#---------------------------------------------------------------------------
# custom distutils commands
#---------------------------------------------------------------------------
# imports here, so they are after setuptools import if there was one
from setuptools.command.sdist import sdist

setup_args['cmdclass'] = {
    'build_py': \
        check_package_data_first(git_prebuild('IPython')),
    'sdist' : git_prebuild('IPython', sdist),
    'symlink': install_symlinked,
    'install_lib_symlink': install_lib_symlink,
    'install_scripts_sym': install_scripts_for_symlink,
    'unsymlink': unsymlink,
}

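# Editorial aside (not part of the file): the `symlink` command family appears
# to implement a development install by symlinking the source tree into
# site-packages -- a mechanism predating `pip install -e .` (inferred from the
# command names and the setupbase helpers, not documented in this diff).
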
setup_args["entry_points"] = {
    "console_scripts": find_entry_points(),
    "pygments.lexers": [
        "ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer",
+       "ipython = IPython.lib.lexers:IPythonLexer",
        "ipython3 = IPython.lib.lexers:IPython3Lexer",
    ],
}

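# Editorial aside (not part of the file): the `+` line above is the single
# entry-point registration restored by this revert. Once IPython is installed,
# Pygments resolves these aliases by name, e.g.:
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     from pygments.lexers import get_lexer_by_name
#
#     lexer = get_lexer_by_name("ipython3")
#     print(highlight("In [1]: 2**2\nOut[1]: 4\n", lexer, TerminalFormatter()))
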
#---------------------------------------------------------------------------
# Do the actual setup now
#---------------------------------------------------------------------------

if __name__ == "__main__":
    setup(**setup_args)