Merge pull request #7556 from abalkin/issue-7548-2...
Matthias Bussonnier
r20139:b2c6426f merge
@@ -0,0 +1,111 @@
1 """Test lexers module"""
2 #-----------------------------------------------------------------------------
3 # Copyright (C) 2014 The IPython Development Team
4 #
5 # Distributed under the terms of the BSD License. The full license is in
6 # the file COPYING, distributed as part of this software.
7 #-----------------------------------------------------------------------------
8
9 #-----------------------------------------------------------------------------
10 # Imports
11 #-----------------------------------------------------------------------------
12 from pygments.token import Token
13
14 from IPython.nbconvert.tests.base import TestsBase
15 from .. import lexers
16
17
18 #-----------------------------------------------------------------------------
19 # Classes and functions
20 #-----------------------------------------------------------------------------
21 class TestLexers(TestsBase):
22 """Collection of lexers tests"""
23 def setUp(self):
24 self.lexer = lexers.IPythonLexer()
25
26 def testIPythonLexer(self):
27 fragment = '!echo $HOME\n'
28 tokens = [
29 (Token.Operator, '!'),
30 (Token.Name.Builtin, 'echo'),
31 (Token.Text, ' '),
32 (Token.Name.Variable, '$HOME'),
33 (Token.Text, '\n'),
34 ]
35 self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
36
37 fragment_2 = '!' + fragment
38 tokens_2 = [
39 (Token.Operator, '!!'),
40 ] + tokens[1:]
41 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
42
43 fragment_2 = '\t %%!\n' + fragment[1:]
44 tokens_2 = [
45 (Token.Text, '\t '),
46 (Token.Operator, '%%!'),
47 (Token.Text, '\n'),
48 ] + tokens[1:]
49 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
50
51 fragment_2 = 'x = ' + fragment
52 tokens_2 = [
53 (Token.Name, 'x'),
54 (Token.Text, ' '),
55 (Token.Operator, '='),
56 (Token.Text, ' '),
57 ] + tokens
58 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
59
60 fragment_2 = 'x, = ' + fragment
61 tokens_2 = [
62 (Token.Name, 'x'),
63 (Token.Punctuation, ','),
64 (Token.Text, ' '),
65 (Token.Operator, '='),
66 (Token.Text, ' '),
67 ] + tokens
68 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
69
70 fragment_2 = 'x, = %sx ' + fragment[1:]
71 tokens_2 = [
72 (Token.Name, 'x'),
73 (Token.Punctuation, ','),
74 (Token.Text, ' '),
75 (Token.Operator, '='),
76 (Token.Text, ' '),
77 (Token.Operator, '%'),
78 (Token.Keyword, 'sx'),
79 (Token.Text, ' '),
80 ] + tokens[1:]
81 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
82
83 fragment_2 = 'f = %R function () {}\n'
84 tokens_2 = [
85 (Token.Name, 'f'),
86 (Token.Text, ' '),
87 (Token.Operator, '='),
88 (Token.Text, ' '),
89 (Token.Operator, '%'),
90 (Token.Keyword, 'R'),
91 (Token.Text, ' function () {}\n'),
92 ]
93 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
94
95 fragment_2 = '\t%%xyz\n$foo\n'
96 tokens_2 = [
97 (Token.Text, '\t'),
98 (Token.Operator, '%%'),
99 (Token.Keyword, 'xyz'),
100 (Token.Text, '\n$foo\n'),
101 ]
102 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
103
104 fragment_2 = '%system?\n'
105 tokens_2 = [
106 (Token.Operator, '%'),
107 (Token.Keyword, 'system'),
108 (Token.Operator, '?'),
109 (Token.Text, '\n'),
110 ]
111 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
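
For quick experimentation outside the test harness, the same lexer can be driven directly. A minimal sketch (the absolute import path is an assumption; the tests above reach the module through a package-relative import):

    from IPython.nbconvert.utils import lexers  # import path assumed

    lexer = lexers.IPythonLexer()
    for token, text in lexer.get_tokens('!echo $HOME\n'):
        print(token, repr(text))
    # Expected, per testIPythonLexer above:
    #   Token.Operator '!'
    #   Token.Name.Builtin 'echo'
    #   Token.Text ' '
    #   Token.Name.Variable '$HOME'
    #   Token.Text '\n'
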
@@ -1,502 +1,507 @@
# -*- coding: utf-8 -*-
"""
Defines a variety of Pygments lexers for highlighting IPython code.

This includes:

IPythonLexer, IPython3Lexer
    Lexers for pure IPython (python + magic/shell commands)

IPythonPartialTracebackLexer, IPythonTracebackLexer
    Supports 2.x and 3.x via the keyword `python3`. The partial traceback
    lexer reads everything but the Python code appearing in a traceback.
    The full lexer combines the partial lexer with an IPython lexer.

IPythonConsoleLexer
    A lexer for IPython console sessions, with support for tracebacks.

IPyLexer
    A friendly lexer which examines the text and, from it, decides whether
    to use an IPython lexer or an IPython console lexer. This is probably
    the only lexer that needs to be explicitly added to Pygments.

"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------

# Standard library
import re

# Third party
from pygments.lexers import BashLexer, PythonLexer, Python3Lexer
from pygments.lexer import (
    Lexer, DelegatingLexer, RegexLexer, do_insertions, bygroups, using,
)
from pygments.token import (
    Comment, Generic, Keyword, Literal, Name, Operator, Other, Text, Error,
)
from pygments.util import get_bool_opt

# Local
from IPython.testing.skipdoctest import skip_doctest

line_re = re.compile('.*?\n')

 ipython_tokens = [
-    (r'(\%+)(\w+)\s+(\.*)(\n)', bygroups(Operator, Keyword,
+    (r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)),
+    (r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))),
+    (r"(%%?)(\w+)(\?\??)$", bygroups(Operator, Keyword, Operator)),
+    (r"\b(\?\??)(\s*)$", bygroups(Operator, Text)),
+    (r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword,
                                             using(BashLexer), Text)),
-    (r'(\%+)(\w+)\b', bygroups(Operator, Keyword)),
-    (r'^(!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
+    (r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)),
+    (r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
+    (r'(!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
 ]

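# Worked example for the rules above, mirroring the new test case: the
# cell-magic fragment '\t%%xyz\n$foo\n' is matched by the first rule, whose
# (?s) flag lets the final group consume the cell body as plain Text:
#
#     (Text, '\t'), (Operator, '%%'), (Keyword, 'xyz'), (Text, '\n$foo\n')
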
def build_ipy_lexer(python3):
    """Builds IPython lexers depending on the value of `python3`.

    The lexer inherits from an appropriate Python lexer and then adds
    information about IPython specific keywords (i.e. magic commands,
    shell commands, etc.)

    Parameters
    ----------
    python3 : bool
        If `True`, then build an IPython lexer from a Python 3 lexer.

    """
    # It would be nice to have a single IPython lexer class which takes
    # a boolean `python3`. But since there are two Python lexer classes,
    # we will also have two IPython lexer classes.
    if python3:
        PyLexer = Python3Lexer
        clsname = 'IPython3Lexer'
        name = 'IPython3'
        aliases = ['ipython3']
        doc = """IPython3 Lexer"""
    else:
        PyLexer = PythonLexer
        clsname = 'IPythonLexer'
        name = 'IPython'
        aliases = ['ipython2', 'ipython']
        doc = """IPython Lexer"""

    tokens = PyLexer.tokens.copy()
    tokens['root'] = ipython_tokens + tokens['root']

    attrs = {'name': name, 'aliases': aliases, 'filenames': [],
             '__doc__': doc, 'tokens': tokens}

    # NB: `clsname` is computed above but unused; the class is created
    # under `name`.
    return type(name, (PyLexer,), attrs)


IPython3Lexer = build_ipy_lexer(python3=True)
IPythonLexer = build_ipy_lexer(python3=False)


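# Sanity-check sketch (comments only): the generated classes are ordinary
# Pygments lexers built with type(), so for example
#
#     issubclass(IPython3Lexer, Python3Lexer)  # -> True
#     IPythonLexer().aliases                   # -> ['ipython2', 'ipython']
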
class IPythonPartialTracebackLexer(RegexLexer):
    """
    Partial lexer for IPython tracebacks.

    Handles all the non-python output. This works for both Python 2.x and 3.x.

    """
    name = 'IPython Partial Traceback'

    tokens = {
        'root': [
            # Tracebacks for syntax errors have a different style.
            # For both types of tracebacks, we mark the first line with
            # Generic.Traceback. For syntax errors, we mark the filename
            # as we mark the filenames for non-syntax tracebacks.
            #
            # These two regexps define how IPythonConsoleLexer finds a
            # traceback.
            #
            ## Non-syntax traceback
            (r'^(\^C)?(-+\n)', bygroups(Error, Generic.Traceback)),
            ## Syntax traceback
            (r'^(  File)(.*)(, line )(\d+\n)',
             bygroups(Generic.Traceback, Name.Namespace,
                      Generic.Traceback, Literal.Number.Integer)),

            # (Exception Identifier)(Whitespace)(Traceback Message)
            (r'(?u)(^[^\d\W]\w*)(\s*)(Traceback.*?\n)',
             bygroups(Name.Exception, Generic.Whitespace, Text)),
            # (Module/Filename)(Text)(Callee)(Function Signature)
            # Better options for callee and function signature?
            (r'(.*)( in )(.*)(\(.*\)\n)',
             bygroups(Name.Namespace, Text, Name.Entity, Name.Tag)),
            # Regular line: (Whitespace)(Line Number)(Python Code)
            (r'(\s*?)(\d+)(.*?\n)',
             bygroups(Generic.Whitespace, Literal.Number.Integer, Other)),
            # Emphasized line: (Arrow)(Line Number)(Python Code)
            # Using Exception token so arrow color matches the Exception.
            (r'(-*>?\s?)(\d+)(.*?\n)',
             bygroups(Name.Exception, Literal.Number.Integer, Other)),
            # (Exception Identifier)(Message)
            (r'(?u)(^[^\d\W]\w*)(:.*?\n)',
             bygroups(Name.Exception, Text)),
            # Tag everything else as Other, will be handled later.
            (r'.*\n', Other),
        ],
    }


class IPythonTracebackLexer(DelegatingLexer):
    """
    IPython traceback lexer.

    For doctests, the tracebacks can be snipped as much as desired, with the
    exception of the lines that designate a traceback. For non-syntax error
    tracebacks, this is the line of hyphens. For syntax error tracebacks,
    this is the line which lists the File and line number.

    """
    # The lexer inherits from DelegatingLexer. The "root" lexer is an
    # appropriate IPython lexer, which depends on the value of the boolean
    # `python3`. First, we parse with the partial IPython traceback lexer.
    # Then, any code marked with the "Other" token is delegated to the root
    # lexer.
    #
    name = 'IPython Traceback'
    aliases = ['ipythontb']

    def __init__(self, **options):
        self.python3 = get_bool_opt(options, 'python3', False)
        if self.python3:
            self.aliases = ['ipython3tb']
        else:
            self.aliases = ['ipython2tb', 'ipythontb']

        if self.python3:
            IPyLexer = IPython3Lexer
        else:
            IPyLexer = IPythonLexer

        DelegatingLexer.__init__(self, IPyLexer,
                                 IPythonPartialTracebackLexer, **options)

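# Usage sketch (comments only; `text` stands for any traceback string such
# as the console example below):
#
#     tblexer = IPythonTracebackLexer(python3=True)
#     for index, token, value in tblexer.get_tokens_unprocessed(text):
#         ...
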
@skip_doctest
class IPythonConsoleLexer(Lexer):
    """
    An IPython console lexer for IPython code-blocks and doctests, such as:

    .. code-block:: rst

        .. code-block:: ipythonconsole

            In [1]: a = 'foo'

            In [2]: a
            Out[2]: 'foo'

            In [3]: print a
            foo

            In [4]: 1 / 0


    Support is also provided for IPython exceptions:

    .. code-block:: rst

        .. code-block:: ipythonconsole

            In [1]: raise Exception

            ---------------------------------------------------------------------------
            Exception                                 Traceback (most recent call last)
            <ipython-input-1-fca2ab0ca76b> in <module>()
            ----> 1 raise Exception

            Exception:

    """
    name = 'IPython console session'
    aliases = ['ipythonconsole']
    mimetypes = ['text/x-ipython-console']

    # The regexps used to determine what is input and what is output.
    # The default prompts for IPython are:
    #
    #     c.PromptManager.in_template  = 'In [\#]: '
    #     c.PromptManager.in2_template = '   .\D.: '
    #     c.PromptManager.out_template = 'Out[\#]: '
    #
    in1_regex = r'In \[[0-9]+\]: '
    in2_regex = r'   \.\.+\.: '
    out_regex = r'Out\[[0-9]+\]: '

    #: The regex to determine when a traceback starts.
    ipytb_start = re.compile(r'^(\^C)?(-+\n)|^(  File)(.*)(, line )(\d+\n)')

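    # For example, with the defaults above, in1_regex matches 'In [1]: ',
    # in2_regex matches a continuation prompt such as '   ...: ' (a run of
    # three or more dots), and out_regex matches 'Out[1]: '.
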
    def __init__(self, **options):
        """Initialize the IPython console lexer.

        Parameters
        ----------
        python3 : bool
            If `True`, then the console inputs are parsed using a Python 3
            lexer. Otherwise, they are parsed using a Python 2 lexer.
        in1_regex : RegexObject
            The compiled regular expression used to detect the start
            of inputs. Although the IPython configuration setting may have a
            trailing whitespace, do not include it in the regex. If `None`,
            then the default input prompt is assumed.
        in2_regex : RegexObject
            The compiled regular expression used to detect the continuation
            of inputs. Although the IPython configuration setting may have a
            trailing whitespace, do not include it in the regex. If `None`,
            then the default input prompt is assumed.
        out_regex : RegexObject
            The compiled regular expression used to detect outputs. If `None`,
            then the default output prompt is assumed.

        """
        self.python3 = get_bool_opt(options, 'python3', False)
        if self.python3:
            self.aliases = ['ipython3console']
        else:
            self.aliases = ['ipython2console', 'ipythonconsole']

        in1_regex = options.get('in1_regex', self.in1_regex)
        in2_regex = options.get('in2_regex', self.in2_regex)
        out_regex = options.get('out_regex', self.out_regex)

        # So that we can work with input and output prompts which have been
        # rstrip'd (possibly by editors) we also need rstrip'd variants. If
        # we do not do this, then such prompts will be tagged as 'output'.
        # The reason we can't just use the rstrip'd variants instead is that
        # we want any whitespace associated with the prompt to be inserted
        # with the token. This allows formatted code to be modified so as to
        # hide the appearance of prompts, with the whitespace included. One
        # example use of this is in copybutton.js from the standard lib
        # Python docs.
        in1_regex_rstrip = in1_regex.rstrip() + '\n'
        in2_regex_rstrip = in2_regex.rstrip() + '\n'
        out_regex_rstrip = out_regex.rstrip() + '\n'

        # Compile and save them all.
        attrs = ['in1_regex', 'in2_regex', 'out_regex',
                 'in1_regex_rstrip', 'in2_regex_rstrip', 'out_regex_rstrip']
        for attr in attrs:
            self.__setattr__(attr, re.compile(locals()[attr]))

        Lexer.__init__(self, **options)

        if self.python3:
            pylexer = IPython3Lexer
            tblexer = IPythonTracebackLexer
        else:
            pylexer = IPythonLexer
            tblexer = IPythonTracebackLexer
        # The traceback lexer is the same in both branches; it picks up the
        # `python3` flag from **options on its own.

        self.pylexer = pylexer(**options)
        self.tblexer = tblexer(**options)

        self.reset()

    def reset(self):
        self.mode = 'output'
        self.index = 0
        self.buffer = u''
        self.insertions = []

    def buffered_tokens(self):
        """
        Generator of unprocessed tokens after doing insertions and before
        changing to a new state.

        """
        if self.mode == 'output':
            tokens = [(0, Generic.Output, self.buffer)]
        elif self.mode == 'input':
            tokens = self.pylexer.get_tokens_unprocessed(self.buffer)
        else: # traceback
            tokens = self.tblexer.get_tokens_unprocessed(self.buffer)

        for i, t, v in do_insertions(self.insertions, tokens):
            # All token indexes are relative to the buffer.
            yield self.index + i, t, v

        # Clear it all
        self.index += len(self.buffer)
        self.buffer = u''
        self.insertions = []

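    # For instance, after buffering the line "In [1]: x = 1\n" the state is:
    #
    #     self.buffer     == u'x = 1\n'
    #     self.insertions == [(0, [(0, Generic.Prompt, u'In [1]: ')])]
    #
    # and do_insertions() splices the prompt token back in front of the
    # lexed code when the buffer is flushed.
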
    def get_mci(self, line):
        """
        Parses the line and returns a 3-tuple: (mode, code, insertion).

        `mode` is the next mode (or state) of the lexer, and is always equal
        to 'input', 'output', or 'tb'.

        `code` is a portion of the line that should be added to the buffer
        corresponding to the next mode and eventually lexed by another lexer.
        For example, `code` could be Python code if `mode` were 'input'.

        `insertion` is a 3-tuple (index, token, text) representing an
        unprocessed "token" that will be inserted into the stream of tokens
        that are created from the buffer once we change modes. This is usually
        the input or output prompt.

        In general, the next mode depends on the current mode and on the
        contents of `line`.

        """
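        # Worked example: with the default prompts,
        #     self.get_mci(u'In [1]: x = 1\n')
        # returns
        #     ('input', u'x = 1\n', (0, Generic.Prompt, u'In [1]: '))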
        # To reduce the number of regex match checks, we have multiple
        # 'if' blocks instead of 'if-elif' blocks.

        # Check for possible end of input
        in2_match = self.in2_regex.match(line)
        in2_match_rstrip = self.in2_regex_rstrip.match(line)
        if (in2_match and in2_match.group().rstrip() == line.rstrip()) or \
           in2_match_rstrip:
            end_input = True
        else:
            end_input = False
        if end_input and self.mode != 'tb':
            # Only look for an end of input when not in tb mode.
            # An ellipsis could appear within the traceback.
            mode = 'output'
            code = u''
            insertion = (0, Generic.Prompt, line)
            return mode, code, insertion

        # Check for output prompt
        out_match = self.out_regex.match(line)
        out_match_rstrip = self.out_regex_rstrip.match(line)
        if out_match or out_match_rstrip:
            mode = 'output'
            if out_match:
                idx = out_match.end()
            else:
                idx = out_match_rstrip.end()
            code = line[idx:]
            # Use the 'heading' token for output. We cannot use Generic.Error
            # since it would conflict with exceptions.
            insertion = (0, Generic.Heading, line[:idx])
            return mode, code, insertion

        # Check for input or continuation prompt (non stripped version)
        in1_match = self.in1_regex.match(line)
        if in1_match or (in2_match and self.mode != 'tb'):
            # New input or when not in tb, continued input.
            # We do not check for continued input when in tb since it is
            # allowable to replace a long stack with an ellipsis.
            mode = 'input'
            if in1_match:
                idx = in1_match.end()
            else: # in2_match
                idx = in2_match.end()
            code = line[idx:]
            insertion = (0, Generic.Prompt, line[:idx])
            return mode, code, insertion

        # Check for input or continuation prompt (stripped version)
        in1_match_rstrip = self.in1_regex_rstrip.match(line)
        if in1_match_rstrip or (in2_match_rstrip and self.mode != 'tb'):
            # New input or when not in tb, continued input.
            # We do not check for continued input when in tb since it is
            # allowable to replace a long stack with an ellipsis.
            mode = 'input'
            if in1_match_rstrip:
                idx = in1_match_rstrip.end()
            else: # in2_match
                idx = in2_match_rstrip.end()
            code = line[idx:]
            insertion = (0, Generic.Prompt, line[:idx])
            return mode, code, insertion

        # Check for traceback
        if self.ipytb_start.match(line):
            mode = 'tb'
            code = line
            insertion = None
            return mode, code, insertion

        # All other stuff...
        if self.mode in ('input', 'output'):
            # We assume all other text is output. Multiline input that
            # does not use the continuation marker cannot be detected.
            # For example, the 3 in the following is clearly output:
            #
            #    In [1]: print 3
            #    3
            #
            # But the following second line is part of the input:
            #
            #    In [2]: while True:
            #        print True
            #
            # In both cases, the 2nd line will be 'output'.
            #
            mode = 'output'
        else:
            mode = 'tb'

        code = line
        insertion = None

        return mode, code, insertion

    def get_tokens_unprocessed(self, text):
        self.reset()
        for match in line_re.finditer(text):
            line = match.group()
            mode, code, insertion = self.get_mci(line)

            if mode != self.mode:
                # Yield buffered tokens before transitioning to new mode.
                for token in self.buffered_tokens():
                    yield token
                self.mode = mode

            if insertion:
                self.insertions.append((len(self.buffer), [insertion]))
            self.buffer += code
        else:
            # The for-else clause runs once the loop completes normally:
            # flush whatever is still sitting in the buffer.
            for token in self.buffered_tokens():
                yield token

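# Usage sketch for the console lexer (comments only; instantiating the class
# directly avoids any need to register its aliases with Pygments):
#
#     from pygments import highlight
#     from pygments.formatters import HtmlFormatter
#     code = u"In [1]: a = 'foo'\n\nIn [2]: a\nOut[2]: 'foo'\n"
#     print(highlight(code, IPythonConsoleLexer(), HtmlFormatter()))
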
class IPyLexer(Lexer):
    """
    Primary lexer for all IPython-like code.

    This is a simple helper lexer. If an input prompt of the form
    "In \[[0-9]+\]:" appears anywhere in the text, then the entire text is
    parsed with an IPython console lexer. If not, then the entire text is
    parsed with an IPython lexer.

    The goal is to reduce the number of lexers that are registered
    with Pygments.

    """
    name = 'IPy session'
    aliases = ['ipy']

    def __init__(self, **options):
        self.python3 = get_bool_opt(options, 'python3', False)
        if self.python3:
            self.aliases = ['ipy3']
        else:
            self.aliases = ['ipy2', 'ipy']

        Lexer.__init__(self, **options)

        self.IPythonLexer = IPythonLexer(**options)
        self.IPythonConsoleLexer = IPythonConsoleLexer(**options)

    def get_tokens_unprocessed(self, text):
        # Search for the input prompt anywhere...this allows code blocks to
        # begin with comments as well.
        if re.match(r'.*(In \[[0-9]+\]:)', text.strip(), re.DOTALL):
            lex = self.IPythonConsoleLexer
        else:
            lex = self.IPythonLexer
        for token in lex.get_tokens_unprocessed(text):
            yield token

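To see the IPyLexer dispatch in action, a minimal sketch (the absolute import path is an assumption; within IPython the module is normally reached through the package, as in the tests above):

    from IPython.nbconvert.utils.lexers import IPyLexer  # import path assumed

    lex = IPyLexer()
    # An input prompt anywhere in the text routes the block to the console
    # lexer, so the prompt itself comes back tagged as Generic.Prompt.
    for index, token, value in lex.get_tokens_unprocessed(u"In [1]: a = 'foo'\n"):
        if value:
            print(index, token, repr(value))
    # The first token printed is the u'In [1]: ' prompt; prompt-free text
    # would instead fall through to the plain IPython lexer.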