##// END OF EJS Templates
Apply 2to3 `next` fix....
Bradley M. Froehle -
Show More
@@ -1,933 +1,933 b''
1 1 """Word completion for IPython.
2 2
3 3 This module is a fork of the rlcompleter module in the Python standard
4 4 library. The original enhancements made to rlcompleter have been sent
5 5 upstream and were accepted as of Python 2.3, but we need a lot more
6 6 functionality specific to IPython, so this module will continue to live as an
7 7 IPython-specific utility.
8 8
9 9 Original rlcompleter documentation:
10 10
11 11 This requires the latest extension to the readline module.  The completer
12 12 completes keywords, built-ins and globals in __main__; when completing
13 13 NAME.NAME..., it evaluates (!) the expression up to the last dot and
14 14 completes its attributes.
15 15
16 16 It's very cool to do "import string" type "string.", hit the
17 17 completion key (twice), and see the list of names defined by the
18 18 string module!
19 19
20 20 Tip: to use the tab key as the completion key, call
21 21
22 22 readline.parse_and_bind("tab: complete")
23 23
24 24 Notes:
25 25
26 26 - Exceptions raised by the completer function are *ignored* (and
27 27 generally cause the completion to fail). This is a feature -- since
28 28 readline sets the tty device in raw (or cbreak) mode, printing a
29 29 traceback wouldn't work well without some complicated hoopla to save,
30 30 reset and restore the tty state.
31 31
32 32 - The evaluation of the NAME.NAME... form may cause arbitrary
33 33 application defined code to be executed if an object with a
34 34 __getattr__ hook is found. Since it is the responsibility of the
35 35 application (or the user) to enable this feature, I consider this an
36 36 acceptable risk. More complicated expressions (e.g. function calls or
37 37 indexing operations) are *not* evaluated.
38 38
39 39 - GNU readline is also used by the built-in functions input() and
40 40 raw_input(), and thus these also benefit/suffer from the completer
41 41 features. Clearly an interactive application can benefit by
42 42 specifying its own completer function and using raw_input() for all
43 43 its input.
44 44
45 45 - When the original stdin is not a tty device, GNU readline is never
46 46 used, and this module (and the readline module) are silently inactive.
47 47 """
48 48
49 49 #*****************************************************************************
50 50 #
51 51 # Since this file is essentially a minimally modified copy of the rlcompleter
52 52 # module which is part of the standard Python distribution, I assume that the
53 53 # proper procedure is to maintain its copyright as belonging to the Python
54 54 # Software Foundation (in addition to my own, for all new code).
55 55 #
56 56 # Copyright (C) 2008 IPython Development Team
57 57 # Copyright (C) 2001 Fernando Perez. <fperez@colorado.edu>
58 58 # Copyright (C) 2001 Python Software Foundation, www.python.org
59 59 #
60 60 # Distributed under the terms of the BSD License. The full license is in
61 61 # the file COPYING, distributed as part of this software.
62 62 #
63 63 #*****************************************************************************
64 64
65 65 #-----------------------------------------------------------------------------
66 66 # Imports
67 67 #-----------------------------------------------------------------------------
68 68
69 69 import __builtin__
70 70 import __main__
71 71 import glob
72 72 import inspect
73 73 import itertools
74 74 import keyword
75 75 import os
76 76 import re
77 77 import shlex
78 78 import sys
79 79
80 80 from IPython.config.configurable import Configurable
81 81 from IPython.core.error import TryNext
82 82 from IPython.core.inputsplitter import ESC_MAGIC
83 83 from IPython.utils import generics
84 84 from IPython.utils import io
85 85 from IPython.utils.dir2 import dir2
86 86 from IPython.utils.process import arg_split
87 87 from IPython.utils.traitlets import CBool, Enum
88 88
89 89 #-----------------------------------------------------------------------------
90 90 # Globals
91 91 #-----------------------------------------------------------------------------
92 92
93 93 # Public API
94 94 __all__ = ['Completer','IPCompleter']
95 95
96 96 if sys.platform == 'win32':
97 97 PROTECTABLES = ' '
98 98 else:
99 99 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
100 100
101 101 #-----------------------------------------------------------------------------
102 102 # Main functions and classes
103 103 #-----------------------------------------------------------------------------
104 104
105 105 def has_open_quotes(s):
106 106 """Return whether a string has open quotes.
107 107
108 108 This simply counts whether the number of quote characters of either type in
109 109 the string is odd.
110 110
111 111 Returns
112 112 -------
113 113 If there is an open quote, the quote character is returned. Else, return
114 114 False.
115 115 """
116 116 # We check " first, then ', so complex cases with nested quotes will get
117 117 # the " to take precedence.
118 118 if s.count('"') % 2:
119 119 return '"'
120 120 elif s.count("'") % 2:
121 121 return "'"
122 122 else:
123 123 return False
124 124
125 125
126 126 def protect_filename(s):
127 127 """Escape a string to protect certain characters."""
128 128
129 129 return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
130 130 for ch in s])
131 131
132 132 def expand_user(path):
133 133 """Expand '~'-style usernames in strings.
134 134
135 135 This is similar to :func:`os.path.expanduser`, but it computes and returns
136 136 extra information that will be useful if the input was being used in
137 137 computing completions, and you wish to return the completions with the
138 138 original '~' instead of its expanded value.
139 139
140 140 Parameters
141 141 ----------
142 142 path : str
143 143 String to be expanded. If no ~ is present, the output is the same as the
144 144 input.
145 145
146 146 Returns
147 147 -------
148 148 newpath : str
149 149 Result of ~ expansion in the input path.
150 150 tilde_expand : bool
151 151 Whether any expansion was performed or not.
152 152 tilde_val : str
153 153 The value that ~ was replaced with.
154 154 """
155 155 # Default values
156 156 tilde_expand = False
157 157 tilde_val = ''
158 158 newpath = path
159 159
160 160 if path.startswith('~'):
161 161 tilde_expand = True
162 162 rest = len(path)-1
163 163 newpath = os.path.expanduser(path)
164 164 if rest:
165 165 tilde_val = newpath[:-rest]
166 166 else:
167 167 tilde_val = newpath
168 168
169 169 return newpath, tilde_expand, tilde_val
170 170
171 171
172 172 def compress_user(path, tilde_expand, tilde_val):
173 173 """Does the opposite of expand_user, with its outputs.
174 174 """
175 175 if tilde_expand:
176 176 return path.replace(tilde_val, '~')
177 177 else:
178 178 return path
179 179
180 180
181 181 class Bunch(object): pass
182 182
183 183
184 184 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
185 185 GREEDY_DELIMS = ' \r\n'
186 186
187 187
188 188 class CompletionSplitter(object):
189 189 """An object to split an input line in a manner similar to readline.
190 190
191 191 By having our own implementation, we can expose readline-like completion in
192 192 a uniform manner to all frontends. This object only needs to be given the
193 193 line of text to be split and the cursor position on said line, and it
194 194 returns the 'word' to be completed on at the cursor after splitting the
195 195 entire line.
196 196
197 197 What characters are used as splitting delimiters can be controlled by
198 198 setting the `delims` attribute (this is a property that internally
199 199 automatically builds the necessary regular expression)"""
200 200
201 201 # Private interface
202 202
203 203 # A string of delimiter characters. The default value makes sense for
204 204 # IPython's most typical usage patterns.
205 205 _delims = DELIMS
206 206
207 207 # The expression (a normal string) to be compiled into a regular expression
208 208 # for actual splitting. We store it as an attribute mostly for ease of
209 209 # debugging, since this type of code can be so tricky to debug.
210 210 _delim_expr = None
211 211
212 212 # The regular expression that does the actual splitting
213 213 _delim_re = None
214 214
215 215 def __init__(self, delims=None):
216 216 delims = CompletionSplitter._delims if delims is None else delims
217 217 self.delims = delims
218 218
219 219 @property
220 220 def delims(self):
221 221 """Return the string of delimiter characters."""
222 222 return self._delims
223 223
224 224 @delims.setter
225 225 def delims(self, delims):
226 226 """Set the delimiters for line splitting."""
227 227 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
228 228 self._delim_re = re.compile(expr)
229 229 self._delims = delims
230 230 self._delim_expr = expr
231 231
232 232 def split_line(self, line, cursor_pos=None):
233 233 """Split a line of text with a cursor at the given position.
234 234 """
235 235 l = line if cursor_pos is None else line[:cursor_pos]
236 236 return self._delim_re.split(l)[-1]
237 237
238 238
239 239 class Completer(Configurable):
240 240
241 241 greedy = CBool(False, config=True,
242 242 help="""Activate greedy completion
243 243
244 244 This will enable completion on elements of lists, results of function calls, etc.,
245 245 but can be unsafe because the code is actually evaluated on TAB.
246 246 """
247 247 )
248 248
249 249
250 250 def __init__(self, namespace=None, global_namespace=None, config=None, **kwargs):
251 251 """Create a new completer for the command line.
252 252
253 253 Completer(namespace=ns,global_namespace=ns2) -> completer instance.
254 254
255 255 If unspecified, the default namespace where completions are performed
256 256 is __main__ (technically, __main__.__dict__). Namespaces should be
257 257 given as dictionaries.
258 258
259 259 An optional second namespace can be given. This allows the completer
260 260 to handle cases where both the local and global scopes need to be
261 261 distinguished.
262 262
263 263 Completer instances should be used as the completion mechanism of
264 264 readline via the set_completer() call:
265 265
266 266 readline.set_completer(Completer(my_namespace).complete)
267 267 """
268 268
269 269 # Don't bind to namespace quite yet, but flag whether the user wants a
270 270 # specific namespace or to use __main__.__dict__. This will allow us
271 271 # to bind to __main__.__dict__ at completion time, not now.
272 272 if namespace is None:
273 273 self.use_main_ns = 1
274 274 else:
275 275 self.use_main_ns = 0
276 276 self.namespace = namespace
277 277
278 278 # The global namespace, if given, can be bound directly
279 279 if global_namespace is None:
280 280 self.global_namespace = {}
281 281 else:
282 282 self.global_namespace = global_namespace
283 283
284 284 super(Completer, self).__init__(config=config, **kwargs)
285 285
286 286 def complete(self, text, state):
287 287 """Return the next possible completion for 'text'.
288 288
289 289 This is called successively with state == 0, 1, 2, ... until it
290 290 returns None. The completion should begin with 'text'.
291 291
292 292 """
293 293 if self.use_main_ns:
294 294 self.namespace = __main__.__dict__
295 295
296 296 if state == 0:
297 297 if "." in text:
298 298 self.matches = self.attr_matches(text)
299 299 else:
300 300 self.matches = self.global_matches(text)
301 301 try:
302 302 return self.matches[state]
303 303 except IndexError:
304 304 return None
305 305
306 306 def global_matches(self, text):
307 307 """Compute matches when text is a simple name.
308 308
309 309 Return a list of all keywords, built-in functions and names currently
310 310 defined in self.namespace or self.global_namespace that match.
311 311
312 312 """
313 313 #print 'Completer->global_matches, txt=%r' % text # dbg
314 314 matches = []
315 315 match_append = matches.append
316 316 n = len(text)
317 317 for lst in [keyword.kwlist,
318 318 __builtin__.__dict__.keys(),
319 319 self.namespace.keys(),
320 320 self.global_namespace.keys()]:
321 321 for word in lst:
322 322 if word[:n] == text and word != "__builtins__":
323 323 match_append(word)
324 324 return matches
325 325
326 326 def attr_matches(self, text):
327 327 """Compute matches when text contains a dot.
328 328
329 329 Assuming the text is of the form NAME.NAME....[NAME], and is
330 330 evaluatable in self.namespace or self.global_namespace, it will be
331 331 evaluated and its attributes (as revealed by dir()) are used as
332 332 possible completions. (For class instances, class members are are
333 333 also considered.)
334 334
335 335 WARNING: this can still invoke arbitrary C code, if an object
336 336 with a __getattr__ hook is evaluated.
337 337
338 338 """
339 339
340 340 #io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
341 341 # Another option, seems to work great. Catches things like ''.<tab>
342 342 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
343 343
344 344 if m:
345 345 expr, attr = m.group(1, 3)
346 346 elif self.greedy:
347 347 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
348 348 if not m2:
349 349 return []
350 350 expr, attr = m2.group(1,2)
351 351 else:
352 352 return []
353 353
354 354 try:
355 355 obj = eval(expr, self.namespace)
356 356 except:
357 357 try:
358 358 obj = eval(expr, self.global_namespace)
359 359 except:
360 360 return []
361 361
362 362 if self.limit_to__all__ and hasattr(obj, '__all__'):
363 363 words = get__all__entries(obj)
364 364 else:
365 365 words = dir2(obj)
366 366
367 367 try:
368 368 words = generics.complete_object(obj, words)
369 369 except TryNext:
370 370 pass
371 371 except Exception:
372 372 # Silence errors from completion function
373 373 #raise # dbg
374 374 pass
375 375 # Build match list to return
376 376 n = len(attr)
377 377 res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
378 378 return res
379 379
380 380
381 381 def get__all__entries(obj):
382 382 """returns the strings in the __all__ attribute"""
383 383 try:
384 384 words = getattr(obj, '__all__')
385 385 except:
386 386 return []
387 387
388 388 return [w for w in words if isinstance(w, basestring)]
389 389
390 390
391 391 class IPCompleter(Completer):
392 392 """Extension of the completer class with IPython-specific features"""
393 393
394 394 def _greedy_changed(self, name, old, new):
395 395 """update the splitter and readline delims when greedy is changed"""
396 396 if new:
397 397 self.splitter.delims = GREEDY_DELIMS
398 398 else:
399 399 self.splitter.delims = DELIMS
400 400
401 401 if self.readline:
402 402 self.readline.set_completer_delims(self.splitter.delims)
403 403
404 404 merge_completions = CBool(True, config=True,
405 405 help="""Whether to merge completion results into a single list
406 406
407 407 If False, only the completion results from the first non-empty
408 408 completer will be returned.
409 409 """
410 410 )
411 411 omit__names = Enum((0,1,2), default_value=2, config=True,
412 412 help="""Instruct the completer to omit private method names
413 413
414 414 Specifically, when completing on ``object.<tab>``.
415 415
416 416 When 2 [default]: all names that start with '_' will be excluded.
417 417
418 418 When 1: all 'magic' names (``__foo__``) will be excluded.
419 419
420 420 When 0: nothing will be excluded.
421 421 """
422 422 )
423 423 limit_to__all__ = CBool(default_value=False, config=True,
424 424 help="""Instruct the completer to use __all__ for the completion
425 425
426 426 Specifically, when completing on ``object.<tab>``.
427 427
428 428 When True: only those names in obj.__all__ will be included.
429 429
430 430 When False [default]: the __all__ attribute is ignored
431 431 """
432 432 )
433 433
434 434 def __init__(self, shell=None, namespace=None, global_namespace=None,
435 435 alias_table=None, use_readline=True,
436 436 config=None, **kwargs):
437 437 """IPCompleter() -> completer
438 438
439 439 Return a completer object suitable for use by the readline library
440 440 via readline.set_completer().
441 441
442 442 Inputs:
443 443
444 444 - shell: a pointer to the ipython shell itself. This is needed
445 445 because this completer knows about magic functions, and those can
446 446 only be accessed via the ipython instance.
447 447
448 448 - namespace: an optional dict where completions are performed.
449 449
450 450 - global_namespace: secondary optional dict for completions, to
451 451 handle cases (such as IPython embedded inside functions) where
452 452 both Python scopes are visible.
453 453
454 454 - If alias_table is supplied, it should be a dictionary of aliases
455 455 to complete.
456 456
457 457 use_readline : bool, optional
458 458 If true, use the readline library. This completer can still function
459 459 without readline, though in that case callers must provide some extra
460 460 information on each call about the current line."""
461 461
462 462 self.magic_escape = ESC_MAGIC
463 463 self.splitter = CompletionSplitter()
464 464
465 465 # Readline configuration, only used by the rlcompleter method.
466 466 if use_readline:
467 467 # We store the right version of readline so that later code
468 468 import IPython.utils.rlineimpl as readline
469 469 self.readline = readline
470 470 else:
471 471 self.readline = None
472 472
473 473 # _greedy_changed() depends on splitter and readline being defined:
474 474 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
475 475 config=config, **kwargs)
476 476
477 477 # List where completion matches will be stored
478 478 self.matches = []
479 479 self.shell = shell
480 480 if alias_table is None:
481 481 alias_table = {}
482 482 self.alias_table = alias_table
483 483 # Regexp to split filenames with spaces in them
484 484 self.space_name_re = re.compile(r'([^\\] )')
485 485 # Hold a local ref. to glob.glob for speed
486 486 self.glob = glob.glob
487 487
488 488 # Determine if we are running on 'dumb' terminals, like (X)Emacs
489 489 # buffers, to avoid completion problems.
490 490 term = os.environ.get('TERM','xterm')
491 491 self.dumb_terminal = term in ['dumb','emacs']
492 492
493 493 # Special handling of backslashes needed in win32 platforms
494 494 if sys.platform == "win32":
495 495 self.clean_glob = self._clean_glob_win32
496 496 else:
497 497 self.clean_glob = self._clean_glob
498 498
499 499 # All active matcher routines for completion
500 500 self.matchers = [self.python_matches,
501 501 self.file_matches,
502 502 self.magic_matches,
503 503 self.alias_matches,
504 504 self.python_func_kw_matches,
505 505 ]
506 506
507 507 def all_completions(self, text):
508 508 """
509 509 Wrapper around the complete method for the benefit of emacs
510 510 and pydb.
511 511 """
512 512 return self.complete(text)[1]
513 513
514 514 def _clean_glob(self,text):
515 515 return self.glob("%s*" % text)
516 516
517 517 def _clean_glob_win32(self,text):
518 518 return [f.replace("\\","/")
519 519 for f in self.glob("%s*" % text)]
520 520
521 521 def file_matches(self, text):
522 522 """Match filenames, expanding ~USER type strings.
523 523
524 524 Most of the seemingly convoluted logic in this completer is an
525 525 attempt to handle filenames with spaces in them. And yet it's not
526 526 quite perfect, because Python's readline doesn't expose all of the
527 527 GNU readline details needed for this to be done correctly.
528 528
529 529 For a filename with a space in it, the printed completions will be
530 530 only the parts after what's already been typed (instead of the
531 531 full completions, as is normally done). I don't think with the
532 532 current (as of Python 2.3) Python readline it's possible to do
533 533 better."""
534 534
535 535 #io.rprint('Completer->file_matches: <%r>' % text) # dbg
536 536
537 537 # chars that require escaping with backslash - i.e. chars
538 538 # that readline treats incorrectly as delimiters, but we
539 539 # don't want to treat as delimiters in filename matching
540 540 # when escaped with backslash
541 541 if text.startswith('!'):
542 542 text = text[1:]
543 543 text_prefix = '!'
544 544 else:
545 545 text_prefix = ''
546 546
547 547 text_until_cursor = self.text_until_cursor
548 548 # track strings with open quotes
549 549 open_quotes = has_open_quotes(text_until_cursor)
550 550
551 551 if '(' in text_until_cursor or '[' in text_until_cursor:
552 552 lsplit = text
553 553 else:
554 554 try:
555 555 # arg_split ~ shlex.split, but with unicode bugs fixed by us
556 556 lsplit = arg_split(text_until_cursor)[-1]
557 557 except ValueError:
558 558 # typically an unmatched ", or backslash without escaped char.
559 559 if open_quotes:
560 560 lsplit = text_until_cursor.split(open_quotes)[-1]
561 561 else:
562 562 return []
563 563 except IndexError:
564 564 # tab pressed on empty line
565 565 lsplit = ""
566 566
567 567 if not open_quotes and lsplit != protect_filename(lsplit):
568 568 # if protectables are found, do matching on the whole escaped name
569 569 has_protectables = True
570 570 text0,text = text,lsplit
571 571 else:
572 572 has_protectables = False
573 573 text = os.path.expanduser(text)
574 574
575 575 if text == "":
576 576 return [text_prefix + protect_filename(f) for f in self.glob("*")]
577 577
578 578 # Compute the matches from the filesystem
579 579 m0 = self.clean_glob(text.replace('\\',''))
580 580
581 581 if has_protectables:
582 582 # If we had protectables, we need to revert our changes to the
583 583 # beginning of filename so that we don't double-write the part
584 584 # of the filename we have so far
585 585 len_lsplit = len(lsplit)
586 586 matches = [text_prefix + text0 +
587 587 protect_filename(f[len_lsplit:]) for f in m0]
588 588 else:
589 589 if open_quotes:
590 590 # if we have a string with an open quote, we don't need to
591 591 # protect the names at all (and we _shouldn't_, as it
592 592 # would cause bugs when the filesystem call is made).
593 593 matches = m0
594 594 else:
595 595 matches = [text_prefix +
596 596 protect_filename(f) for f in m0]
597 597
598 598 #io.rprint('mm', matches) # dbg
599 599
600 600 # Mark directories in input list by appending '/' to their names.
601 601 matches = [x+'/' if os.path.isdir(x) else x for x in matches]
602 602 return matches
603 603
604 604 def magic_matches(self, text):
605 605 """Match magics"""
606 606 #print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
607 607 # Get all shell magics now rather than statically, so magics loaded at
608 608 # runtime show up too.
609 609 lsm = self.shell.magics_manager.lsmagic()
610 610 line_magics = lsm['line']
611 611 cell_magics = lsm['cell']
612 612 pre = self.magic_escape
613 613 pre2 = pre+pre
614 614
615 615 # Completion logic:
616 616 # - user gives %%: only do cell magics
617 617 # - user gives %: do both line and cell magics
618 618 # - no prefix: do both
619 619 # In other words, line magics are skipped if the user gives %% explicitly
620 620 bare_text = text.lstrip(pre)
621 621 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
622 622 if not text.startswith(pre2):
623 623 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
624 624 return comp
625 625
626 626 def alias_matches(self, text):
627 627 """Match internal system aliases"""
628 628 #print 'Completer->alias_matches:',text,'lb',self.text_until_cursor # dbg
629 629
630 630 # if we are not in the first 'item', alias matching
631 631 # doesn't make sense - unless we are starting with 'sudo' command.
632 632 main_text = self.text_until_cursor.lstrip()
633 633 if ' ' in main_text and not main_text.startswith('sudo'):
634 634 return []
635 635 text = os.path.expanduser(text)
636 636 aliases = self.alias_table.keys()
637 637 if text == '':
638 638 return aliases
639 639 else:
640 640 return [a for a in aliases if a.startswith(text)]
641 641
642 642 def python_matches(self,text):
643 643 """Match attributes or global python names"""
644 644
645 645 #io.rprint('Completer->python_matches, txt=%r' % text) # dbg
646 646 if "." in text:
647 647 try:
648 648 matches = self.attr_matches(text)
649 649 if text.endswith('.') and self.omit__names:
650 650 if self.omit__names == 1:
651 651 # true if txt is _not_ a __ name, false otherwise:
652 652 no__name = (lambda txt:
653 653 re.match(r'.*\.__.*?__',txt) is None)
654 654 else:
655 655 # true if txt is _not_ a _ name, false otherwise:
656 656 no__name = (lambda txt:
657 657 re.match(r'.*\._.*?',txt) is None)
658 658 matches = filter(no__name, matches)
659 659 except NameError:
660 660 # catches <undefined attributes>.<tab>
661 661 matches = []
662 662 else:
663 663 matches = self.global_matches(text)
664 664
665 665 return matches
666 666
667 667 def _default_arguments(self, obj):
668 668 """Return the list of default arguments of obj if it is callable,
669 669 or empty list otherwise."""
670 670
671 671 if not (inspect.isfunction(obj) or inspect.ismethod(obj)):
672 672 # for classes, check for __init__,__new__
673 673 if inspect.isclass(obj):
674 674 obj = (getattr(obj,'__init__',None) or
675 675 getattr(obj,'__new__',None))
676 676 # for all others, check if they are __call__able
677 677 elif hasattr(obj, '__call__'):
678 678 obj = obj.__call__
679 679 # XXX: is there a way to handle the builtins ?
680 680 try:
681 681 args,_,_1,defaults = inspect.getargspec(obj)
682 682 if defaults:
683 683 return args[-len(defaults):]
684 684 except TypeError: pass
685 685 return []
686 686
687 687 def python_func_kw_matches(self,text):
688 688 """Match named parameters (kwargs) of the last open function"""
689 689
690 690 if "." in text: # a parameter cannot be dotted
691 691 return []
692 692 try: regexp = self.__funcParamsRegex
693 693 except AttributeError:
694 694 regexp = self.__funcParamsRegex = re.compile(r'''
695 695 '.*?(?<!\\)' | # single quoted strings or
696 696 ".*?(?<!\\)" | # double quoted strings or
697 697 \w+ | # identifier
698 698 \S # other characters
699 699 ''', re.VERBOSE | re.DOTALL)
700 700 # 1. find the nearest identifier that comes before an unclosed
701 701 # parenthesis before the cursor
702 702 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
703 703 tokens = regexp.findall(self.text_until_cursor)
704 704 tokens.reverse()
705 705 iterTokens = iter(tokens); openPar = 0
706 706 for token in iterTokens:
707 707 if token == ')':
708 708 openPar -= 1
709 709 elif token == '(':
710 710 openPar += 1
711 711 if openPar > 0:
712 712 # found the last unclosed parenthesis
713 713 break
714 714 else:
715 715 return []
716 716 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
717 717 ids = []
718 718 isId = re.compile(r'\w+$').match
719 719 while True:
720 720 try:
721 ids.append(iterTokens.next())
721 ids.append(next(iterTokens))
722 722 if not isId(ids[-1]):
723 723 ids.pop(); break
724 if not iterTokens.next() == '.':
724 if not next(iterTokens) == '.':
725 725 break
726 726 except StopIteration:
727 727 break
728 728 # lookup the candidate callable matches either using global_matches
729 729 # or attr_matches for dotted names
730 730 if len(ids) == 1:
731 731 callableMatches = self.global_matches(ids[0])
732 732 else:
733 733 callableMatches = self.attr_matches('.'.join(ids[::-1]))
734 734 argMatches = []
735 735 for callableMatch in callableMatches:
736 736 try:
737 737 namedArgs = self._default_arguments(eval(callableMatch,
738 738 self.namespace))
739 739 except:
740 740 continue
741 741 for namedArg in namedArgs:
742 742 if namedArg.startswith(text):
743 743 argMatches.append("%s=" %namedArg)
744 744 return argMatches
745 745
746 746 def dispatch_custom_completer(self, text):
747 747 #io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
748 748 line = self.line_buffer
749 749 if not line.strip():
750 750 return None
751 751
752 752 # Create a little structure to pass all the relevant information about
753 753 # the current completion to any custom completer.
754 754 event = Bunch()
755 755 event.line = line
756 756 event.symbol = text
757 757 cmd = line.split(None,1)[0]
758 758 event.command = cmd
759 759 event.text_until_cursor = self.text_until_cursor
760 760
761 761 #print "\ncustom:{%s]\n" % event # dbg
762 762
763 763 # for foo etc, try also to find completer for %foo
764 764 if not cmd.startswith(self.magic_escape):
765 765 try_magic = self.custom_completers.s_matches(
766 766 self.magic_escape + cmd)
767 767 else:
768 768 try_magic = []
769 769
770 770 for c in itertools.chain(self.custom_completers.s_matches(cmd),
771 771 try_magic,
772 772 self.custom_completers.flat_matches(self.text_until_cursor)):
773 773 #print "try",c # dbg
774 774 try:
775 775 res = c(event)
776 776 if res:
777 777 # first, try case sensitive match
778 778 withcase = [r for r in res if r.startswith(text)]
779 779 if withcase:
780 780 return withcase
781 781 # if none, then case insensitive ones are ok too
782 782 text_low = text.lower()
783 783 return [r for r in res if r.lower().startswith(text_low)]
784 784 except TryNext:
785 785 pass
786 786
787 787 return None
788 788
789 789 def complete(self, text=None, line_buffer=None, cursor_pos=None):
790 790 """Find completions for the given text and line context.
791 791
792 792 This is called successively with state == 0, 1, 2, ... until it
793 793 returns None. The completion should begin with 'text'.
794 794
795 795 Note that both the text and the line_buffer are optional, but at least
796 796 one of them must be given.
797 797
798 798 Parameters
799 799 ----------
800 800 text : string, optional
801 801 Text to perform the completion on. If not given, the line buffer
802 802 is split using the instance's CompletionSplitter object.
803 803
804 804 line_buffer : string, optional
805 805 If not given, the completer attempts to obtain the current line
806 806 buffer via readline. This keyword allows clients which are
807 807 requesting for text completions in non-readline contexts to inform
808 808 the completer of the entire text.
809 809
810 810 cursor_pos : int, optional
811 811 Index of the cursor in the full line buffer. Should be provided by
812 812 remote frontends where kernel has no access to frontend state.
813 813
814 814 Returns
815 815 -------
816 816 text : str
817 817 Text that was actually used in the completion.
818 818
819 819 matches : list
820 820 A list of completion matches.
821 821 """
822 822 #io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
823 823
824 824 # if the cursor position isn't given, the only sane assumption we can
825 825 # make is that it's at the end of the line (the common case)
826 826 if cursor_pos is None:
827 827 cursor_pos = len(line_buffer) if text is None else len(text)
828 828
829 829 # if text is either None or an empty string, rely on the line buffer
830 830 if not text:
831 831 text = self.splitter.split_line(line_buffer, cursor_pos)
832 832
833 833 # If no line buffer is given, assume the input text is all there was
834 834 if line_buffer is None:
835 835 line_buffer = text
836 836
837 837 self.line_buffer = line_buffer
838 838 self.text_until_cursor = self.line_buffer[:cursor_pos]
839 839 #io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
840 840
841 841 # Start with a clean slate of completions
842 842 self.matches[:] = []
843 843 custom_res = self.dispatch_custom_completer(text)
844 844 if custom_res is not None:
845 845 # did custom completers produce something?
846 846 self.matches = custom_res
847 847 else:
848 848 # Extend the list of completions with the results of each
849 849 # matcher, so we return results to the user from all
850 850 # namespaces.
851 851 if self.merge_completions:
852 852 self.matches = []
853 853 for matcher in self.matchers:
854 854 try:
855 855 self.matches.extend(matcher(text))
856 856 except:
857 857 # Show the ugly traceback if the matcher causes an
858 858 # exception, but do NOT crash the kernel!
859 859 sys.excepthook(*sys.exc_info())
860 860 else:
861 861 for matcher in self.matchers:
862 862 self.matches = matcher(text)
863 863 if self.matches:
864 864 break
865 865 # FIXME: we should extend our api to return a dict with completions for
866 866 # different types of objects. The rlcomplete() method could then
867 867 # simply collapse the dict into a list for readline, but we'd have
868 868 # richer completion semantics in other evironments.
869 869 self.matches = sorted(set(self.matches))
870 870 #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
871 871 return text, self.matches
872 872
873 873 def rlcomplete(self, text, state):
874 874 """Return the state-th possible completion for 'text'.
875 875
876 876 This is called successively with state == 0, 1, 2, ... until it
877 877 returns None. The completion should begin with 'text'.
878 878
879 879 Parameters
880 880 ----------
881 881 text : string
882 882 Text to perform the completion on.
883 883
884 884 state : int
885 885 Counter used by readline.
886 886 """
887 887 if state==0:
888 888
889 889 self.line_buffer = line_buffer = self.readline.get_line_buffer()
890 890 cursor_pos = self.readline.get_endidx()
891 891
892 892 #io.rprint("\nRLCOMPLETE: %r %r %r" %
893 893 # (text, line_buffer, cursor_pos) ) # dbg
894 894
895 895 # if there is only a tab on a line with only whitespace, instead of
896 896 # the mostly useless 'do you want to see all million completions'
897 897 # message, just do the right thing and give the user his tab!
898 898 # Incidentally, this enables pasting of tabbed text from an editor
899 899 # (as long as autoindent is off).
900 900
901 901 # It should be noted that at least pyreadline still shows file
902 902 # completions - is there a way around it?
903 903
904 904 # don't apply this on 'dumb' terminals, such as emacs buffers, so
905 905 # we don't interfere with their own tab-completion mechanism.
906 906 if not (self.dumb_terminal or line_buffer.strip()):
907 907 self.readline.insert_text('\t')
908 908 sys.stdout.flush()
909 909 return None
910 910
911 911 # Note: debugging exceptions that may occur in completion is very
912 912 # tricky, because readline unconditionally silences them. So if
913 913 # during development you suspect a bug in the completion code, turn
914 914 # this flag on temporarily by uncommenting the second form (don't
915 915 # flip the value in the first line, as the '# dbg' marker can be
916 916 # automatically detected and is used elsewhere).
917 917 DEBUG = False
918 918 #DEBUG = True # dbg
919 919 if DEBUG:
920 920 try:
921 921 self.complete(text, line_buffer, cursor_pos)
922 922 except:
923 923 import traceback; traceback.print_exc()
924 924 else:
925 925 # The normal production version is here
926 926
927 927 # This method computes the self.matches array
928 928 self.complete(text, line_buffer, cursor_pos)
929 929
930 930 try:
931 931 return self.matches[state]
932 932 except IndexError:
933 933 return None
@@ -1,1900 +1,1903 b''
1 1 """Pexpect is a Python module for spawning child applications and controlling
2 2 them automatically. Pexpect can be used for automating interactive applications
3 3 such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
4 4 scripts for duplicating software package installations on different servers. It
5 5 can be used for automated software testing. Pexpect is in the spirit of Don
6 6 Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
7 7 require TCL and Expect or require C extensions to be compiled. Pexpect does not
8 8 use C, Expect, or TCL extensions. It should work on any platform that supports
9 9 the standard Python pty module. The Pexpect interface focuses on ease of use so
10 10 that simple tasks are easy.
11 11
12 12 There are two main interfaces to the Pexpect system; these are the function,
13 13 run() and the class, spawn. The spawn class is more powerful. The run()
14 14 function is simpler than spawn, and is good for quickly calling program. When
15 15 you call the run() function it executes a given program and then returns the
16 16 output. This is a handy replacement for os.system().
17 17
18 18 For example::
19 19
20 20 pexpect.run('ls -la')
21 21
22 22 The spawn class is the more powerful interface to the Pexpect system. You can
23 23 use this to spawn a child program then interact with it by sending input and
24 24 expecting responses (waiting for patterns in the child's output).
25 25
26 26 For example::
27 27
28 28 child = pexpect.spawn('scp foo myname@host.example.com:.')
29 29 child.expect ('Password:')
30 30 child.sendline (mypassword)
31 31
32 32 This works even for commands that ask for passwords or other input outside of
33 33 the normal stdio streams. For example, ssh reads input directly from the TTY
34 34 device which bypasses stdin.
35 35
36 36 Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
37 37 Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
38 38 vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
39 39 Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
40 40 Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
41 41 Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
42 42 Spiegel, Jan Grant, Shane Kerr and Thomas Kluyver. Let me know if I forgot anyone.
43 43
44 44 Pexpect is free, open source, and all that good stuff.
45 45
46 46 Permission is hereby granted, free of charge, to any person obtaining a copy of
47 47 this software and associated documentation files (the "Software"), to deal in
48 48 the Software without restriction, including without limitation the rights to
49 49 use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
50 50 of the Software, and to permit persons to whom the Software is furnished to do
51 51 so, subject to the following conditions:
52 52
53 53 The above copyright notice and this permission notice shall be included in all
54 54 copies or substantial portions of the Software.
55 55
56 56 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
57 57 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
58 58 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
59 59 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
60 60 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
61 61 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
62 62 SOFTWARE.
63 63
64 64 Pexpect Copyright (c) 2008-2011 Noah Spurrier
65 65 http://pexpect.sourceforge.net/
66 66 """
67 67
68 68 try:
69 69 import os, sys, time
70 70 import select
71 71 import re
72 72 import struct
73 73 import resource
74 74 import types
75 75 import pty
76 76 import tty
77 77 import termios
78 78 import fcntl
79 79 import errno
80 80 import traceback
81 81 import signal
82 82 except ImportError as e:
83 83 raise ImportError (str(e) + """
84 84
85 85 A critical module was not found. Probably this operating system does not
86 86 support it. Pexpect is intended for UNIX-like operating systems.""")
87 87
# Module version metadata.
__version__ = '2.6.dev'
version = __version__
version_info = (2,6,'dev')
# Names exported by ``from pexpect import *``.
__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnb', 'run', 'which',
    'split_command_line', '__version__']
93 93
94 94 # Exception classes used by this module.
class ExceptionPexpect(Exception):

    """Base class for all exceptions raised by this module."""

    def __init__(self, value):

        self.value = value

    def __str__(self):

        return str(self.value)

    def get_trace(self):

        """Return an abbreviated stack trace containing only the frames that
        concern the caller -- frames from inside the Pexpect module itself
        are filtered out."""

        frames = traceback.extract_tb(sys.exc_info()[2])
        frames = [frame for frame in frames if self.__filter_not_pexpect(frame)]
        return ''.join(traceback.format_list(frames))

    def __filter_not_pexpect(self, trace_list_item):

        """Return True if the frame (item 0 is its filename) did not
        originate in pexpect.py."""

        return 'pexpect.py' not in trace_list_item[0]
128 128
class EOF(ExceptionPexpect):

    """Raised when EOF is read from a child. This usually means the child has exited (or closed its output)."""
132 132
class TIMEOUT(ExceptionPexpect):

    """Raised when a read exceeds the timeout before matching. """
136 136
137 137 ##class TIMEOUT_PATTERN(TIMEOUT):
138 138 ## """Raised when the pattern match time exceeds the timeout.
139 139 ## This is different than a read TIMEOUT because the child process may
140 140 ## give output, thus never give a TIMEOUT, but the output
141 141 ## may never match a pattern.
142 142 ## """
143 143 ##class MAXBUFFER(ExceptionPexpect):
144 144 ## """Raised when a scan buffer fills before matching an expected pattern."""
145 145
# True when running under Python 3; used to select bytes/unicode handling.
PY3 = (sys.version_info[0] >= 3)
147 147
def _cast_bytes(s, enc):
    """Encode *s* with *enc* when it is a unicode string; bytes pass through."""
    return s.encode(enc) if isinstance(s, unicode) else s
152 152
153 153 def _cast_unicode(s, enc):
154 154 if isinstance(s, bytes):
155 155 return s.decode(enc)
156 156 return s
157 157
# Type of a compiled regular expression object, for isinstance() checks.
re_type = type(re.compile(''))
159 159
def run (command, timeout=-1, withexitstatus=False, events=None, extra_args=None,
    logfile=None, cwd=None, env=None, encoding='utf-8'):

    """
    This function runs the given command; waits for it to finish; then
    returns all output as a string. STDERR is included in output. If the full
    path to the command is not given then the path is searched. It is a handy
    replacement for os.system().

    Note that lines are terminated by CR/LF (\\r\\n) combination even on
    UNIX-like systems because this is the standard for pseudo ttys. If you set
    'withexitstatus' to true, then run will return a tuple of (command_output,
    exitstatus). If 'withexitstatus' is false then this returns just
    command_output.

    Examples::

        run ("/usr/local/apache/bin/apachectl start")
        run ("svn ci -m 'automatic commit' my_file.py")
        (command_output, exitstatus) = run ('ls -l /bin', withexitstatus=1)

    The 'events' argument should be a dictionary of patterns and responses.
    Whenever one of the patterns is seen in the command output, run() sends
    the associated response string (put newlines in your string if Enter is
    necessary). A response may also be a callback function; any callback
    takes one argument -- the dictionary of run()'s locals, so you can access
    the child spawn object or any other variable defined in run()
    (event_count, child, and extra_args are the most useful). A callback may
    return True to stop the current run process, or a string which will be
    sent to the child. 'extra_args' is not used by run() directly; it
    provides a way to pass data to a callback through the locals dictionary::

        run ("ssh username@machine.example.com 'ls -l'", events={'(?i)password':'secret\\n'})
    """

    if timeout == -1:
        child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env,
                      encoding=encoding)
    else:
        child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile,
                      cwd=cwd, env=env, encoding=encoding)
    if events is not None:
        # Materialize as lists: on Python 3 dict.keys()/values() are views,
        # and views do not support the ``responses[index]`` indexing below.
        patterns = list(events.keys())
        responses = list(events.values())
    else:
        patterns = None # We assume that EOF or TIMEOUT will save us.
        responses = None
    child_result_list = []
    event_count = 0
    while True:
        try:
            index = child.expect (patterns)
            if isinstance(child.after, basestring):
                child_result_list.append(child.before + child.after)
            else:
                # child.after may have been a TIMEOUT or EOF, so don't cat those.
                child_result_list.append(child.before)
            if isinstance(responses[index], basestring):
                child.send(responses[index])
            elif type(responses[index]) is types.FunctionType:
                callback_result = responses[index](locals())
                sys.stdout.flush()
                if isinstance(callback_result, basestring):
                    child.send(callback_result)
                elif callback_result:
                    # A truthy non-string result stops the run loop.
                    break
            else:
                raise TypeError ('The callback must be a string or function type.')
            event_count = event_count + 1
        except TIMEOUT:
            child_result_list.append(child.before)
            break
        except EOF:
            child_result_list.append(child.before)
            break
    child_result = child._empty_buffer.join(child_result_list)
    if withexitstatus:
        child.close()
        return (child_result, child.exitstatus)
    else:
        return child_result
280 280
class spawnb(object):
    """Use this class to start and control child applications with a pure-bytes
    interface."""

    # Buffer machinery. The unicode-facing ``spawn`` subclass overrides
    # these; here everything read from / written to the child is bytes.
    _buffer_type = bytes
    def _cast_buffer_type(self, s):
        # Coerce text to bytes using self.encoding; bytes pass through.
        return _cast_bytes(s, self.encoding)
    _empty_buffer = b''
    _pty_newline = b'\r\n'

    # Some code needs this to exist, but it's mainly for the spawn subclass.
    encoding = 'utf-8'
293 293
    # NOTE: the mutable [] default for ``args`` is shared across calls, but it
    # is safe here because _spawn() copies it before mutating.
    def __init__(self, command, args=[], timeout=30, maxread=2000, searchwindowsize=None,
        logfile=None, cwd=None, env=None):

        """Construct the object and start the child *command*.

        The command parameter may be a single string including arguments::

            child = pexpect.spawn ('/usr/bin/ftp')
            child = pexpect.spawn ('ls -latr /tmp')

        or a command plus its own argument list, which avoids string
        parsing and can make syntax clearer::

            child = pexpect.spawn ('/usr/bin/ssh', ['user@example.com'])
            child = pexpect.spawn ('ls', ['-latr', '/tmp'])

        After this the child application is created and ready to talk to;
        see expect(), send() and sendline(). Note that Pexpect does NOT
        interpret shell meta characters such as redirect, pipe, or wild
        cards (>, |, or *) -- to use them you must start a shell::

            child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > log_list.txt"')
            child.expect(pexpect.EOF)

        maxread is the maximum number of bytes read from the TTY at one
        time (1 turns buffering off; larger values may help performance
        for large outputs). searchwindowsize sets how far back in the
        incoming buffer Pexpect searches for pattern matches; the default
        (None) searches from the beginning of the buffer each time new
        data arrives, which is inefficient for commands generating a lot
        of output. searchwindowsize does not affect the size of the
        incoming buffer -- the full buffer is available after expect().

        logfile turns logging on or off: all input and output is copied
        to the given file object (sys.stdout echoes everything; None --
        the default -- stops logging). It is flushed after each write.
        The logfile_read and logfile_send members can instead log only
        the child's output or only the data sent to the child.

        delaybeforesend is a short sleep (default 50 ms / 0.05 s) before
        every write to the child. This works around a common race: an
        application prints its "Password:" prompt *before* turning echo
        off, so sending the password immediately gets it echoed back.
        Set delaybeforesend to 0 to restore the old immediate behavior;
        most Linux machines don't like it below 0.03.

        spawn finds commands on your path using the same logic as
        "which". If you want the child's exit status you must call the
        close() method: the exit or signal status is stored in
        self.exitstatus or self.signalstatus (one of them is always
        None), and self.status holds the raw value returned by
        os.waitpid, interpretable via os.WIFEXITED/os.WEXITSTATUS or
        os.WIFSIGNALED/os.TERMSIG. """

        self.STDIN_FILENO = pty.STDIN_FILENO
        self.STDOUT_FILENO = pty.STDOUT_FILENO
        self.STDERR_FILENO = pty.STDERR_FILENO
        self.stdin = sys.stdin
        self.stdout = sys.stdout
        self.stderr = sys.stderr

        self.searcher = None
        self.ignorecase = False
        self.before = None
        self.after = None
        self.match = None
        self.match_index = None
        self.terminated = True
        self.exitstatus = None
        self.signalstatus = None
        self.status = None # status returned by os.waitpid
        self.flag_eof = False
        self.pid = None
        self.child_fd = -1 # initially closed
        self.timeout = timeout
        self.delimiter = EOF
        self.logfile = logfile
        self.logfile_read = None # input from child (read_nonblocking)
        self.logfile_send = None # output to send (send, sendline)
        self.maxread = maxread # max bytes to read at one time into buffer
        self.buffer = self._empty_buffer # This is the read buffer. See maxread.
        self.searchwindowsize = searchwindowsize # Anything before searchwindowsize point is preserved, but not searched.
        # Most Linux machines don't like delaybeforesend to be below 0.03 (30 ms).
        self.delaybeforesend = 0.05 # Sets sleep time used just before sending data to child. Time in seconds.
        self.delayafterclose = 0.1 # Sets delay in close() method to allow kernel time to update process status. Time in seconds.
        self.delayafterterminate = 0.1 # Sets delay in terminate() method to allow kernel time to update process status. Time in seconds.
        self.softspace = False # File-like object.
        self.name = '<' + repr(self) + '>' # File-like object.
        self.closed = True # File-like object.
        self.cwd = cwd
        self.env = env
        self.__irix_hack = (sys.platform.lower().find('irix')>=0) # This flags if we are running on irix
        # Solaris uses internal __fork_pty(). All others use pty.fork().
        if 'solaris' in sys.platform.lower() or 'sunos5' in sys.platform.lower():
            self.use_native_pty_fork = False
        else:
            self.use_native_pty_fork = True


        # allow dummy instances for subclasses that may not use command or args.
        if command is None:
            self.command = None
            self.args = None
            self.name = '<pexpect factory incomplete>'
        else:
            self._spawn (command, args)
457 457
458 458 def __del__(self):
459 459
460 460 """This makes sure that no system resources are left open. Python only
461 461 garbage collects Python objects. OS file descriptors are not Python
462 462 objects, so they must be handled explicitly. If the child file
463 463 descriptor was opened outside of this class (passed to the constructor)
464 464 then this does not close it. """
465 465
466 466 if not self.closed:
467 467 # It is possible for __del__ methods to execute during the
468 468 # teardown of the Python VM itself. Thus self.close() may
469 469 # trigger an exception because os.close may be None.
470 470 # -- Fernando Perez
471 471 try:
472 472 self.close()
473 473 except:
474 474 pass
475 475
476 476 def __str__(self):
477 477
478 478 """This returns a human-readable string that represents the state of
479 479 the object. """
480 480
481 481 s = []
482 482 s.append(repr(self))
483 483 s.append('version: ' + __version__)
484 484 s.append('command: ' + str(self.command))
485 485 s.append('args: ' + str(self.args))
486 486 s.append('searcher: ' + str(self.searcher))
487 487 s.append('buffer (last 100 chars): ' + str(self.buffer)[-100:])
488 488 s.append('before (last 100 chars): ' + str(self.before)[-100:])
489 489 s.append('after: ' + str(self.after))
490 490 s.append('match: ' + str(self.match))
491 491 s.append('match_index: ' + str(self.match_index))
492 492 s.append('exitstatus: ' + str(self.exitstatus))
493 493 s.append('flag_eof: ' + str(self.flag_eof))
494 494 s.append('pid: ' + str(self.pid))
495 495 s.append('child_fd: ' + str(self.child_fd))
496 496 s.append('closed: ' + str(self.closed))
497 497 s.append('timeout: ' + str(self.timeout))
498 498 s.append('delimiter: ' + str(self.delimiter))
499 499 s.append('logfile: ' + str(self.logfile))
500 500 s.append('logfile_read: ' + str(self.logfile_read))
501 501 s.append('logfile_send: ' + str(self.logfile_send))
502 502 s.append('maxread: ' + str(self.maxread))
503 503 s.append('ignorecase: ' + str(self.ignorecase))
504 504 s.append('searchwindowsize: ' + str(self.searchwindowsize))
505 505 s.append('delaybeforesend: ' + str(self.delaybeforesend))
506 506 s.append('delayafterclose: ' + str(self.delayafterclose))
507 507 s.append('delayafterterminate: ' + str(self.delayafterterminate))
508 508 return '\n'.join(s)
509 509
    def _spawn(self,command,args=[]):

        """Start *command* in a child process on a new pty -- all the
        fork/exec work. Called by __init__. If *args* is empty, *command*
        is parsed (split on spaces) and args is set to the parsed pieces.

        Note that it is difficult for this method to fail: you cannot
        detect whether the child process could actually start. The only
        way to tell is to try to read from the file descriptor -- an
        immediate EOF means the child is already dead. That may not be
        bad, since you may have spawned a child that performs some task,
        creates no stdout output, and then dies. """

        # The pid and child_fd of this object get set by this method.

        # If command is an int type then it may represent a file descriptor.
        if type(command) == type(0):
            raise ExceptionPexpect ('Command is an int type. If this is a file descriptor then maybe you want to use fdpexpect.fdspawn which takes an existing file descriptor instead of a command string.')

        if type (args) != type([]):
            raise TypeError ('The argument, args, must be a list.')

        if args == []:
            # Parse the command string into command + argument list.
            self.args = split_command_line(command)
            self.command = self.args[0]
        else:
            self.args = args[:] # work with a copy
            self.args.insert (0, command)
            self.command = command

        # Resolve the command on $PATH ("which" semantics).
        command_with_path = which(self.command)
        if command_with_path is None:
            raise ExceptionPexpect ('The command was not found or was not executable: %s.' % self.command)
        self.command = command_with_path
        self.args[0] = self.command

        self.name = '<' + ' '.join (self.args) + '>'

        assert self.pid is None, 'The pid member should be None.'
        assert self.command is not None, 'The command member should not be None.'

        if self.use_native_pty_fork:
            try:
                self.pid, self.child_fd = pty.fork()
            except OSError as e:
                raise ExceptionPexpect('Error! pty.fork() failed: ' + str(e))
        else: # Use internal __fork_pty (pty.fork() lacks Solaris support).
            self.pid, self.child_fd = self.__fork_pty()

        if self.pid == 0: # Child
            try:
                self.child_fd = sys.stdout.fileno() # used by setwinsize()
                self.setwinsize(24, 80)
            except:
                # Some platforms do not like setwinsize (Cygwin).
                # This will cause problem when running applications that
                # are very picky about window size.
                # This is a serious limitation, but not a show stopper.
                pass
            # Do not allow child to inherit open file descriptors from parent.
            max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
            for i in range (3, max_fd):
                try:
                    os.close (i)
                except OSError:
                    pass

            # I don't know why this works, but ignoring SIGHUP fixes a
            # problem when trying to start a Java daemon with sudo
            # (specifically, Tomcat).
            signal.signal(signal.SIGHUP, signal.SIG_IGN)

            if self.cwd is not None:
                os.chdir(self.cwd)
            if self.env is None:
                os.execv(self.command, self.args)
            else:
                os.execvpe(self.command, self.args, self.env)

        # Parent
        self.terminated = False
        self.closed = False
593 593
    def __fork_pty(self):

        """Substitute for the forkpty system call; more portable than
        pty.fork(), in particular it works on Solaris. Returns
        (pid, parent_fd); in the child, pid is 0 and the pty slave has
        become stdin/stdout/stderr.

        Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
        resolve the issue with Python's pty.fork() not supporting Solaris,
        particularly ssh. Based on patch to posixmodule.c authored by Noah
        Spurrier::

            http://mail.python.org/pipermail/python-dev/2003-May/035281.html

        """

        parent_fd, child_fd = os.openpty()
        if parent_fd < 0 or child_fd < 0:
            raise ExceptionPexpect("Error! Could not open pty with os.openpty().")

        pid = os.fork()
        if pid < 0:
            raise ExceptionPexpect("Error! Failed os.fork().")
        elif pid == 0:
            # Child: attach the pty slave as the controlling tty and as
            # stdin (0), stdout (1) and stderr (2).
            os.close(parent_fd)
            self.__pty_make_controlling_tty(child_fd)

            os.dup2(child_fd, 0)
            os.dup2(child_fd, 1)
            os.dup2(child_fd, 2)

            if child_fd > 2:
                os.close(child_fd)
        else:
            # Parent: keep only the master side.
            os.close(child_fd)

        return pid, parent_fd
632 632
633 633 def __pty_make_controlling_tty(self, tty_fd):
634 634
635 635 """This makes the pseudo-terminal the controlling tty. This should be
636 636 more portable than the pty.fork() function. Specifically, this should
637 637 work on Solaris. """
638 638
639 639 child_name = os.ttyname(tty_fd)
640 640
641 641 # Disconnect from controlling tty. Harmless if not already connected.
642 642 try:
643 643 fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
644 644 if fd >= 0:
645 645 os.close(fd)
646 646 except:
647 647 # Already disconnected. This happens if running inside cron.
648 648 pass
649 649
650 650 os.setsid()
651 651
652 652 # Verify we are disconnected from controlling tty
653 653 # by attempting to open it again.
654 654 try:
655 655 fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY);
656 656 if fd >= 0:
657 657 os.close(fd)
658 658 raise ExceptionPexpect("Error! Failed to disconnect from controlling tty. It is still possible to open /dev/tty.")
659 659 except:
660 660 # Good! We are disconnected from a controlling tty.
661 661 pass
662 662
663 663 # Verify we can open child pty.
664 664 fd = os.open(child_name, os.O_RDWR);
665 665 if fd < 0:
666 666 raise ExceptionPexpect("Error! Could not open child pty, " + child_name)
667 667 else:
668 668 os.close(fd)
669 669
670 670 # Verify we now have a controlling tty.
671 671 fd = os.open("/dev/tty", os.O_WRONLY)
672 672 if fd < 0:
673 673 raise ExceptionPexpect("Error! Could not open controlling tty, /dev/tty")
674 674 else:
675 675 os.close(fd)
676 676
677 677 def fileno (self): # File-like object.
678 678
679 679 """This returns the file descriptor of the pty for the child.
680 680 """
681 681
682 682 return self.child_fd
683 683
    def close (self, force=True): # File-like object.

        """Close the connection with the child application. Calling close()
        more than once is valid (this emulates standard Python behavior
        with files). Set force to True if you want to make sure that the
        child is terminated (SIGKILL is sent if the child ignores SIGHUP
        and SIGINT).

        Raises ExceptionPexpect if the child survives terminate(force). """

        if not self.closed:
            self.flush()
            os.close (self.child_fd)
            time.sleep(self.delayafterclose) # Give kernel time to update process status.
            if self.isalive():
                if not self.terminate(force):
                    raise ExceptionPexpect ('close() could not terminate the child using terminate()')
            self.child_fd = -1
            self.closed = True
            # Deliberately keep self.pid so exit status can still be queried.
            #self.pid = None
702 702
703 703 def flush (self): # File-like object.
704 704
705 705 """This does nothing. It is here to support the interface for a
706 706 File-like object. """
707 707
708 708 pass
709 709
710 710 def isatty (self): # File-like object.
711 711
712 712 """This returns True if the file descriptor is open and connected to a
713 713 tty(-like) device, else False. """
714 714
715 715 return os.isatty(self.child_fd)
716 716
717 717 def waitnoecho (self, timeout=-1):
718 718
719 719 """This waits until the terminal ECHO flag is set False. This returns
720 720 True if the echo mode is off. This returns False if the ECHO flag was
721 721 not set False before the timeout. This can be used to detect when the
722 722 child is waiting for a password. Usually a child application will turn
723 723 off echo mode when it is waiting for the user to enter a password. For
724 724 example, instead of expecting the "password:" prompt you can wait for
725 725 the child to set ECHO off::
726 726
727 727 p = pexpect.spawn ('ssh user@example.com')
728 728 p.waitnoecho()
729 729 p.sendline(mypassword)
730 730
731 731 If timeout==-1 then this method will use the value in self.timeout.
732 732 If timeout==None then this method to block until ECHO flag is False.
733 733 """
734 734
735 735 if timeout == -1:
736 736 timeout = self.timeout
737 737 if timeout is not None:
738 738 end_time = time.time() + timeout
739 739 while True:
740 740 if not self.getecho():
741 741 return True
742 742 if timeout < 0 and timeout is not None:
743 743 return False
744 744 if timeout is not None:
745 745 timeout = end_time - time.time()
746 746 time.sleep(0.1)
747 747
748 748 def getecho (self):
749 749
750 750 """This returns the terminal echo mode. This returns True if echo is
751 751 on or False if echo is off. Child applications that are expecting you
752 752 to enter a password often set ECHO False. See waitnoecho(). """
753 753
754 754 attr = termios.tcgetattr(self.child_fd)
755 755 if attr[3] & termios.ECHO:
756 756 return True
757 757 return False
758 758
759 759 def setecho (self, state):
760 760
761 761 """This sets the terminal echo mode on or off. Note that anything the
762 762 child sent before the echo will be lost, so you should be sure that
763 763 your input buffer is empty before you call setecho(). For example, the
764 764 following will work as expected::
765 765
766 766 p = pexpect.spawn('cat')
767 767 p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
768 768 p.expect (['1234'])
769 769 p.expect (['1234'])
770 770 p.setecho(False) # Turn off tty echo
771 771 p.sendline ('abcd') # We will set this only once (echoed by cat).
772 772 p.sendline ('wxyz') # We will set this only once (echoed by cat)
773 773 p.expect (['abcd'])
774 774 p.expect (['wxyz'])
775 775
776 776 The following WILL NOT WORK because the lines sent before the setecho
777 777 will be lost::
778 778
779 779 p = pexpect.spawn('cat')
780 780 p.sendline ('1234') # We will see this twice (once from tty echo and again from cat).
781 781 p.setecho(False) # Turn off tty echo
782 782 p.sendline ('abcd') # We will set this only once (echoed by cat).
783 783 p.sendline ('wxyz') # We will set this only once (echoed by cat)
784 784 p.expect (['1234'])
785 785 p.expect (['1234'])
786 786 p.expect (['abcd'])
787 787 p.expect (['wxyz'])
788 788 """
789 789
790 790 self.child_fd
791 791 attr = termios.tcgetattr(self.child_fd)
792 792 if state:
793 793 attr[3] = attr[3] | termios.ECHO
794 794 else:
795 795 attr[3] = attr[3] & ~termios.ECHO
796 796 # I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent
797 797 # and blocked on some platforms. TCSADRAIN is probably ideal if it worked.
798 798 termios.tcsetattr(self.child_fd, termios.TCSANOW, attr)
799 799
    def read_nonblocking (self, size = 1, timeout = -1):

        """This reads at most size bytes from the child application. It
        includes a timeout. If the read does not complete within the timeout
        period then a TIMEOUT exception is raised. If the end of file is read
        then an EOF exception will be raised. If a log file was set using
        setlog() then all data will also be written to the log file.

        If timeout is None then the read may block indefinitely. If timeout is -1
        then the self.timeout value is used. If timeout is 0 then the child is
        polled and if there was no data immediately ready then this will raise
        a TIMEOUT exception.

        The timeout refers only to the amount of time to read at least one
        character. This is not effected by the 'size' parameter, so if you call
        read_nonblocking(size=100, timeout=30) and only one character is
        available right away then one character will be returned immediately.
        It will not wait for 30 seconds for another 99 characters to come in.

        This is a wrapper around os.read(). It uses select.select() to
        implement the timeout. """

        if self.closed:
            raise ValueError ('I/O operation on closed file in read_nonblocking().')

        if timeout == -1:
            timeout = self.timeout

        # Note that some systems such as Solaris do not give an EOF when
        # the child dies. In fact, you can still try to read
        # from the child_fd -- it will block forever or until TIMEOUT.
        # For this case, I test isalive() before doing any reading.
        # If isalive() is false, then I pretend that this is the same as EOF.
        if not self.isalive():
            r,w,e = self.__select([self.child_fd], [], [], 0) # timeout of 0 means "poll"
            if not r:
                self.flag_eof = True
                raise EOF ('End Of File (EOF) in read_nonblocking(). Braindead platform.')
        elif self.__irix_hack:
            # This is a hack for Irix. It seems that Irix requires a long delay before checking isalive.
            # This adds a 2 second delay, but only when the child is terminated.
            r, w, e = self.__select([self.child_fd], [], [], 2)
            if not r and not self.isalive():
                self.flag_eof = True
                raise EOF ('End Of File (EOF) in read_nonblocking(). Pokey platform.')

        # Wait (up to `timeout` seconds) for the fd to become readable.
        r,w,e = self.__select([self.child_fd], [], [], timeout)

        if not r:
            if not self.isalive():
                # Some platforms, such as Irix, will claim that their processes are alive;
                # then timeout on the select; and then finally admit that they are not alive.
                self.flag_eof = True
                raise EOF ('End of File (EOF) in read_nonblocking(). Very pokey platform.')
            else:
                raise TIMEOUT ('Timeout exceeded in read_nonblocking().')

        if self.child_fd in r:
            try:
                s = os.read(self.child_fd, size)
            except OSError as e: # Linux does this
                self.flag_eof = True
                raise EOF ('End Of File (EOF) in read_nonblocking(). Exception style platform.')
            if s == b'': # BSD style
                self.flag_eof = True
                raise EOF ('End Of File (EOF) in read_nonblocking(). Empty string style platform.')

            # Mirror everything read to the attached log files, if any.
            s2 = self._cast_buffer_type(s)
            if self.logfile is not None:
                self.logfile.write(s2)
                self.logfile.flush()
            if self.logfile_read is not None:
                self.logfile_read.write(s2)
                self.logfile_read.flush()

            return s

        raise ExceptionPexpect ('Reached an unexpected state in read_nonblocking().')
878 878
    def read (self, size = -1):   # File-like object.
        """This reads at most "size" bytes from the file (less if the read hits
        EOF before obtaining size bytes). If the size argument is negative or
        omitted, read all data until EOF is reached. The bytes are returned as
        a string object. An empty string is returned when EOF is encountered
        immediately. """

        if size == 0:
            return self._empty_buffer
        if size < 0:
            self.expect (self.delimiter) # delimiter default is EOF
            return self.before

        # I could have done this more directly by not using expect(), but
        # I deliberately decided to couple read() to expect() so that
        # I would catch any bugs early and ensure consistent behavior.
        # It's a little less efficient, but there is less for me to
        # worry about if I have to later modify read() or expect().
        # Note, it's OK if size==-1 in the regex. That just means it
        # will never match anything in which case we stop only on EOF.
        if self._buffer_type is bytes:
            # Byte-mode buffers need a bytes pattern (`.{size}` matches any
            # `size` characters thanks to re.DOTALL below).
            pat = (u'.{%d}' % size).encode('ascii')
        else:
            pat = u'.{%d}' % size
        cre = re.compile(pat, re.DOTALL)
        index = self.expect ([cre, self.delimiter]) # delimiter default is EOF
        if index == 0:
            return self.after ### self.before should be ''. Should I assert this?
        return self.before
908 908
909 909 def readline(self, size = -1):
910 910 """This reads and returns one entire line. A trailing newline is kept
911 911 in the string, but may be absent when a file ends with an incomplete
912 912 line. Note: This readline() looks for a \\r\\n pair even on UNIX
913 913 because this is what the pseudo tty device returns. So contrary to what
914 914 you may expect you will receive the newline as \\r\\n. An empty string
915 915 is returned when EOF is hit immediately. Currently, the size argument is
916 916 mostly ignored, so this behavior is not standard for a file-like
917 917 object. If size is 0 then an empty string is returned. """
918 918
919 919 if size == 0:
920 920 return self._empty_buffer
921 921 index = self.expect ([self._pty_newline, self.delimiter]) # delimiter default is EOF
922 922 if index == 0:
923 923 return self.before + self._pty_newline
924 924 return self.before
925 925
926 926 def __iter__ (self): # File-like object.
927 927
928 928 """This is to support iterators over a file-like object.
929 929 """
930 930
931 931 return self
932 932
    def __next__ (self):    # File-like object.

        """This is to support iterators over a file-like object.
        Returns the next line, or raises StopIteration at EOF.
        """

        result = self.readline()
        if result == self._empty_buffer:
            raise StopIteration
        return result

    if not PY3:
        # Python 2 iterators call .next(); alias it to the Python 3 name.
        next = __next__    # File-like object.
945
943 946 def readlines (self, sizehint = -1): # File-like object.
944 947
945 948 """This reads until EOF using readline() and returns a list containing
946 949 the lines thus read. The optional "sizehint" argument is ignored. """
947 950
948 951 lines = []
949 952 while True:
950 953 line = self.readline()
951 954 if not line:
952 955 break
953 956 lines.append(line)
954 957 return lines
955 958
956 959 def write(self, s): # File-like object.
957 960
958 961 """This is similar to send() except that there is no return value.
959 962 """
960 963
961 964 self.send (s)
962 965
963 966 def writelines (self, sequence): # File-like object.
964 967
965 968 """This calls write() for each element in the sequence. The sequence
966 969 can be any iterable object producing strings, typically a list of
967 970 strings. This does not add line separators There is no return value.
968 971 """
969 972
970 973 for s in sequence:
971 974 self.write (s)
972 975
    def send(self, s):

        """This sends a string to the child process. This returns the number of
        bytes written. If a log file was set then the data is also written to
        the log. """

        # Brief pause before writing (configured via self.delaybeforesend).
        time.sleep(self.delaybeforesend)

        # Mirror the outgoing data to the attached log files, if any.
        s2 = self._cast_buffer_type(s)
        if self.logfile is not None:
            self.logfile.write(s2)
            self.logfile.flush()
        if self.logfile_send is not None:
            self.logfile_send.write(s2)
            self.logfile_send.flush()
        # The encoded bytes are what actually go to the pty fd.
        c = os.write (self.child_fd, _cast_bytes(s, self.encoding))
        return c
990 993
991 994 def sendline(self, s=''):
992 995
993 996 """This is like send(), but it adds a line feed (os.linesep). This
994 997 returns the number of bytes written. """
995 998
996 999 n = self.send (s)
997 1000 n = n + self.send (os.linesep)
998 1001 return n
999 1002
1000 1003 def sendcontrol(self, char):
1001 1004
1002 1005 """This sends a control character to the child such as Ctrl-C or
1003 1006 Ctrl-D. For example, to send a Ctrl-G (ASCII 7)::
1004 1007
1005 1008 child.sendcontrol('g')
1006 1009
1007 1010 See also, sendintr() and sendeof().
1008 1011 """
1009 1012
1010 1013 char = char.lower()
1011 1014 a = ord(char)
1012 1015 if a>=97 and a<=122:
1013 1016 a = a - ord('a') + 1
1014 1017 return self.send (chr(a))
1015 1018 d = {'@':0, '`':0,
1016 1019 '[':27, '{':27,
1017 1020 '\\':28, '|':28,
1018 1021 ']':29, '}': 29,
1019 1022 '^':30, '~':30,
1020 1023 '_':31,
1021 1024 '?':127}
1022 1025 if char not in d:
1023 1026 return 0
1024 1027 return self.send (chr(d[char]))
1025 1028
    def sendeof(self):

        """This sends an EOF to the child. This sends a character which causes
        the pending parent output buffer to be sent to the waiting child
        program without waiting for end-of-line. If it is the first character
        of the line, the read() in the user program returns 0, which signifies
        end-of-file. This means to work as expected a sendeof() has to be
        called at the beginning of a line. This method does not send a newline.
        It is the responsibility of the caller to ensure the eof is sent at the
        beginning of a line. """

        ### Hmmm... how do I send an EOF?
        ###C  if ((m = write(pty, *buf, p - *buf)) < 0)
        ###C      return (errno == EWOULDBLOCK) ? n : -1;
        #fd = sys.stdin.fileno()
        #old = termios.tcgetattr(fd) # remember current state
        #attr = termios.tcgetattr(fd)
        #attr[3] = attr[3] | termios.ICANON # ICANON must be set to recognize EOF
        #try: # use try/finally to ensure state gets restored
        #    termios.tcsetattr(fd, termios.TCSADRAIN, attr)
        #    if hasattr(termios, 'CEOF'):
        #        os.write (self.child_fd, '%c' % termios.CEOF)
        #    else:
        #        # Silly platform does not define CEOF so assume CTRL-D
        #        os.write (self.child_fd, '%c' % 4)
        #finally: # restore state
        #    termios.tcsetattr(fd, termios.TCSADRAIN, old)
        # Ask the tty driver for its configured EOF character (usually Ctrl-D).
        if hasattr(termios, 'VEOF'):
            char = termios.tcgetattr(self.child_fd)[6][termios.VEOF]
        else:
            # platform does not define VEOF so assume CTRL-D
            char = chr(4)
        self.send(char)
1059 1062
1060 1063 def sendintr(self):
1061 1064
1062 1065 """This sends a SIGINT to the child. It does not require
1063 1066 the SIGINT to be the first character on a line. """
1064 1067
1065 1068 if hasattr(termios, 'VINTR'):
1066 1069 char = termios.tcgetattr(self.child_fd)[6][termios.VINTR]
1067 1070 else:
1068 1071 # platform does not define VINTR so assume CTRL-C
1069 1072 char = chr(3)
1070 1073 self.send (char)
1071 1074
1072 1075 def eof (self):
1073 1076
1074 1077 """This returns True if the EOF exception was ever raised.
1075 1078 """
1076 1079
1077 1080 return self.flag_eof
1078 1081
    def terminate(self, force=False):

        """This forces a child process to terminate. It starts nicely with
        SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
        returns True if the child was terminated. This returns False if the
        child could not be terminated. """

        if not self.isalive():
            return True
        try:
            # Escalate gently: HUP first, then CONT (in case the child is
            # stopped), then INT, checking liveness after each signal.
            self.kill(signal.SIGHUP)
            time.sleep(self.delayafterterminate)
            if not self.isalive():
                return True
            self.kill(signal.SIGCONT)
            time.sleep(self.delayafterterminate)
            if not self.isalive():
                return True
            self.kill(signal.SIGINT)
            time.sleep(self.delayafterterminate)
            if not self.isalive():
                return True
            if force:
                # Last resort: SIGKILL cannot be caught or ignored.
                self.kill(signal.SIGKILL)
                time.sleep(self.delayafterterminate)
                if not self.isalive():
                    return True
                else:
                    return False
            return False
        except OSError as e:
            # I think there are kernel timing issues that sometimes cause
            # this to happen. I think isalive() reports True, but the
            # process is dead to the kernel.
            # Make one last attempt to see if the kernel is up to date.
            time.sleep(self.delayafterterminate)
            if not self.isalive():
                return True
            else:
                return False
1119 1122
    def wait(self):

        """This waits until the child exits. This is a blocking call. This will
        not read any data from the child, so this will block forever if the
        child has unread output and has terminated. In other words, the child
        may have printed output then called exit(); but, technically, the child
        is still alive until its output is read. """

        if self.isalive():
            pid, status = os.waitpid(self.pid, 0)
        else:
            raise ExceptionPexpect ('Cannot wait for dead child process.')
        # NOTE(review): exitstatus is set unconditionally here and then again
        # inside the WIFEXITED branch below; redundant but harmless.
        self.exitstatus = os.WEXITSTATUS(status)
        if os.WIFEXITED (status):
            self.status = status
            self.exitstatus = os.WEXITSTATUS(status)
            self.signalstatus = None
            self.terminated = True
        elif os.WIFSIGNALED (status):
            self.status = status
            self.exitstatus = None
            self.signalstatus = os.WTERMSIG(status)
            self.terminated = True
        elif os.WIFSTOPPED (status):
            raise ExceptionPexpect ('Wait was called for a child process that is stopped. This is not supported. Is some other process attempting job control with our child pid?')
        return self.exitstatus
1146 1149
1147 1150 def isalive(self):
1148 1151
1149 1152 """This tests if the child process is running or not. This is
1150 1153 non-blocking. If the child was terminated then this will read the
1151 1154 exitstatus or signalstatus of the child. This returns True if the child
1152 1155 process appears to be running or False if not. It can take literally
1153 1156 SECONDS for Solaris to return the right status. """
1154 1157
1155 1158 if self.terminated:
1156 1159 return False
1157 1160
1158 1161 if self.flag_eof:
1159 1162 # This is for Linux, which requires the blocking form of waitpid to get
1160 1163 # status of a defunct process. This is super-lame. The flag_eof would have
1161 1164 # been set in read_nonblocking(), so this should be safe.
1162 1165 waitpid_options = 0
1163 1166 else:
1164 1167 waitpid_options = os.WNOHANG
1165 1168
1166 1169 try:
1167 1170 pid, status = os.waitpid(self.pid, waitpid_options)
1168 1171 except OSError as e: # No child processes
1169 1172 if e.errno == errno.ECHILD:
1170 1173 raise ExceptionPexpect ('isalive() encountered condition where "terminated" is 0, but there was no child process. Did someone else call waitpid() on our process?')
1171 1174 else:
1172 1175 raise e
1173 1176
1174 1177 # I have to do this twice for Solaris. I can't even believe that I figured this out...
1175 1178 # If waitpid() returns 0 it means that no child process wishes to
1176 1179 # report, and the value of status is undefined.
1177 1180 if pid == 0:
1178 1181 try:
1179 1182 pid, status = os.waitpid(self.pid, waitpid_options) ### os.WNOHANG) # Solaris!
1180 1183 except OSError as e: # This should never happen...
1181 1184 if e[0] == errno.ECHILD:
1182 1185 raise ExceptionPexpect ('isalive() encountered condition that should never happen. There was no child process. Did someone else call waitpid() on our process?')
1183 1186 else:
1184 1187 raise e
1185 1188
1186 1189 # If pid is still 0 after two calls to waitpid() then
1187 1190 # the process really is alive. This seems to work on all platforms, except
1188 1191 # for Irix which seems to require a blocking call on waitpid or select, so I let read_nonblocking
1189 1192 # take care of this situation (unfortunately, this requires waiting through the timeout).
1190 1193 if pid == 0:
1191 1194 return True
1192 1195
1193 1196 if pid == 0:
1194 1197 return True
1195 1198
1196 1199 if os.WIFEXITED (status):
1197 1200 self.status = status
1198 1201 self.exitstatus = os.WEXITSTATUS(status)
1199 1202 self.signalstatus = None
1200 1203 self.terminated = True
1201 1204 elif os.WIFSIGNALED (status):
1202 1205 self.status = status
1203 1206 self.exitstatus = None
1204 1207 self.signalstatus = os.WTERMSIG(status)
1205 1208 self.terminated = True
1206 1209 elif os.WIFSTOPPED (status):
1207 1210 raise ExceptionPexpect ('isalive() encountered condition where child process is stopped. This is not supported. Is some other process attempting job control with our child pid?')
1208 1211 return False
1209 1212
1210 1213 def kill(self, sig):
1211 1214
1212 1215 """This sends the given signal to the child application. In keeping
1213 1216 with UNIX tradition it has a misleading name. It does not necessarily
1214 1217 kill the child unless you send the right signal. """
1215 1218
1216 1219 # Same as os.kill, but the pid is given for you.
1217 1220 if self.isalive():
1218 1221 os.kill(self.pid, sig)
1219 1222
    def compile_pattern_list(self, patterns):

        """This compiles a pattern-string or a list of pattern-strings.
        Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
        those. Patterns may also be None which results in an empty list (you
        might do this if waiting for an EOF or TIMEOUT condition without
        expecting any pattern).

        This is used by expect() when calling expect_list(). Thus expect() is
        nothing more than::

            cpl = self.compile_pattern_list(pl)
            return self.expect_list(cpl, timeout)

        If you are using expect() within a loop it may be more
        efficient to compile the patterns first and then call expect_list().
        This avoid calls in a loop to compile_pattern_list()::

            cpl = self.compile_pattern_list(my_pattern)
            while some_condition:
               ...
               i = self.expect_list(clp, timeout)
               ...
        """

        if patterns is None:
            return []
        if not isinstance(patterns, list):
            # Normalize a single pattern into a one-element list.
            patterns = [patterns]

        compile_flags = re.DOTALL # Allow dot to match \n
        if self.ignorecase:
            compile_flags = compile_flags | re.IGNORECASE
        compiled_pattern_list = []
        for p in patterns:
            if isinstance(p, (bytes, unicode)):
                # Cast to the buffer's string type before compiling.
                p = self._cast_buffer_type(p)
                compiled_pattern_list.append(re.compile(p, compile_flags))
            elif p is EOF:
                compiled_pattern_list.append(EOF)
            elif p is TIMEOUT:
                compiled_pattern_list.append(TIMEOUT)
            elif type(p) is re_type:
                # Already compiled; may be recompiled for bytes mode.
                p = self._prepare_regex_pattern(p)
                compiled_pattern_list.append(p)
            else:
                raise TypeError ('Argument must be one of StringTypes, EOF, TIMEOUT, SRE_Pattern, or a list of those type. %s' % str(type(p)))

        return compiled_pattern_list
1269 1272
    def _prepare_regex_pattern(self, p):
        """Recompile unicode regexes as bytes regexes. Overridden in subclass."""
        if isinstance(p.pattern, unicode):
            # Drop re.UNICODE since the recompiled pattern operates on bytes.
            p = re.compile(p.pattern.encode('utf-8'), p.flags &~ re.UNICODE)
        return p
1275 1278
1276 1279 def expect(self, pattern, timeout = -1, searchwindowsize=-1):
1277 1280
1278 1281 """This seeks through the stream until a pattern is matched. The
1279 1282 pattern is overloaded and may take several types. The pattern can be a
1280 1283 StringType, EOF, a compiled re, or a list of any of those types.
1281 1284 Strings will be compiled to re types. This returns the index into the
1282 1285 pattern list. If the pattern was not a list this returns index 0 on a
1283 1286 successful match. This may raise exceptions for EOF or TIMEOUT. To
1284 1287 avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
1285 1288 list. That will cause expect to match an EOF or TIMEOUT condition
1286 1289 instead of raising an exception.
1287 1290
1288 1291 If you pass a list of patterns and more than one matches, the first match
1289 1292 in the stream is chosen. If more than one pattern matches at that point,
1290 1293 the leftmost in the pattern list is chosen. For example::
1291 1294
1292 1295 # the input is 'foobar'
1293 1296 index = p.expect (['bar', 'foo', 'foobar'])
1294 1297 # returns 1 ('foo') even though 'foobar' is a "better" match
1295 1298
1296 1299 Please note, however, that buffering can affect this behavior, since
1297 1300 input arrives in unpredictable chunks. For example::
1298 1301
1299 1302 # the input is 'foobar'
1300 1303 index = p.expect (['foobar', 'foo'])
1301 1304 # returns 0 ('foobar') if all input is available at once,
1302 1305 # but returs 1 ('foo') if parts of the final 'bar' arrive late
1303 1306
1304 1307 After a match is found the instance attributes 'before', 'after' and
1305 1308 'match' will be set. You can see all the data read before the match in
1306 1309 'before'. You can see the data that was matched in 'after'. The
1307 1310 re.MatchObject used in the re match will be in 'match'. If an error
1308 1311 occurred then 'before' will be set to all the data read so far and
1309 1312 'after' and 'match' will be None.
1310 1313
1311 1314 If timeout is -1 then timeout will be set to the self.timeout value.
1312 1315
1313 1316 A list entry may be EOF or TIMEOUT instead of a string. This will
1314 1317 catch these exceptions and return the index of the list entry instead
1315 1318 of raising the exception. The attribute 'after' will be set to the
1316 1319 exception type. The attribute 'match' will be None. This allows you to
1317 1320 write code like this::
1318 1321
1319 1322 index = p.expect (['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
1320 1323 if index == 0:
1321 1324 do_something()
1322 1325 elif index == 1:
1323 1326 do_something_else()
1324 1327 elif index == 2:
1325 1328 do_some_other_thing()
1326 1329 elif index == 3:
1327 1330 do_something_completely_different()
1328 1331
1329 1332 instead of code like this::
1330 1333
1331 1334 try:
1332 1335 index = p.expect (['good', 'bad'])
1333 1336 if index == 0:
1334 1337 do_something()
1335 1338 elif index == 1:
1336 1339 do_something_else()
1337 1340 except EOF:
1338 1341 do_some_other_thing()
1339 1342 except TIMEOUT:
1340 1343 do_something_completely_different()
1341 1344
1342 1345 These two forms are equivalent. It all depends on what you want. You
1343 1346 can also just expect the EOF if you are waiting for all output of a
1344 1347 child to finish. For example::
1345 1348
1346 1349 p = pexpect.spawn('/bin/ls')
1347 1350 p.expect (pexpect.EOF)
1348 1351 print p.before
1349 1352
1350 1353 If you are trying to optimize for speed then see expect_list().
1351 1354 """
1352 1355
1353 1356 compiled_pattern_list = self.compile_pattern_list(pattern)
1354 1357 return self.expect_list(compiled_pattern_list, timeout, searchwindowsize)
1355 1358
1356 1359 def expect_list(self, pattern_list, timeout = -1, searchwindowsize = -1):
1357 1360
1358 1361 """This takes a list of compiled regular expressions and returns the
1359 1362 index into the pattern_list that matched the child output. The list may
1360 1363 also contain EOF or TIMEOUT (which are not compiled regular
1361 1364 expressions). This method is similar to the expect() method except that
1362 1365 expect_list() does not recompile the pattern list on every call. This
1363 1366 may help if you are trying to optimize for speed, otherwise just use
1364 1367 the expect() method. This is called by expect(). If timeout==-1 then
1365 1368 the self.timeout value is used. If searchwindowsize==-1 then the
1366 1369 self.searchwindowsize value is used. """
1367 1370
1368 1371 return self.expect_loop(searcher_re(pattern_list), timeout, searchwindowsize)
1369 1372
    def expect_exact(self, pattern_list, timeout = -1, searchwindowsize = -1):

        """This is similar to expect(), but uses plain string matching instead
        of compiled regular expressions in 'pattern_list'. The 'pattern_list'
        may be a string; a list or other sequence of strings; or TIMEOUT and
        EOF.

        This call might be faster than expect() for two reasons: string
        searching is faster than RE matching and it is possible to limit the
        search to just the end of the input buffer.

        This method is also useful when you don't want to have to worry about
        escaping regular expression characters that you want to match."""

        # A single string (or a bare EOF/TIMEOUT sentinel) is wrapped in a list.
        if isinstance(pattern_list, (bytes, unicode)) or pattern_list in (TIMEOUT, EOF):
            pattern_list = [pattern_list]
        return self.expect_loop(searcher_string(pattern_list), timeout, searchwindowsize)
1387 1390
    def expect_loop(self, searcher, timeout = -1, searchwindowsize = -1):

        """This is the common loop used inside expect. The 'searcher' should be
        an instance of searcher_re or searcher_string, which describes how and what
        to search for in the input.

        See expect() for other arguments, return value and exceptions. """

        self.searcher = searcher

        if timeout == -1:
            timeout = self.timeout
        if timeout is not None:
            end_time = time.time() + timeout
        if searchwindowsize == -1:
            searchwindowsize = self.searchwindowsize

        try:
            incoming = self.buffer
            freshlen = len(incoming)
            while True: # Keep reading until exception or return.
                index = searcher.search(incoming, freshlen, searchwindowsize)
                if index >= 0:
                    # Match found: split the buffered data around the match.
                    self.buffer = incoming[searcher.end : ]
                    self.before = incoming[ : searcher.start]
                    self.after = incoming[searcher.start : searcher.end]
                    self.match = searcher.match
                    self.match_index = index
                    return self.match_index
                # No match at this point
                if timeout is not None and timeout < 0:
                    raise TIMEOUT ('Timeout exceeded in expect_any().')
                # Still have time left, so read more data
                c = self.read_nonblocking (self.maxread, timeout)
                freshlen = len(c)
                time.sleep (0.0001)
                incoming = incoming + c
                if timeout is not None:
                    timeout = end_time - time.time()
        except EOF as e:
            self.buffer = self._empty_buffer
            self.before = incoming
            self.after = EOF
            index = searcher.eof_index
            if index >= 0:
                # EOF was one of the expected patterns; report its index.
                self.match = EOF
                self.match_index = index
                return self.match_index
            else:
                self.match = None
                self.match_index = None
                raise EOF (str(e) + '\n' + str(self))
        except TIMEOUT as e:
            self.buffer = incoming
            self.before = incoming
            self.after = TIMEOUT
            index = searcher.timeout_index
            if index >= 0:
                # TIMEOUT was one of the expected patterns; report its index.
                self.match = TIMEOUT
                self.match_index = index
                return self.match_index
            else:
                self.match = None
                self.match_index = None
                raise TIMEOUT (str(e) + '\n' + str(self))
        except:
            # Any other error: expose what was read, then re-raise.
            self.before = incoming
            self.after = None
            self.match = None
            self.match_index = None
            raise
1459 1462
1460 1463 def getwinsize(self):
1461 1464
1462 1465 """This returns the terminal window size of the child tty. The return
1463 1466 value is a tuple of (rows, cols). """
1464 1467
1465 1468 TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912L)
1466 1469 s = struct.pack('HHHH', 0, 0, 0, 0)
1467 1470 x = fcntl.ioctl(self.fileno(), TIOCGWINSZ, s)
1468 1471 return struct.unpack('HHHH', x)[0:2]
1469 1472
1470 1473 def setwinsize(self, r, c):
1471 1474
1472 1475 """This sets the terminal window size of the child tty. This will cause
1473 1476 a SIGWINCH signal to be sent to the child. This does not change the
1474 1477 physical window size. It changes the size reported to TTY-aware
1475 1478 applications like vi or curses -- applications that respond to the
1476 1479 SIGWINCH signal. """
1477 1480
1478 1481 # Check for buggy platforms. Some Python versions on some platforms
1479 1482 # (notably OSF1 Alpha and RedHat 7.1) truncate the value for
1480 1483 # termios.TIOCSWINSZ. It is not clear why this happens.
1481 1484 # These platforms don't seem to handle the signed int very well;
1482 1485 # yet other platforms like OpenBSD have a large negative value for
1483 1486 # TIOCSWINSZ and they don't have a truncate problem.
1484 1487 # Newer versions of Linux have totally different values for TIOCSWINSZ.
1485 1488 # Note that this fix is a hack.
1486 1489 TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
1487 1490 if TIOCSWINSZ == 2148037735L: # L is not required in Python >= 2.2.
1488 1491 TIOCSWINSZ = -2146929561 # Same bits, but with sign.
1489 1492 # Note, assume ws_xpixel and ws_ypixel are zero.
1490 1493 s = struct.pack('HHHH', r, c, 0, 0)
1491 1494 fcntl.ioctl(self.fileno(), TIOCSWINSZ, s)
1492 1495
1493 1496 def interact(self, escape_character = b'\x1d', input_filter = None, output_filter = None):
1494 1497
1495 1498 """This gives control of the child process to the interactive user (the
1496 1499 human at the keyboard). Keystrokes are sent to the child process, and
1497 1500 the stdout and stderr output of the child process is printed. This
1498 1501 simply echos the child stdout and child stderr to the real stdout and
1499 1502 it echos the real stdin to the child stdin. When the user types the
1500 1503 escape_character this method will stop. The default for
1501 1504 escape_character is ^]. This should not be confused with ASCII 27 --
1502 1505 the ESC character. ASCII 29 was chosen for historical merit because
1503 1506 this is the character used by 'telnet' as the escape character. The
1504 1507 escape_character will not be sent to the child process.
1505 1508
1506 1509 You may pass in optional input and output filter functions. These
1507 1510 functions should take a string and return a string. The output_filter
1508 1511 will be passed all the output from the child process. The input_filter
1509 1512 will be passed all the keyboard input from the user. The input_filter
1510 1513 is run BEFORE the check for the escape_character.
1511 1514
1512 1515 Note that if you change the window size of the parent the SIGWINCH
1513 1516 signal will not be passed through to the child. If you want the child
1514 1517 window size to change when the parent's window size changes then do
1515 1518 something like the following example::
1516 1519
1517 1520 import pexpect, struct, fcntl, termios, signal, sys
1518 1521 def sigwinch_passthrough (sig, data):
1519 1522 s = struct.pack("HHHH", 0, 0, 0, 0)
1520 1523 a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ , s))
1521 1524 global p
1522 1525 p.setwinsize(a[0],a[1])
1523 1526 p = pexpect.spawn('/bin/bash') # Note this is global and used in sigwinch_passthrough.
1524 1527 signal.signal(signal.SIGWINCH, sigwinch_passthrough)
1525 1528 p.interact()
1526 1529 """
1527 1530
1528 1531 # Flush the buffer.
1529 1532 if PY3: self.stdout.write(_cast_unicode(self.buffer, self.encoding))
1530 1533 else: self.stdout.write(self.buffer)
1531 1534 self.stdout.flush()
1532 1535 self.buffer = self._empty_buffer
1533 1536 mode = tty.tcgetattr(self.STDIN_FILENO)
1534 1537 tty.setraw(self.STDIN_FILENO)
1535 1538 try:
1536 1539 self.__interact_copy(escape_character, input_filter, output_filter)
1537 1540 finally:
1538 1541 tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
1539 1542
1540 1543 def __interact_writen(self, fd, data):
1541 1544
1542 1545 """This is used by the interact() method.
1543 1546 """
1544 1547
1545 1548 while data != b'' and self.isalive():
1546 1549 n = os.write(fd, data)
1547 1550 data = data[n:]
1548 1551
1549 1552 def __interact_read(self, fd):
1550 1553
1551 1554 """This is used by the interact() method.
1552 1555 """
1553 1556
1554 1557 return os.read(fd, 1000)
1555 1558
    def __interact_copy(self, escape_character = None, input_filter = None, output_filter = None):

        """This is used by the interact() method.

        Copy loop: multiplexes between the child's pty and the real stdin,
        forwarding data in both directions until the child dies or the user
        types escape_character.
        """

        while self.isalive():
            # Block until the child fd or our stdin has data to read.
            r,w,e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
            if self.child_fd in r:
                # Child produced output: filter, optionally log, then echo it.
                data = self.__interact_read(self.child_fd)
                if output_filter: data = output_filter(data)
                if self.logfile is not None:
                    self.logfile.write (data)
                    self.logfile.flush()
                os.write(self.STDOUT_FILENO, data)
            if self.STDIN_FILENO in r:
                # User typed something: filter it, then look for the escape
                # character. Data before the escape is still forwarded; the
                # escape character itself is never sent to the child.
                data = self.__interact_read(self.STDIN_FILENO)
                if input_filter: data = input_filter(data)
                i = data.rfind(escape_character)
                if i != -1:
                    data = data[:i]
                    self.__interact_writen(self.child_fd, data)
                    break
                self.__interact_writen(self.child_fd, data)
1579 1582
    def __select (self, iwtd, owtd, ewtd, timeout=None):

        """This is a wrapper around select.select() that ignores signals. If
        select.select raises a select.error exception and errno is an EINTR
        error then it is ignored. Mainly this is used to ignore sigwinch
        (terminal resize). """

        # if select() is interrupted by a signal (errno==EINTR) then
        # we loop back and enter the select() again.
        if timeout is not None:
            # Remember the absolute deadline so retries shrink the timeout.
            end_time = time.time() + timeout
        while True:
            try:
                return select.select (iwtd, owtd, ewtd, timeout)
            except select.error as e:
                if e.args[0] == errno.EINTR:
                    # if we loop back we have to subtract the amount of time we already waited.
                    if timeout is not None:
                        timeout = end_time - time.time()
                        if timeout < 0:
                            # Deadline already passed: report nothing ready.
                            return ([],[],[])
                else: # something else caused the select.error, so this really is an exception
                    raise
1603 1606
class spawn(spawnb):
    """This is the main class interface for Pexpect. Use this class to start
    and control child applications.

    This subclass of the byte-oriented spawnb decodes all child output using
    ``encoding`` (UTF-8 by default), so its buffer and match results are
    unicode strings.
    """

    # Unicode counterparts of spawnb's byte-oriented buffer settings.
    _buffer_type = unicode
    _empty_buffer = u''
    _pty_newline = u'\r\n'

    def _cast_buffer_type(self, s):
        # Decode byte strings into unicode using this spawn's encoding.
        return _cast_unicode(s, self.encoding)

    def __init__(self, command, args=None, timeout=30, maxread=2000, searchwindowsize=None,
                 logfile=None, cwd=None, env=None, encoding='utf-8'):
        """Start *command*. All parameters except *encoding* are forwarded to
        spawnb.__init__; *encoding* selects the codec used to decode the
        child's output (default 'utf-8')."""
        # NOTE: args defaults to None instead of a mutable [] so a single
        # list object is not shared between every spawn() call.
        if args is None:
            args = []
        super(spawn, self).__init__(command, args, timeout=timeout, maxread=maxread,
                        searchwindowsize=searchwindowsize, logfile=logfile, cwd=cwd, env=env)
        self.encoding = encoding

    def _prepare_regex_pattern(self, p):
        "Recompile bytes regexes as unicode regexes."
        if isinstance(p.pattern, bytes):
            p = re.compile(p.pattern.decode(self.encoding), p.flags)
        return p

    def read_nonblocking(self, size=1, timeout=-1):
        return super(spawn, self).read_nonblocking(size=size, timeout=timeout)\
            .decode(self.encoding)

    read_nonblocking.__doc__ = spawnb.read_nonblocking.__doc__
1631 1634
1632 1635
1633 1636 ##############################################################################
1634 1637 # End of spawn class
1635 1638 ##############################################################################
1636 1639
class searcher_string (object):

    """Plain-string search helper for the spawn.expect_any() method. This
    helper class exists for speed; for more powerful regex patterns see the
    helper class, searcher_re.

    Attributes:

        eof_index - index of EOF, or -1
        timeout_index - index of TIMEOUT, or -1

    After a successful match by the search() method the following attributes
    are available:

        start - index into the buffer, first byte of match
        end - index into the buffer, first byte after match
        match - the matching string itself

    """

    def __init__(self, strings):

        """Create a searcher_string. The 'strings' argument may be a list or
        other sequence of strings, possibly including the EOF or TIMEOUT
        types, which are tracked by index rather than searched for."""

        self.eof_index = -1
        self.timeout_index = -1
        self._strings = []
        for position, candidate in enumerate(strings):
            if candidate is EOF:
                self.eof_index = position
            elif candidate is TIMEOUT:
                self.timeout_index = position
            else:
                self._strings.append((position, candidate))

    def __str__(self):

        """Return a human-readable string describing the searcher's state."""

        entries = [(idx, ' %d: "%s"' % (idx, text)) for idx, text in self._strings]
        entries.append((-1, 'searcher_string:'))
        if self.eof_index >= 0:
            entries.append((self.eof_index, ' %d: EOF' % self.eof_index))
        if self.timeout_index >= 0:
            entries.append((self.timeout_index, ' %d: TIMEOUT' % self.timeout_index))
        entries.sort()
        return '\n'.join(line for _, line in entries)

    def search(self, buffer, freshlen, searchwindowsize=None):

        """Search 'buffer' for the first occurence of one of the search
        strings. 'freshlen' must indicate the number of bytes at the end of
        'buffer' which have not been searched before; it lets us avoid
        rescanning the same, possibly big, buffer over and over again.

        See class spawn for the 'searchwindowsize' argument.

        If there is a match this returns the index of that string, and sets
        'start', 'end' and 'match'. Otherwise, this returns -1. """

        no_match = len(buffer)
        first_match = no_match
        best_index = None
        best_match = None

        # Further optimizations beyond 'freshlen' could include Boyer-Moore
        # or a multi-string scanner, but the simple find() loop has proved
        # adequate in practice.
        for index, needle in self._strings:
            if searchwindowsize is None:
                # The match, if any, can only be in the fresh data, or
                # overlapping the very end of the old data.
                offset = -(freshlen + len(needle))
            else:
                # Better obey searchwindowsize.
                offset = -searchwindowsize
            pos = buffer.find(needle, offset)
            if 0 <= pos < first_match:
                first_match = pos
                best_index, best_match = index, needle
        if first_match == no_match:
            return -1
        self.match = best_match
        self.start = first_match
        self.end = first_match + len(best_match)
        return best_index
1733 1736
class searcher_re (object):

    """Regular-expression search helper for the spawn.expect_any() method.
    This helper class provides powerful pattern matching. For speed, see the
    helper class, searcher_string.

    Attributes:

        eof_index - index of EOF, or -1
        timeout_index - index of TIMEOUT, or -1

    After a successful match by the search() method the following attributes
    are available:

        start - index into the buffer, first byte of match
        end - index into the buffer, first byte after match
        match - the re.match object returned by a succesful re.search

    """

    def __init__(self, patterns):

        """Create a searcher for 'patterns', which may be a list or other
        sequence of compiled regular expressions, possibly including the EOF
        or TIMEOUT types (tracked by index rather than searched for)."""

        self.eof_index = -1
        self.timeout_index = -1
        self._searches = []
        for position, candidate in enumerate(patterns):
            if candidate is EOF:
                self.eof_index = position
            elif candidate is TIMEOUT:
                self.timeout_index = position
            else:
                self._searches.append((position, candidate))

    def __str__(self):

        """Return a human-readable string describing the searcher's state."""

        entries = [(idx, ' %d: re.compile("%s")' % (idx, str(pat.pattern)))
                   for idx, pat in self._searches]
        entries.append((-1, 'searcher_re:'))
        if self.eof_index >= 0:
            entries.append((self.eof_index, ' %d: EOF' % self.eof_index))
        if self.timeout_index >= 0:
            entries.append((self.timeout_index, ' %d: TIMEOUT' % self.timeout_index))
        entries.sort()
        return '\n'.join(line for _, line in entries)

    def search(self, buffer, freshlen, searchwindowsize=None):

        """Search 'buffer' for the first occurence of one of the regular
        expressions. 'freshlen' indicates the number of new bytes at the end
        of 'buffer', but is unused here: a regex match length cannot be
        predicted, so the whole window is rescanned each time.

        See class spawn for the 'searchwindowsize' argument.

        If there is a match this returns the index of that pattern, and sets
        'start', 'end' and 'match'. Otherwise, returns -1."""

        no_match = len(buffer)
        first_match = no_match
        best_index = None
        best_match = None
        # Restrict the scan to a trailing window when one was requested.
        if searchwindowsize is None:
            searchstart = 0
        else:
            searchstart = max(0, len(buffer) - searchwindowsize)
        for index, pattern in self._searches:
            found = pattern.search(buffer, searchstart)
            if found is None:
                continue
            begin = found.start()
            if begin < first_match:
                first_match = begin
                best_match = found
                best_index = index
        if first_match == no_match:
            return -1
        self.start = first_match
        self.match = best_match
        self.end = best_match.end()
        return best_index
1820 1823
def which (filename):

    """This takes a given filename; tries to find it in the environment path;
    then checks if it is executable. This returns the full path to the filename
    if found and executable. Otherwise this returns None."""

    # Special case where filename already contains a path.
    if os.path.dirname(filename) != '':
        if os.access (filename, os.X_OK):
            return filename

    # 2to3: dict.has_key() does not exist on Python 3; use 'in' instead.
    if 'PATH' not in os.environ or os.environ['PATH'] == '':
        p = os.defpath
    else:
        p = os.environ['PATH']

    pathlist = p.split(os.pathsep)

    # Return the first executable match found along the path.
    for path in pathlist:
        f = os.path.join(path, filename)
        if os.access(f, os.X_OK):
            return f
    return None
1844 1847
def split_command_line(command_line):

    """Split *command_line* into a list of arguments. Arguments are separated
    by whitespace, but embedded single quotes, double quotes and backslash
    escapes are honoured. This grammar cannot be expressed with a regular
    expression, so a small state machine parses the line instead."""

    # Parser states.
    BASIC = 0       # accumulating ordinary characters
    ESCAPE = 1      # previous character was a backslash
    SQUOTE = 2      # inside '...'
    DQUOTE = 3      # inside "..."
    SPACE = 4       # consuming whitespace between arguments

    args = []
    current = ''
    state = BASIC

    for ch in command_line:
        if state == ESCAPE:
            # Take the escaped character literally.
            current = current + ch
            state = BASIC
        elif state == SQUOTE:
            if ch == r"'":
                state = BASIC
            else:
                current = current + ch
        elif state == DQUOTE:
            if ch == r'"':
                state = BASIC
            else:
                current = current + ch
        else:  # BASIC or SPACE
            if ch == '\\':
                state = ESCAPE
            elif ch == r"'":
                state = SQUOTE
            elif ch == r'"':
                state = DQUOTE
            elif ch.isspace():
                if state == SPACE:
                    pass  # Already between arguments; nothing to flush.
                else:
                    args.append(current)
                    current = ''
                    state = SPACE
            else:
                current = current + ch
                state = BASIC

    if current != '':
        args.append(current)
    return args
1899 1902
1900 1903 # vi:set sr et ts=4 sw=4 ft=python :
@@ -1,224 +1,224 b''
1 1 # System library imports.
2 2 from IPython.external.qt import QtGui
3 3 from pygments.formatters.html import HtmlFormatter
4 4 from pygments.lexer import RegexLexer, _TokenType, Text, Error
5 5 from pygments.lexers import PythonLexer
6 6 from pygments.styles import get_style_by_name
7 7
8 8
def get_tokens_unprocessed(self, text, stack=('root',)):
    """ Split ``text`` into (tokentype, text) pairs.

    Monkeypatched to store the final stack on the object itself.
    """
    pos = 0
    tokendefs = self._tokens
    # Resume from the state stack saved by a previous call, if any; this is
    # what lets highlighting carry state across successive blocks of text.
    if hasattr(self, '_saved_state_stack'):
        statestack = list(self._saved_state_stack)
    else:
        statestack = list(stack)
    statetokens = tokendefs[statestack[-1]]
    while 1:
        for rexmatch, action, new_state in statetokens:
            m = rexmatch(text, pos)
            if m:
                if type(action) is _TokenType:
                    yield pos, action, m.group()
                else:
                    # action is a callback producing (pos, token, text) items.
                    for item in action(self, m):
                        yield item
                pos = m.end()
                if new_state is not None:
                    # state transition
                    if isinstance(new_state, tuple):
                        for state in new_state:
                            if state == '#pop':
                                statestack.pop()
                            elif state == '#push':
                                statestack.append(statestack[-1])
                            else:
                                statestack.append(state)
                    elif isinstance(new_state, int):
                        # pop
                        del statestack[new_state:]
                    elif new_state == '#push':
                        statestack.append(statestack[-1])
                    else:
                        assert False, "wrong state def: %r" % new_state
                    statetokens = tokendefs[statestack[-1]]
                break
        else:
            # No rule matched at this position: emit newline/Error and advance.
            try:
                if text[pos] == '\n':
                    # at EOL, reset state to "root"
                    pos += 1
                    statestack = ['root']
                    statetokens = tokendefs['root']
                    yield pos, Text, u'\n'
                    continue
                yield pos, Error, text[pos]
                pos += 1
            except IndexError:
                break
    # Save the final state so the next call can resume mid-construct.
    self._saved_state_stack = list(statestack)
64 64
# Monkeypatch! Replace RegexLexer's tokenizer with the stateful version
# above so lexer state survives between highlightBlock() calls.
RegexLexer.get_tokens_unprocessed = get_tokens_unprocessed
67 67
68 68
class PygmentsBlockUserData(QtGui.QTextBlockUserData):
    """ Storage for the user data associated with each line.
    """

    # Default lexer state stack for a line; overridden per instance via kwds.
    syntax_stack = ('root',)

    def __init__(self, **kwds):
        """Store every keyword argument as an attribute on the instance."""
        # 2to3: use items() rather than the Python-2-only iteritems() so
        # this also runs under Python 3 (items() is equally correct on 2).
        for key, value in kwds.items():
            setattr(self, key, value)
        QtGui.QTextBlockUserData.__init__(self)

    def __repr__(self):
        """Debug representation showing the stored syntax stack."""
        attrs = ['syntax_stack']
        kwds = ', '.join([ '%s=%r' % (attr, getattr(self, attr))
                           for attr in attrs ])
        return 'PygmentsBlockUserData(%s)' % kwds
85 85
86 86
class PygmentsHighlighter(QtGui.QSyntaxHighlighter):
    """ Syntax highlighter that uses Pygments for parsing. """

    #---------------------------------------------------------------------------
    # 'QSyntaxHighlighter' interface
    #---------------------------------------------------------------------------

    def __init__(self, parent, lexer=None):
        """ Create a highlighter for *parent*, optionally using the given
        Pygments *lexer* (defaults to PythonLexer).
        """
        super(PygmentsHighlighter, self).__init__(parent)

        # Scratch document used by _get_format_from_document to resolve a
        # CSS stylesheet plus Pygments HTML into a QTextCharFormat.
        self._document = QtGui.QTextDocument()
        self._formatter = HtmlFormatter(nowrap=True)
        self._lexer = lexer if lexer else PythonLexer()
        self.set_style('default')

    def highlightBlock(self, string):
        """ Highlight a block of text.
        """
        # Restore the lexer state saved on the previous block (see the
        # get_tokens_unprocessed monkeypatch) so multi-line constructs
        # such as triple-quoted strings highlight correctly.
        prev_data = self.currentBlock().previous().userData()
        if prev_data is not None:
            self._lexer._saved_state_stack = prev_data.syntax_stack
        elif hasattr(self._lexer, '_saved_state_stack'):
            del self._lexer._saved_state_stack

        # Lex the text using Pygments
        index = 0
        for token, text in self._lexer.get_tokens(string):
            length = len(text)
            self.setFormat(index, length, self._get_format(token))
            index += length

        # Persist the final lexer state on this block for the next line.
        if hasattr(self._lexer, '_saved_state_stack'):
            data = PygmentsBlockUserData(
                syntax_stack=self._lexer._saved_state_stack)
            self.currentBlock().setUserData(data)
            # Clean up for the next go-round.
            del self._lexer._saved_state_stack

    #---------------------------------------------------------------------------
    # 'PygmentsHighlighter' interface
    #---------------------------------------------------------------------------

    def set_style(self, style):
        """ Sets the style to the specified Pygments style.

        *style* may be a style name or a Pygments style class.
        """
        # 2to3: 'basestring' does not exist on Python 3; style names are str.
        if isinstance(style, str):
            style = get_style_by_name(style)
        self._style = style
        self._clear_caches()

    def set_style_sheet(self, stylesheet):
        """ Sets a CSS stylesheet. The classes in the stylesheet should
        correspond to those generated by:

            pygmentize -S <style> -f html

        Note that 'set_style' and 'set_style_sheet' completely override each
        other, i.e. they cannot be used in conjunction.
        """
        self._document.setDefaultStyleSheet(stylesheet)
        self._style = None
        self._clear_caches()

    #---------------------------------------------------------------------------
    # Protected interface
    #---------------------------------------------------------------------------

    def _clear_caches(self):
        """ Clear caches for brushes and formats.
        """
        self._brushes = {}
        self._formats = {}

    def _get_format(self, token):
        """ Returns a QTextCharFormat for token or None.
        """
        if token in self._formats:
            return self._formats[token]

        if self._style is None:
            result = self._get_format_from_document(token, self._document)
        else:
            result = self._get_format_from_style(token, self._style)

        # Cache the computed format; the cache is reset by _clear_caches().
        self._formats[token] = result
        return result

    def _get_format_from_document(self, token, document):
        """ Returns a QTextCharFormat for token by rendering a dummy token
        through the HTML formatter and the stylesheet of our scratch document.
        """
        # 2to3: use the next() builtin rather than the removed .next() method.
        code, html = next(self._formatter._format_lines([(token, u'dummy')]))
        self._document.setHtml(html)
        return QtGui.QTextCursor(self._document).charFormat()

    def _get_format_from_style(self, token, style):
        """ Returns a QTextCharFormat for token by reading a Pygments style.
        """
        result = QtGui.QTextCharFormat()
        for key, value in style.style_for_token(token).items():
            if value:
                if key == 'color':
                    result.setForeground(self._get_brush(value))
                elif key == 'bgcolor':
                    result.setBackground(self._get_brush(value))
                elif key == 'bold':
                    result.setFontWeight(QtGui.QFont.Bold)
                elif key == 'italic':
                    result.setFontItalic(True)
                elif key == 'underline':
                    result.setUnderlineStyle(
                        QtGui.QTextCharFormat.SingleUnderline)
                elif key == 'sans':
                    result.setFontStyleHint(QtGui.QFont.SansSerif)
                elif key == 'roman':
                    result.setFontStyleHint(QtGui.QFont.Times)
                elif key == 'mono':
                    result.setFontStyleHint(QtGui.QFont.TypeWriter)
        return result

    def _get_brush(self, color):
        """ Returns a brush for the color.
        """
        result = self._brushes.get(color)
        if result is None:
            qcolor = self._get_color(color)
            result = QtGui.QBrush(qcolor)
            # Cache per hex color string; reset by _clear_caches().
            self._brushes[color] = result
        return result

    def _get_color(self, color):
        """ Returns a QColor built from a Pygments 'rrggbb' color string.
        """
        qcolor = QtGui.QColor()
        qcolor.setRgb(int(color[:2], base=16),
                      int(color[2:4], base=16),
                      int(color[4:6], base=16))
        return qcolor
224 224
@@ -1,97 +1,97 b''
1 1 """Implementation of the parametric test support for Python 2.x
2 2 """
3 3
4 4 #-----------------------------------------------------------------------------
5 5 # Copyright (C) 2009-2011 The IPython Development Team
6 6 #
7 7 # Distributed under the terms of the BSD License. The full license is in
8 8 # the file COPYING, distributed as part of this software.
9 9 #-----------------------------------------------------------------------------
10 10
11 11 #-----------------------------------------------------------------------------
12 12 # Imports
13 13 #-----------------------------------------------------------------------------
14 14
15 15 import sys
16 16 import unittest
17 17 from compiler.consts import CO_GENERATOR
18 18
19 19 #-----------------------------------------------------------------------------
20 20 # Classes and functions
21 21 #-----------------------------------------------------------------------------
22 22
def isgenerator(func):
    """Return True if *func* (a function or bound method) is a generator.

    Inspects the CO_GENERATOR flag on the underlying code object; objects
    without a code object (e.g. ints) yield False.
    """
    # 2to3: 'func_code' was removed in Python 3; '__code__' exists on
    # functions and bound methods in Python 2.6+ as well as Python 3.
    try:
        return func.__code__.co_flags & CO_GENERATOR != 0
    except AttributeError:
        return False
28 28
class ParametricTestCase(unittest.TestCase):
    """Write parametric tests in normal unittest testcase form.

    Limitations: the last iteration misses printing out a newline when running
    in verbose mode.
    """
    def run_parametric(self, result, testMethod):
        """Drive a generator-style test: each yield is reported to *result*
        as one test, with setUp/tearDown run around every iteration."""
        # But if we have a test generator, we iterate it ourselves
        testgen = testMethod()
        while True:
            try:
                # Initialize test
                result.startTest(self)

                # SetUp
                try:
                    self.setUp()
                except KeyboardInterrupt:
                    raise
                except:
                    result.addError(self, sys.exc_info())
                    return
                # Test execution
                ok = False
                try:
                    next(testgen)
                    ok = True
                except StopIteration:
                    # We stop the loop
                    break
                except self.failureException:
                    result.addFailure(self, sys.exc_info())
                except KeyboardInterrupt:
                    raise
                except:
                    result.addError(self, sys.exc_info())
                # TearDown
                try:
                    self.tearDown()
                except KeyboardInterrupt:
                    raise
                except:
                    result.addError(self, sys.exc_info())
                    ok = False
                if ok: result.addSuccess(self)

            finally:
                # Always balance the startTest() call above.
                result.stopTest(self)

    def run(self, result=None):
        """Run this test, routing generator tests through run_parametric."""
        if result is None:
            result = self.defaultTestResult()
        testMethod = getattr(self, self._testMethodName)
        # For normal tests, we just call the base class and return that
        if isgenerator(testMethod):
            return self.run_parametric(result, testMethod)
        else:
            return super(ParametricTestCase, self).run(result)
87 87
88 88
def parametric(func):
    """Decorator to make a simple function into a normal test via unittest."""
    # Build a ParametricTestCase subclass whose single 'test' method is the
    # decorated function, named after it so test runners report it sensibly.
    tester = type(func.__name__, (ParametricTestCase,),
                  {'test': staticmethod(func)})
    return tester
@@ -1,195 +1,195 b''
1 1 """Common utilities for the various process_* implementations.
2 2
3 3 This file is only meant to be imported by the platform-specific implementations
4 4 of subprocess utilities, and it contains tools that are common to all of them.
5 5 """
6 6
7 7 #-----------------------------------------------------------------------------
8 8 # Copyright (C) 2010-2011 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-----------------------------------------------------------------------------
13 13
14 14 #-----------------------------------------------------------------------------
15 15 # Imports
16 16 #-----------------------------------------------------------------------------
17 17 import subprocess
18 18 import shlex
19 19 import sys
20 20
21 21 from IPython.utils import py3compat
22 22
23 23 #-----------------------------------------------------------------------------
24 24 # Function definitions
25 25 #-----------------------------------------------------------------------------
26 26
def read_no_interrupt(p):
    """Read from a pipe ignoring EINTR errors.

    This is necessary because when reading from pipes with GUI event loops
    running in the background, often interrupts are raised that stop the
    command from completing. On EINTR this simply returns None; any other
    IOError propagates."""
    import errno

    try:
        data = p.read()
    except IOError as exc:
        if exc.errno == errno.EINTR:
            return None
        raise
    else:
        return data
40 40
41 41
def process_handler(cmd, callback, stderr=subprocess.PIPE):
    """Open a command in a shell subprocess and execute a callback.

    This function provides common scaffolding for creating subprocess.Popen()
    calls. It creates a Popen object and then calls the callback with it.

    Parameters
    ----------
    cmd : str
        A string to be executed with the underlying system shell (by calling
        :func:`Popen` with ``shell=True``.

    callback : callable
        A one-argument function that will be called with the Popen object.

    stderr : file descriptor number, optional
        By default this is set to ``subprocess.PIPE``, but you can also pass the
        value ``subprocess.STDOUT`` to force the subprocess' stderr to go into
        the same file descriptor as its stdout. This is useful to read stdout
        and stderr combined in the order they are generated.

    Returns
    -------
    The return value of the provided callback is returned.
    """
    # Flush our own streams first so child output is not interleaved with
    # previously buffered parent output.
    sys.stdout.flush()
    sys.stderr.flush()
    # On win32, close_fds can't be true when using pipes for stdin/out/err
    close_fds = sys.platform != 'win32'
    p = subprocess.Popen(cmd, shell=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=stderr,
                         close_fds=close_fds)

    try:
        out = callback(p)
    except KeyboardInterrupt:
        print('^C')
        sys.stdout.flush()
        sys.stderr.flush()
        out = None
    finally:
        # Make really sure that we don't leave processes behind, in case the
        # call above raises an exception
        # We start by assuming the subprocess finished (to avoid NameErrors
        # later depending on the path taken)
        if p.returncode is None:
            try:
                p.terminate()
                p.poll()
            except OSError:
                pass
        # One last try on our way out
        if p.returncode is None:
            try:
                p.kill()
            except OSError:
                pass

    return out
103 103
104 104
def getoutput(cmd):
    """Return standard output of executing cmd in a shell.

    Accepts the same arguments as os.system().

    Parameters
    ----------
    cmd : str
        A command to be executed in the system shell.

    Returns
    -------
    stdout : str
    """
    # Merge stderr into stdout so the combined stream comes back in order.
    out = process_handler(cmd, lambda p: p.communicate()[0], subprocess.STDOUT)
    return '' if out is None else py3compat.bytes_to_str(out)
124 124
125 125
def getoutputerror(cmd):
    """Return (standard output, standard error) of executing cmd in a shell.

    Accepts the same arguments as os.system().

    Parameters
    ----------
    cmd : str
        A command to be executed in the system shell.

    Returns
    -------
    stdout : str
    stderr : str
    """
    out_err = process_handler(cmd, lambda p: p.communicate())
    if out_err is None:
        return '', ''
    stdout, stderr = out_err
    return py3compat.bytes_to_str(stdout), py3compat.bytes_to_str(stderr)
147 147
148 148
def arg_split(s, posix=False, strict=True):
    """Split a command line's arguments in a shell-like manner.

    This is a modified version of the standard library's shlex.split()
    function, but with a default of posix=False for splitting, so that quotes
    in inputs are respected.

    if strict=False, then any errors shlex.split would raise will result in the
    unparsed remainder being the last element of the list, rather than raising.
    This is because we sometimes use arg_split to parse things other than
    command-line args.
    """
    # Python 2's shlex module is buggy with unicode input
    # (http://bugs.python.org/issue1170), so encode to UTF-8 first and
    # decode the resulting tokens again afterwards. Fixed in Python 3.
    was_unicode = False
    if (not py3compat.PY3) and isinstance(s, unicode):
        was_unicode = True
        s = s.encode('utf-8')

    lex = shlex.shlex(s, posix=posix)
    lex.whitespace_split = True
    lex.commenters = ''  # fix for GH-1269: do not treat '#' as a comment

    # Pull tokens one at a time so that a parse error (e.g. an unclosed
    # quote, as in %timeit f(" ")) can either propagate (strict=True) or
    # leave the unparsed remainder as the final token (strict=False).
    tokens = []
    while True:
        try:
            tokens.append(next(lex))
        except StopIteration:
            break
        except ValueError:
            if strict:
                raise
            # couldn't parse, get remaining blob as last token
            tokens.append(lex.token)
            break

    if was_unicode:
        # Convert the tokens back to unicode.
        tokens = [t.decode('utf-8') for t in tokens]
    return tokens
@@ -1,91 +1,95 b''
1 1 import sys
2 2 import time
3 3 from io import StringIO
4 4
5 5 from session import extract_header, Message
6 6
7 7 from IPython.utils import io, text, encoding
8 from IPython.utils import py3compat
8 9
9 10 #-----------------------------------------------------------------------------
10 11 # Globals
11 12 #-----------------------------------------------------------------------------
12 13
13 14 #-----------------------------------------------------------------------------
14 15 # Stream classes
15 16 #-----------------------------------------------------------------------------
16 17
class OutStream(object):
    """A file like object that publishes the stream to a 0MQ PUB socket."""

    # Seconds between automatic flushes of the internal buffer.
    flush_interval = 0.05
    # 0MQ topic (ident) used when publishing; subclasses/callers may set it.
    topic=None

    def __init__(self, session, pub_socket, name):
        self.session = session
        self.pub_socket = pub_socket
        self.name = name
        self.parent_header = {}
        self._new_buffer()

    def set_parent(self, parent):
        """Record the parent message header to attach to future stream messages."""
        self.parent_header = extract_header(parent)

    def close(self):
        """Mark the stream closed; subsequent I/O raises ValueError."""
        self.pub_socket = None

    def flush(self):
        """Publish buffered data as a 'stream' message and reset the buffer."""
        if self.pub_socket is None:
            raise ValueError(u'I/O operation on closed file')
        pending = self._buffer.getvalue()
        if not pending:
            return
        msg = self.session.send(self.pub_socket, u'stream',
                                content={u'name': self.name, u'data': pending},
                                parent=self.parent_header, ident=self.topic)
        # The socket itself may buffer (presumably a ZMQStream); flush it too.
        if hasattr(self.pub_socket, 'flush'):
            self.pub_socket.flush()
        self._buffer.close()
        self._new_buffer()

    def isatty(self):
        return False

    def __next__(self):
        raise IOError('Read not supported on a write only stream.')

    if not py3compat.PY3:
        # Python 2 iterator protocol spells it `next`.
        next = __next__

    def read(self, size=-1):
        raise IOError('Read not supported on a write only stream.')

    def readline(self, size=-1):
        raise IOError('Read not supported on a write only stream.')

    def write(self, string):
        """Buffer `string`, flushing automatically after `flush_interval` seconds."""
        if self.pub_socket is None:
            raise ValueError('I/O operation on closed file')
        # Coerce bytes to unicode so the StringIO buffer stays homogeneous.
        if not isinstance(string, unicode):
            string = string.decode(encoding.DEFAULT_ENCODING, 'replace')

        self._buffer.write(string)
        now = time.time()
        if self._start <= 0:
            # First write since the last flush: start the flush timer.
            self._start = now
        elif now - self._start > self.flush_interval:
            self.flush()

    def writelines(self, sequence):
        if self.pub_socket is None:
            raise ValueError('I/O operation on closed file')
        for item in sequence:
            self.write(item)

    def _new_buffer(self):
        # Fresh buffer; _start < 0 means "flush timer not running".
        self._buffer = StringIO()
        self._start = -1
@@ -1,298 +1,299 b''
1 1 #!/usr/bin/env python
2 2 # -*- coding: utf-8 -*-
3 3 """Setup script for IPython.
4 4
5 5 Under Posix environments it works like a typical setup.py script.
6 6 Under Windows, the command sdist is not supported, since IPython
7 7 requires utilities which are not available under Windows."""
8 8
9 9 #-----------------------------------------------------------------------------
10 10 # Copyright (c) 2008-2011, IPython Development Team.
11 11 # Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
12 12 # Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
13 13 # Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
14 14 #
15 15 # Distributed under the terms of the Modified BSD License.
16 16 #
17 17 # The full license is in the file COPYING.txt, distributed with this software.
18 18 #-----------------------------------------------------------------------------
19 19
20 20 #-----------------------------------------------------------------------------
21 21 # Minimal Python version sanity check
22 22 #-----------------------------------------------------------------------------
23 23 from __future__ import print_function
24 24
25 25 import sys
26 26
27 27 # This check is also made in IPython/__init__, don't forget to update both when
28 28 # changing Python version requirements.
29 29 #~ if sys.version[0:3] < '2.6':
30 30 #~ error = """\
31 31 #~ ERROR: 'IPython requires Python Version 2.6 or above.'
32 32 #~ Exiting."""
33 33 #~ print >> sys.stderr, error
34 34 #~ sys.exit(1)
35 35
36 36 PY3 = (sys.version_info[0] >= 3)
37 37
38 38 # At least we're on the python version we need, move on.
39 39
40 40 #-------------------------------------------------------------------------------
41 41 # Imports
42 42 #-------------------------------------------------------------------------------
43 43
44 44 # Stdlib imports
45 45 import os
46 46 import shutil
47 47
48 48 from glob import glob
49 49
50 50 # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
51 51 # update it when the contents of directories change.
52 52 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
53 53
54 54 from distutils.core import setup
55 55
56 56 # On Python 3, we need distribute (new setuptools) to do the 2to3 conversion
57 57 if PY3:
58 58 import setuptools
59 59
60 60 # Our own imports
61 61 from setupbase import target_update
62 62
63 63 from setupbase import (
64 64 setup_args,
65 65 find_packages,
66 66 find_package_data,
67 67 find_scripts,
68 68 find_data_files,
69 69 check_for_dependencies,
70 70 record_commit_info,
71 71 )
72 72 from setupext import setupext
73 73
74 74 isfile = os.path.isfile
75 75 pjoin = os.path.join
76 76
77 77 #-----------------------------------------------------------------------------
78 78 # Function definitions
79 79 #-----------------------------------------------------------------------------
80 80
def cleanup():
    """Clean up the junk left around by the build process.

    Best-effort removal of 'ipython.egg-info' (directory or file); all
    failures are deliberately swallowed. Skipped entirely for 'develop'
    builds, which need the egg-info to keep working.
    """
    if "develop" in sys.argv:
        return
    try:
        shutil.rmtree('ipython.egg-info')
    except:
        try:
            os.unlink('ipython.egg-info')
        except:
            pass
91 91
92 92 #-------------------------------------------------------------------------------
93 93 # Handle OS specific things
94 94 #-------------------------------------------------------------------------------
95 95
96 96 if os.name == 'posix':
97 97 os_name = 'posix'
98 98 elif os.name in ['nt','dos']:
99 99 os_name = 'windows'
100 100 else:
101 101 print('Unsupported operating system:',os.name)
102 102 sys.exit(1)
103 103
104 104 # Under Windows, 'sdist' has not been supported. Now that the docs build with
105 105 # Sphinx it might work, but let's not turn it on until someone confirms that it
106 106 # actually works.
107 107 if os_name == 'windows' and 'sdist' in sys.argv:
108 108 print('The sdist command is not available under Windows. Exiting.')
109 109 sys.exit(1)
110 110
#-------------------------------------------------------------------------------
# Things related to the IPython documentation
#-------------------------------------------------------------------------------

# update the manuals when building a source dist
if len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):
    import textwrap  # NOTE(review): imported but apparently unused here

    # FIXME - Disabled for now: we need to redo an automatic way
    # of generating the magic info inside the rst.
    #('docs/magic.tex',
    #['IPython/Magic.py'],
    #"cd doc && ./update_magic.sh" ),

    # Every man page is gzipped with the same recipe, so build the
    # target_update() triplets (target, deps, command) from the names.
    _manpages = ('ipcluster', 'ipcontroller', 'ipengine', 'iplogger',
                 'ipython', 'irunner', 'pycolor')
    to_update = [('docs/man/%s.1.gz' % page,
                  ['docs/man/%s.1' % page],
                  'cd docs/man && gzip -9c %s.1 > %s.1.gz' % (page, page))
                 for page in _manpages]

    for triplet in to_update:
        target_update(*triplet)

#---------------------------------------------------------------------------
# Find all the packages, package data, and data_files
#---------------------------------------------------------------------------

packages = find_packages()
package_data = find_package_data()
data_files = find_data_files()

setup_args['packages'] = packages
setup_args['package_data'] = package_data
setup_args['data_files'] = data_files
171 171
172 172 #---------------------------------------------------------------------------
173 173 # custom distutils commands
174 174 #---------------------------------------------------------------------------
175 175 # imports here, so they are after setuptools import if there was one
176 176 from distutils.command.sdist import sdist
177 177 from distutils.command.upload import upload
178 178
class UploadWindowsInstallers(upload):
    """Custom 'upload' command for pushing Windows .exe installers to PyPI."""

    description = "Upload Windows installers to PyPI (only used from tools/release_windows.py)"
    user_options = upload.user_options + [
        ('files=', 'f', 'exe file (or glob) to upload')
    ]

    def initialize_options(self):
        upload.initialize_options(self)
        meta = self.distribution.metadata
        # Default glob matches every installer built for this release,
        # e.g. dist/ipython-<version>.*.exe
        base = '%s-%s' % (meta.get_name(), meta.get_version())
        self.files = os.path.join('dist', '%s.*.exe' % base)

    def run(self):
        for dist_file in glob(self.files):
            self.upload_file('bdist_wininst', 'any', dist_file)

setup_args['cmdclass'] = {
    'build_py': record_commit_info('IPython'),
    'sdist' : record_commit_info('IPython', sdist),
    'upload_wininst' : UploadWindowsInstallers,
}
203 203
#---------------------------------------------------------------------------
# Handle scripts, dependencies, and setuptools specific things
#---------------------------------------------------------------------------

# For some commands, use setuptools. Note that we do NOT list install here!
# If you want a setuptools-enhanced install, just run 'setupegg.py install'
needs_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',
           'bdist', 'bdist_dumb', 'bdist_wininst', 'install_egg_info',
           'egg_info', 'easy_install', 'upload',
           ))
if sys.platform == 'win32':
    # Depend on setuptools for install on *Windows only*
    # If we get script-installation working without setuptools,
    # then we can back off, but until then use it.
    # See Issue #369 on GitHub for more
    needs_setuptools.add('install')

if len(needs_setuptools.intersection(sys.argv)) > 0:
    import setuptools

# This dict is used for passing extra arguments that are setuptools
# specific to setup
setuptools_extra_args = {}

if 'setuptools' in sys.modules:
    setuptools_extra_args['zip_safe'] = False
    setuptools_extra_args['entry_points'] = find_scripts(True)
    setup_args['extras_require'] = dict(
        parallel = 'pyzmq>=2.1.4',
        zmq = 'pyzmq>=2.1.4',
        doc = 'Sphinx>=0.3',
        test = 'nose>=0.10.1',
        notebook = 'tornado>=2.0'
    )
    requires = setup_args.setdefault('install_requires', [])
    setupext.display_status = False
    if not setupext.check_for_readline():
        if sys.platform == 'darwin':
            requires.append('readline')
        elif sys.platform.startswith('win'):
            # Pyreadline 64 bit windows issue solved in versions >=1.7.1
            # Also solves issues with some older versions of pyreadline that
            # satisfy the unconstrained depdendency.
            requires.append('pyreadline>=1.7.1')
        else:
            pass
            # do we want to install readline here?

    # Script to be run by the windows binary installer after the default setup
    # routine, to add shortcuts and similar windows-only things.  Windows
    # post-install scripts MUST reside in the scripts/ dir, otherwise distutils
    # doesn't find them.
    if 'bdist_wininst' in sys.argv:
        if len(sys.argv) > 2 and \
               ('sdist' in sys.argv or 'bdist_rpm' in sys.argv):
            # BUGFIX: this used the py2 statement form `print >> sys.stderr, ...`,
            # which is a TypeError under `from __future__ import print_function`.
            print("ERROR: bdist_wininst must be run alone. Exiting.", file=sys.stderr)
            sys.exit(1)
        setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')]
        setup_args['options'] = {"bdist_wininst":
                                 {"install_script":
                                  "ipython_win_post_install.py"}}

    if PY3:
        setuptools_extra_args['use_2to3'] = True
        # we try to make a 2.6, 2.7, and 3.1 to 3.3 python compatible code
        # so we explicitly disable some 2to3 fixes to be sure we aren't forgetting
        # anything.
        setuptools_extra_args['use_2to3_exclude_fixers'] = [
            'lib2to3.fixes.fix_except',
            'lib2to3.fixes.fix_apply',
            'lib2to3.fixes.fix_repr',
            'lib2to3.fixes.fix_next',
            ]
        from setuptools.command.build_py import build_py
        setup_args['cmdclass'] = {'build_py': record_commit_info('IPython', build_cmd=build_py)}
        setuptools_extra_args['entry_points'] = find_scripts(True, suffix='3')
        setuptools._dont_write_bytecode = True
else:
    # If we are running without setuptools, call this function which will
    # check for dependencies an inform the user what is needed.  This is
    # just to make life easy for users.
    check_for_dependencies()
    setup_args['scripts'] = find_scripts(False)

#---------------------------------------------------------------------------
# Do the actual setup now
#---------------------------------------------------------------------------

setup_args.update(setuptools_extra_args)
292 293
def main():
    """Run setup() with the assembled arguments, then remove build leftovers."""
    setup(**setup_args)
    cleanup()


if __name__ == '__main__':
    main()
General Comments 0
You need to be logged in to leave comments. Login now