Merge pull request #12893 from Carreau/doc-reformat...
Carol Willing
r26455:e0d15ef6 merge
@@ -1,212 +1,210 b''
1 1 """Common utilities for the various process_* implementations.
2 2
3 3 This file is only meant to be imported by the platform-specific implementations
4 4 of subprocess utilities, and it contains tools that are common to all of them.
5 5 """
6 6
7 7 #-----------------------------------------------------------------------------
8 8 # Copyright (C) 2010-2011 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-----------------------------------------------------------------------------
13 13
14 14 #-----------------------------------------------------------------------------
15 15 # Imports
16 16 #-----------------------------------------------------------------------------
17 17 import subprocess
18 18 import shlex
19 19 import sys
20 20 import os
21 21
22 22 from IPython.utils import py3compat
23 23
24 24 #-----------------------------------------------------------------------------
25 25 # Function definitions
26 26 #-----------------------------------------------------------------------------
27 27
28 28 def read_no_interrupt(p):
29 29 """Read from a pipe ignoring EINTR errors.
30 30
31 31 This is necessary because when reading from pipes with GUI event loops
32 32 running in the background, interrupts are often raised that stop the
33 33 command from completing."""
34 34 import errno
35 35
36 36 try:
37 37 return p.read()
38 38 except IOError as err:
39 39 if err.errno != errno.EINTR:
40 40 raise
41 41
42 42
43 43 def process_handler(cmd, callback, stderr=subprocess.PIPE):
44 44 """Open a command in a shell subprocess and execute a callback.
45 45
46 46 This function provides common scaffolding for creating subprocess.Popen()
47 47 calls. It creates a Popen object and then calls the callback with it.
48 48
49 49 Parameters
50 50 ----------
51 51 cmd : str or list
52 52 A command to be executed by the system, using :class:`subprocess.Popen`.
53 53 If a string is passed, it will be run in the system shell. If a list is
54 54 passed, it will be used directly as arguments.
55
56 55 callback : callable
57 56 A one-argument function that will be called with the Popen object.
58
59 57 stderr : file descriptor number, optional
60 58 By default this is set to ``subprocess.PIPE``, but you can also pass the
61 59 value ``subprocess.STDOUT`` to force the subprocess' stderr to go into
62 60 the same file descriptor as its stdout. This is useful to read stdout
63 61 and stderr combined in the order they are generated.
64 62
65 63 Returns
66 64 -------
67 65 The return value of the provided callback is returned.
68 66 """
69 67 sys.stdout.flush()
70 68 sys.stderr.flush()
71 69 # On win32, close_fds can't be true when using pipes for stdin/out/err
72 70 close_fds = sys.platform != 'win32'
73 71 # Determine if cmd should be run with system shell.
74 72 shell = isinstance(cmd, str)
75 73 # On POSIX systems run shell commands with user-preferred shell.
76 74 executable = None
77 75 if shell and os.name == 'posix' and 'SHELL' in os.environ:
78 76 executable = os.environ['SHELL']
79 77 p = subprocess.Popen(cmd, shell=shell,
80 78 executable=executable,
81 79 stdin=subprocess.PIPE,
82 80 stdout=subprocess.PIPE,
83 81 stderr=stderr,
84 82 close_fds=close_fds)
85 83
86 84 try:
87 85 out = callback(p)
88 86 except KeyboardInterrupt:
89 87 print('^C')
90 88 sys.stdout.flush()
91 89 sys.stderr.flush()
92 90 out = None
93 91 finally:
94 92 # Make really sure that we don't leave processes behind, in case the
95 93 # call above raises an exception
96 94 # We start by assuming the subprocess finished (to avoid NameErrors
97 95 # later depending on the path taken)
98 96 if p.returncode is None:
99 97 try:
100 98 p.terminate()
101 99 p.poll()
102 100 except OSError:
103 101 pass
104 102 # One last try on our way out
105 103 if p.returncode is None:
106 104 try:
107 105 p.kill()
108 106 except OSError:
109 107 pass
110 108
111 109 return out
112 110
113 111
114 112 def getoutput(cmd):
115 113 """Run a command and return its stdout/stderr as a string.
116 114
117 115 Parameters
118 116 ----------
119 117 cmd : str or list
120 118 A command to be executed in the system shell.
121 119
122 120 Returns
123 121 -------
124 122 output : str
125 123 A string containing the combination of stdout and stderr from the
126 124 subprocess, in whatever order the subprocess originally wrote to its
127 125 file descriptors (so the order of the information in this string is the
128 126 correct order as would be seen if running the command in a terminal).
129 127 """
130 128 out = process_handler(cmd, lambda p: p.communicate()[0], subprocess.STDOUT)
131 129 if out is None:
132 130 return ''
133 131 return py3compat.decode(out)
134 132
135 133
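As a point of reference, here is a minimal usage sketch for ``getoutput`` (the import path is an assumption; in IPython these helpers are normally re-exported through ``IPython.utils.process``)::

    from IPython.utils.process import getoutput

    # stdout and stderr are combined, in generation order, and decoded to str
    listing = getoutput('ls -l')
    print(listing.splitlines()[:3])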
136 134 def getoutputerror(cmd):
137 135 """Return (standard output, standard error) of executing cmd in a shell.
138 136
139 137 Accepts the same arguments as os.system().
140 138
141 139 Parameters
142 140 ----------
143 141 cmd : str or list
144 142 A command to be executed in the system shell.
145 143
146 144 Returns
147 145 -------
148 146 stdout : str
149 147 stderr : str
150 148 """
151 149 return get_output_error_code(cmd)[:2]
152 150
153 151 def get_output_error_code(cmd):
154 152 """Return (standard output, standard error, return code) of executing cmd
155 153 in a shell.
156 154
157 155 Accepts the same arguments as os.system().
158 156
159 157 Parameters
160 158 ----------
161 159 cmd : str or list
162 160 A command to be executed in the system shell.
163 161
164 162 Returns
165 163 -------
166 164 stdout : str
167 165 stderr : str
168 166 returncode: int
169 167 """
170 168
171 169 out_err, p = process_handler(cmd, lambda p: (p.communicate(), p))
172 170 if out_err is None:
173 171 return '', '', p.returncode
174 172 out, err = out_err
175 173 return py3compat.decode(out), py3compat.decode(err), p.returncode
176 174
177 175 def arg_split(s, posix=False, strict=True):
178 176 """Split a command line's arguments in a shell-like manner.
179 177
180 178 This is a modified version of the standard library's shlex.split()
181 179 function, but with a default of posix=False for splitting, so that quotes
182 180 in inputs are respected.
183 181
184 182 If strict=False, then any errors shlex.split would raise will result in the
185 183 unparsed remainder being the last element of the list, rather than raising.
186 184 This is because we sometimes use arg_split to parse things other than
187 185 command-line args.
188 186 """
189 187
190 188 lex = shlex.shlex(s, posix=posix)
191 189 lex.whitespace_split = True
192 190 # Extract tokens, ensuring that things like leaving open quotes
193 191 # does not cause this to raise. This is important, because we
194 192 # sometimes pass Python source through this (e.g. %timeit f(" ")),
195 193 # and it shouldn't raise an exception.
196 194 # It may be a bad idea to parse things that are not command-line args
197 195 # through this function, but we do, so let's be safe about it.
198 196 lex.commenters='' #fix for GH-1269
199 197 tokens = []
200 198 while True:
201 199 try:
202 200 tokens.append(next(lex))
203 201 except StopIteration:
204 202 break
205 203 except ValueError:
206 204 if strict:
207 205 raise
208 206 # couldn't parse, get remaining blob as last token
209 207 tokens.append(lex.token)
210 208 break
211 209
212 210 return tokens
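A hedged sketch of how ``arg_split`` behaves with its non-POSIX default and with ``strict=False`` (import path assumed as above)::

    from IPython.utils.process import arg_split

    # posix=False keeps the quotes, so Python source survives splitting
    arg_split('%timeit f(" ")')            # expected: ['%timeit', 'f(" ")']

    # with strict=False an unterminated quote is returned as the last token
    # instead of raising ValueError
    arg_split('echo "unterminated', strict=False)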
@@ -1,94 +1,92 b''
1 1 # encoding: utf-8
2 2 """
3 3 Utilities for working with stack frames.
4 4 """
5 5
6 6 #-----------------------------------------------------------------------------
7 7 # Copyright (C) 2008-2011 The IPython Development Team
8 8 #
9 9 # Distributed under the terms of the BSD License. The full license is in
10 10 # the file COPYING, distributed as part of this software.
11 11 #-----------------------------------------------------------------------------
12 12
13 13 #-----------------------------------------------------------------------------
14 14 # Imports
15 15 #-----------------------------------------------------------------------------
16 16
17 17 import sys
18 18
19 19 #-----------------------------------------------------------------------------
20 20 # Code
21 21 #-----------------------------------------------------------------------------
22 22
23 23 def extract_vars(*names,**kw):
24 24 """Extract a set of variables by name from another frame.
25 25
26 26 Parameters
27 27 ----------
28 28 *names : str
29 29 One or more variable names which will be extracted from the caller's
30 30 frame.
31
32 depth : integer, optional
31 **kw : integer, optional
33 32 How many frames in the stack to walk when looking for your variables.
34 33 The default is 0, which will use the frame where the call was made.
35 34
36
37 35 Examples
38 36 --------
39 37 ::
40 38
41 39 In [2]: def func(x):
42 40 ...: y = 1
43 41 ...: print(sorted(extract_vars('x','y').items()))
44 42 ...:
45 43
46 44 In [3]: func('hello')
47 45 [('x', 'hello'), ('y', 1)]
48 46 """
49 47
50 48 depth = kw.get('depth',0)
51 49
52 50 callerNS = sys._getframe(depth+1).f_locals
53 51 return dict((k,callerNS[k]) for k in names)
54 52
55 53
56 54 def extract_vars_above(*names):
57 55 """Extract a set of variables by name from another frame.
58 56
59 57 Similar to extract_vars(), but with a specified depth of 1, so that names
60 58 are extracted exactly from above the caller.
61 59
62 60 This is simply a convenience function so that the very common case (for us)
63 61 of skipping exactly 1 frame doesn't have to construct a special dict for
64 62 keyword passing."""
65 63
66 64 callerNS = sys._getframe(2).f_locals
67 65 return dict((k,callerNS[k]) for k in names)
68 66
69 67
70 68 def debugx(expr,pre_msg=''):
71 69 """Print the value of an expression from the caller's frame.
72 70
73 71 Takes an expression, evaluates it in the caller's frame and prints both
74 72 the given expression and the resulting value (as well as a debug mark
75 73 indicating the name of the calling function). The input must be of a form
76 74 suitable for eval().
77 75
78 76 An optional message can be passed, which will be prepended to the printed
79 77 expr->value pair."""
80 78
81 79 cf = sys._getframe(1)
82 80 print('[DBG:%s] %s%s -> %r' % (cf.f_code.co_name,pre_msg,expr,
83 81 eval(expr,cf.f_globals,cf.f_locals)))
84 82
85 83
86 84 # deactivate it by uncommenting the following line, which makes it a no-op
87 85 #def debugx(expr,pre_msg=''): pass
88 86
89 87 def extract_module_locals(depth=0):
90 88 """Returns (module, locals) of the function `depth` frames away from the caller"""
91 89 f = sys._getframe(depth + 1)
92 90 global_ns = f.f_globals
93 91 module = sys.modules[global_ns['__name__']]
94 92 return (module, f.f_locals)
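A small sketch showing ``extract_vars`` and ``debugx`` together (import path assumed to be ``IPython.utils.frame``)::

    from IPython.utils.frame import extract_vars, debugx

    def report(x):
        y = x * 2
        # pull selected locals out of this frame as a dict
        print(sorted(extract_vars('x', 'y').items()))
        # evaluate 'y' in this frame and print "[DBG:report] doubled: y -> 42"
        debugx('y', 'doubled: ')

    report(21)      # prints [('x', 21), ('y', 42)] and then the debugx line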
@@ -1,30 +1,29 b''
1 1 # encoding: utf-8
2 2 """Generic functions for extending IPython.
3 3 """
4 4
5 5 from IPython.core.error import TryNext
6 6 from functools import singledispatch
7 7
8 8
9 9 @singledispatch
10 10 def inspect_object(obj):
11 11 """Called when you do obj?"""
12 12 raise TryNext
13 13
14 14
15 15 @singledispatch
16 16 def complete_object(obj, prev_completions):
17 17 """Custom completer dispatching for python objects.
18 18
19 19 Parameters
20 20 ----------
21 21 obj : object
22 22 The object to complete.
23 23 prev_completions : list
24 24 List of attributes discovered so far.
25
26 25 This should return the list of attributes in obj. If you only wish to
27 26 add to the attributes already discovered normally, return
28 27 own_attrs + prev_completions.
29 28 """
30 29 raise TryNext
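Since ``complete_object`` is a ``functools.singledispatch`` function, extensions hook in by registering a per-type implementation; a hedged sketch with a hypothetical class (import path assumed to be ``IPython.utils.generics``)::

    from IPython.utils.generics import complete_object

    class Frobnicator:          # hypothetical user-defined type
        pass

    @complete_object.register(Frobnicator)
    def _complete_frobnicator(obj, prev_completions):
        # extend, rather than replace, the normally discovered attributes
        return ['frobnicate', 'defrobnicate'] + prev_completions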
@@ -1,249 +1,246 b''
1 1 # encoding: utf-8
2 2 """
3 3 IO related utilities.
4 4 """
5 5
6 6 # Copyright (c) IPython Development Team.
7 7 # Distributed under the terms of the Modified BSD License.
8 8
9 9
10 10
11 11 import atexit
12 12 import os
13 13 import sys
14 14 import tempfile
15 15 import warnings
16 16 from pathlib import Path
17 17 from warnings import warn
18 18
19 19 from IPython.utils.decorators import undoc
20 20 from .capture import CapturedIO, capture_output
21 21
22 22 @undoc
23 23 class IOStream:
24 24
25 25 def __init__(self, stream, fallback=None):
26 26 warn('IOStream is deprecated since IPython 5.0, use sys.{stdin,stdout,stderr} instead',
27 27 DeprecationWarning, stacklevel=2)
28 28 if not hasattr(stream,'write') or not hasattr(stream,'flush'):
29 29 if fallback is not None:
30 30 stream = fallback
31 31 else:
32 32 raise ValueError("fallback required, but not specified")
33 33 self.stream = stream
34 34 self._swrite = stream.write
35 35
36 36 # clone all methods not overridden:
37 37 def clone(meth):
38 38 return not hasattr(self, meth) and not meth.startswith('_')
39 39 for meth in filter(clone, dir(stream)):
40 40 try:
41 41 val = getattr(stream, meth)
42 42 except AttributeError:
43 43 pass
44 44 else:
45 45 setattr(self, meth, val)
46 46
47 47 def __repr__(self):
48 48 cls = self.__class__
49 49 tpl = '{mod}.{cls}({args})'
50 50 return tpl.format(mod=cls.__module__, cls=cls.__name__, args=self.stream)
51 51
52 52 def write(self,data):
53 53 warn('IOStream is deprecated since IPython 5.0, use sys.{stdin,stdout,stderr} instead',
54 54 DeprecationWarning, stacklevel=2)
55 55 try:
56 56 self._swrite(data)
57 57 except:
58 58 try:
59 59 # print handles some unicode issues which may trip a plain
60 60 # write() call. Emulate write() by using an empty end
61 61 # argument.
62 62 print(data, end='', file=self.stream)
63 63 except:
64 64 # if we get here, something is seriously broken.
65 65 print('ERROR - failed to write data to stream:', self.stream,
66 66 file=sys.stderr)
67 67
68 68 def writelines(self, lines):
69 69 warn('IOStream is deprecated since IPython 5.0, use sys.{stdin,stdout,stderr} instead',
70 70 DeprecationWarning, stacklevel=2)
71 71 if isinstance(lines, str):
72 72 lines = [lines]
73 73 for line in lines:
74 74 self.write(line)
75 75
76 76 # This class used to have a writeln method, but regular files and streams
77 77 # in Python don't have this method. We need to keep this completely
78 78 # compatible so we removed it.
79 79
80 80 @property
81 81 def closed(self):
82 82 return self.stream.closed
83 83
84 84 def close(self):
85 85 pass
86 86
87 87 # setup stdin/stdout/stderr to sys.stdin/sys.stdout/sys.stderr
88 88 devnull = open(os.devnull, 'w')
89 89 atexit.register(devnull.close)
90 90
91 91 # io.std* are deprecated, but don't show our own deprecation warnings
92 92 # during initialization of the deprecated API.
93 93 with warnings.catch_warnings():
94 94 warnings.simplefilter('ignore', DeprecationWarning)
95 95 stdin = IOStream(sys.stdin, fallback=devnull)
96 96 stdout = IOStream(sys.stdout, fallback=devnull)
97 97 stderr = IOStream(sys.stderr, fallback=devnull)
98 98
99 99 class Tee(object):
100 100 """A class to duplicate an output stream to stdout/err.
101 101
102 102 This works in a manner very similar to the Unix 'tee' command.
103 103
104 104 When the object is closed or deleted, it closes the original file given to
105 105 it for duplication.
106 106 """
107 107 # Inspired by:
108 108 # http://mail.python.org/pipermail/python-list/2007-May/442737.html
109 109
110 110 def __init__(self, file_or_name, mode="w", channel='stdout'):
111 111 """Construct a new Tee object.
112 112
113 113 Parameters
114 114 ----------
115 115 file_or_name : filename or open filehandle (writable)
116 116 File that will be duplicated
117
118 117 mode : optional, valid mode for open().
119 118 If a filename was given, open with this mode.
120
121 119 channel : str, one of ['stdout', 'stderr']
122 120 """
123 121 if channel not in ['stdout', 'stderr']:
124 122 raise ValueError('Invalid channel spec %s' % channel)
125 123
126 124 if hasattr(file_or_name, 'write') and hasattr(file_or_name, 'seek'):
127 125 self.file = file_or_name
128 126 else:
129 127 self.file = open(file_or_name, mode)
130 128 self.channel = channel
131 129 self.ostream = getattr(sys, channel)
132 130 setattr(sys, channel, self)
133 131 self._closed = False
134 132
135 133 def close(self):
136 134 """Close the file and restore the channel."""
137 135 self.flush()
138 136 setattr(sys, self.channel, self.ostream)
139 137 self.file.close()
140 138 self._closed = True
141 139
142 140 def write(self, data):
143 141 """Write data to both channels."""
144 142 self.file.write(data)
145 143 self.ostream.write(data)
146 144 self.ostream.flush()
147 145
148 146 def flush(self):
149 147 """Flush both channels."""
150 148 self.file.flush()
151 149 self.ostream.flush()
152 150
153 151 def __del__(self):
154 152 if not self._closed:
155 153 self.close()
156 154
157 155
158 156 def ask_yes_no(prompt, default=None, interrupt=None):
159 157 """Asks a question and returns a boolean (y/n) answer.
160 158
161 159 If default is given (one of 'y','n'), it is used if the user input is
162 160 empty. If interrupt is given (one of 'y','n'), it is used if the user
163 161 presses Ctrl-C. Otherwise the question is repeated until an answer is
164 162 given.
165 163
166 164 An EOF is treated as the default answer. If there is no default, an
167 165 exception is raised to prevent infinite loops.
168 166
169 167 Valid answers are: y/yes/n/no (match is not case sensitive)."""
170 168
171 169 answers = {'y':True,'n':False,'yes':True,'no':False}
172 170 ans = None
173 171 while ans not in answers.keys():
174 172 try:
175 173 ans = input(prompt+' ').lower()
176 174 if not ans: # response was an empty string
177 175 ans = default
178 176 except KeyboardInterrupt:
179 177 if interrupt:
180 178 ans = interrupt
181 179 print("\r")
182 180 except EOFError:
183 181 if default in answers.keys():
184 182 ans = default
185 183 print()
186 184 else:
187 185 raise
188 186
189 187 return answers[ans]
190 188
191 189
192 190 def temp_pyfile(src, ext='.py'):
193 191 """Make a temporary python file, return filename and filehandle.
194 192
195 193 Parameters
196 194 ----------
197 195 src : string or list of strings (no need for ending newlines if list)
198 196 Source code to be written to the file.
199
200 197 ext : optional, string
201 198 Extension for the generated file.
202 199
203 200 Returns
204 201 -------
205 202 filename : str
206 203 The file is closed before returning; it is the caller's responsibility to unlink it.
207 204 """
208 205 fname = tempfile.mkstemp(ext)[1]
209 206 with open(Path(fname), "w") as f:
210 207 f.write(src)
211 208 f.flush()
212 209 return fname
213 210
214 211 @undoc
215 212 def atomic_writing(*args, **kwargs):
216 213 """DEPRECATED: moved to notebook.services.contents.fileio"""
217 214 warn("IPython.utils.io.atomic_writing has moved to notebook.services.contents.fileio since IPython 4.0", DeprecationWarning, stacklevel=2)
218 215 from notebook.services.contents.fileio import atomic_writing
219 216 return atomic_writing(*args, **kwargs)
220 217
221 218 @undoc
222 219 def raw_print(*args, **kw):
223 220 """DEPRECATED: Raw print to sys.__stdout__, otherwise identical interface to print()."""
224 221 warn("IPython.utils.io.raw_print has been deprecated since IPython 7.0", DeprecationWarning, stacklevel=2)
225 222
226 223 print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
227 224 file=sys.__stdout__)
228 225 sys.__stdout__.flush()
229 226
230 227 @undoc
231 228 def raw_print_err(*args, **kw):
232 229 """DEPRECATED: Raw print to sys.__stderr__, otherwise identical interface to print()."""
233 230 warn("IPython.utils.io.raw_print_err has been deprecated since IPython 7.0", DeprecationWarning, stacklevel=2)
234 231
235 232 print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
236 233 file=sys.__stderr__)
237 234 sys.__stderr__.flush()
238 235
239 236 # used by ipykernel <= 4.9. Removed during the IPython 7-dev period and re-added
240 237 # Keep for a version or two then should remove
241 238 rprint = raw_print
242 239 rprinte = raw_print_err
243 240
244 241 @undoc
245 242 def unicode_std_stream(stream='stdout'):
246 243 """DEPRECATED, moved to nbconvert.utils.io"""
247 244 warn("IPython.utils.io.unicode_std_stream has moved to nbconvert.utils.io since IPython 4.0", DeprecationWarning, stacklevel=2)
248 245 from nbconvert.utils.io import unicode_std_stream
249 246 return unicode_std_stream(stream)
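A brief sketch of ``Tee`` and ``temp_pyfile`` in use (import path assumed to be ``IPython.utils.io``; the log file name is illustrative)::

    import os
    from IPython.utils.io import Tee, temp_pyfile

    # duplicate everything written to sys.stdout into a log file
    tee = Tee('session.log', mode='w', channel='stdout')
    print('shown on the terminal and appended to session.log')
    tee.close()                  # restores sys.stdout and closes the file

    # write source to a temporary .py file; the caller unlinks it when done
    fname = temp_pyfile("print('hello from a temp file')")
    os.remove(fname)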
@@ -1,391 +1,379 b''
1 1 # encoding: utf-8
2 2 """A dict subclass that supports attribute style access.
3 3
4 4 Authors:
5 5
6 6 * Fernando Perez (original)
7 7 * Brian Granger (refactoring to a dict subclass)
8 8 """
9 9
10 10 #-----------------------------------------------------------------------------
11 11 # Copyright (C) 2008-2011 The IPython Development Team
12 12 #
13 13 # Distributed under the terms of the BSD License. The full license is in
14 14 # the file COPYING, distributed as part of this software.
15 15 #-----------------------------------------------------------------------------
16 16
17 17 #-----------------------------------------------------------------------------
18 18 # Imports
19 19 #-----------------------------------------------------------------------------
20 20
21 21 __all__ = ['Struct']
22 22
23 23 #-----------------------------------------------------------------------------
24 24 # Code
25 25 #-----------------------------------------------------------------------------
26 26
27 27
28 28 class Struct(dict):
29 29 """A dict subclass with attribute style access.
30 30
31 31 This dict subclass has a few extra features:
32 32
33 33 * Attribute style access.
34 34 * Protection of class members (like keys, items) when using attribute
35 35 style access.
36 36 * The ability to restrict assignment to only existing keys.
37 37 * Intelligent merging.
38 38 * Overloaded operators.
39 39 """
40 40 _allownew = True
41 41 def __init__(self, *args, **kw):
42 42 """Initialize with a dictionary, another Struct, or data.
43 43
44 44 Parameters
45 45 ----------
46 args : dict, Struct
46 *args : dict, Struct
47 47 Initialize with one dict or Struct
48 kw : dict
48 **kw : dict
49 49 Initialize with key, value pairs.
50 50
51 51 Examples
52 52 --------
53
54 53 >>> s = Struct(a=10,b=30)
55 54 >>> s.a
56 55 10
57 56 >>> s.b
58 57 30
59 58 >>> s2 = Struct(s,c=30)
60 59 >>> sorted(s2.keys())
61 60 ['a', 'b', 'c']
62 61 """
63 62 object.__setattr__(self, '_allownew', True)
64 63 dict.__init__(self, *args, **kw)
65 64
66 65 def __setitem__(self, key, value):
67 66 """Set an item with check for allownew.
68 67
69 68 Examples
70 69 --------
71
72 70 >>> s = Struct()
73 71 >>> s['a'] = 10
74 72 >>> s.allow_new_attr(False)
75 73 >>> s['a'] = 10
76 74 >>> s['a']
77 75 10
78 76 >>> try:
79 77 ... s['b'] = 20
80 78 ... except KeyError:
81 79 ... print('this is not allowed')
82 80 ...
83 81 this is not allowed
84 82 """
85 83 if not self._allownew and key not in self:
86 84 raise KeyError(
87 85 "can't create new attribute %s when allow_new_attr(False)" % key)
88 86 dict.__setitem__(self, key, value)
89 87
90 88 def __setattr__(self, key, value):
91 89 """Set an attr with protection of class members.
92 90
93 91 This calls :meth:`self.__setitem__` but convert :exc:`KeyError` to
94 92 :exc:`AttributeError`.
95 93
96 94 Examples
97 95 --------
98
99 96 >>> s = Struct()
100 97 >>> s.a = 10
101 98 >>> s.a
102 99 10
103 100 >>> try:
104 101 ... s.get = 10
105 102 ... except AttributeError:
106 103 ... print("you can't set a class member")
107 104 ...
108 105 you can't set a class member
109 106 """
110 107 # If key is an str it might be a class member or instance var
111 108 if isinstance(key, str):
112 109 # I can't simply call hasattr here because it calls getattr, which
113 110 # calls self.__getattr__, which returns True for keys in
114 111 # self._data. But I only want keys in the class and in
115 112 # self.__dict__
116 113 if key in self.__dict__ or hasattr(Struct, key):
117 114 raise AttributeError(
118 115 'attr %s is a protected member of class Struct.' % key
119 116 )
120 117 try:
121 118 self.__setitem__(key, value)
122 119 except KeyError as e:
123 120 raise AttributeError(e) from e
124 121
125 122 def __getattr__(self, key):
126 123 """Get an attr by calling :meth:`dict.__getitem__`.
127 124
128 125 Like :meth:`__setattr__`, this method converts :exc:`KeyError` to
129 126 :exc:`AttributeError`.
130 127
131 128 Examples
132 129 --------
133
134 130 >>> s = Struct(a=10)
135 131 >>> s.a
136 132 10
137 133 >>> type(s.get)
138 134 <... 'builtin_function_or_method'>
139 135 >>> try:
140 136 ... s.b
141 137 ... except AttributeError:
142 138 ... print("I don't have that key")
143 139 ...
144 140 I don't have that key
145 141 """
146 142 try:
147 143 result = self[key]
148 144 except KeyError as e:
149 145 raise AttributeError(key) from e
150 146 else:
151 147 return result
152 148
153 149 def __iadd__(self, other):
154 150 """s += s2 is a shorthand for s.merge(s2).
155 151
156 152 Examples
157 153 --------
158
159 154 >>> s = Struct(a=10,b=30)
160 155 >>> s2 = Struct(a=20,c=40)
161 156 >>> s += s2
162 157 >>> sorted(s.keys())
163 158 ['a', 'b', 'c']
164 159 """
165 160 self.merge(other)
166 161 return self
167 162
168 163 def __add__(self,other):
169 164 """s + s2 -> New Struct made from s.merge(s2).
170 165
171 166 Examples
172 167 --------
173
174 168 >>> s1 = Struct(a=10,b=30)
175 169 >>> s2 = Struct(a=20,c=40)
176 170 >>> s = s1 + s2
177 171 >>> sorted(s.keys())
178 172 ['a', 'b', 'c']
179 173 """
180 174 sout = self.copy()
181 175 sout.merge(other)
182 176 return sout
183 177
184 178 def __sub__(self,other):
185 179 """s1 - s2 -> remove keys in s2 from s1.
186 180
187 181 Examples
188 182 --------
189
190 183 >>> s1 = Struct(a=10,b=30)
191 184 >>> s2 = Struct(a=40)
192 185 >>> s = s1 - s2
193 186 >>> s
194 187 {'b': 30}
195 188 """
196 189 sout = self.copy()
197 190 sout -= other
198 191 return sout
199 192
200 193 def __isub__(self,other):
201 194 """Inplace remove keys from self that are in other.
202 195
203 196 Examples
204 197 --------
205
206 198 >>> s1 = Struct(a=10,b=30)
207 199 >>> s2 = Struct(a=40)
208 200 >>> s1 -= s2
209 201 >>> s1
210 202 {'b': 30}
211 203 """
212 204 for k in other.keys():
213 205 if k in self:
214 206 del self[k]
215 207 return self
216 208
217 209 def __dict_invert(self, data):
218 210 """Helper function for merge.
219 211
220 212 Takes a dictionary whose values are lists and returns a dict with
221 213 the elements of each list as keys and the original keys as values.
222 214 """
223 215 outdict = {}
224 216 for k,lst in data.items():
225 217 if isinstance(lst, str):
226 218 lst = lst.split()
227 219 for entry in lst:
228 220 outdict[entry] = k
229 221 return outdict
230 222
231 223 def dict(self):
232 224 return self
233 225
234 226 def copy(self):
235 227 """Return a copy as a Struct.
236 228
237 229 Examples
238 230 --------
239
240 231 >>> s = Struct(a=10,b=30)
241 232 >>> s2 = s.copy()
242 233 >>> type(s2) is Struct
243 234 True
244 235 """
245 236 return Struct(dict.copy(self))
246 237
247 238 def hasattr(self, key):
248 239 """hasattr function available as a method.
249 240
250 241 Implemented like has_key.
251 242
252 243 Examples
253 244 --------
254
255 245 >>> s = Struct(a=10)
256 246 >>> s.hasattr('a')
257 247 True
258 248 >>> s.hasattr('b')
259 249 False
260 250 >>> s.hasattr('get')
261 251 False
262 252 """
263 253 return key in self
264 254
265 255 def allow_new_attr(self, allow = True):
266 256 """Set whether new attributes can be created in this Struct.
267 257
268 258 This can be used to catch typos by verifying that the attribute user
269 259 tries to change already exists in this Struct.
270 260 """
271 261 object.__setattr__(self, '_allownew', allow)
272 262
273 263 def merge(self, __loc_data__=None, __conflict_solve=None, **kw):
274 264 """Merge two Structs with customizable conflict resolution.
275 265
276 266 This is similar to :meth:`update`, but much more flexible. First, a
277 267 dict is made from data+key=value pairs. When merging this dict with
278 268 the Struct S, the optional dictionary 'conflict' is used to decide
279 269 what to do.
280 270
281 271 If conflict is not given, the default behavior is to preserve any keys
282 272 with their current value (the opposite of the :meth:`update` method's
283 273 behavior).
284 274
285 275 Parameters
286 276 ----------
287 __loc_data : dict, Struct
277 __loc_data__ : dict, Struct
288 278 The data to merge into self
289 279 __conflict_solve : dict
290 280 The conflict policy dict. The keys are binary functions used to
291 281 resolve the conflict and the values are lists of strings naming
292 282 the keys the conflict resolution function applies to. Instead of
293 283 a list of strings a space separated string can be used, like
294 284 'a b c'.
295 kw : dict
285 **kw : dict
296 286 Additional key, value pairs to merge in
297 287
298 288 Notes
299 289 -----
300
301 290 The `__conflict_solve` dict is a dictionary of binary functions which will be used to
302 291 solve key conflicts. Here is an example::
303 292
304 293 __conflict_solve = dict(
305 294 func1=['a','b','c'],
306 295 func2=['d','e']
307 296 )
308 297
309 298 In this case, the function :func:`func1` will be used to resolve
310 299 keys 'a', 'b' and 'c' and the function :func:`func2` will be used for
311 300 keys 'd' and 'e'. This could also be written as::
312 301
313 302 __conflict_solve = dict(func1='a b c',func2='d e')
314 303
315 304 These functions will be called for each key they apply to with the
316 305 form::
317 306
318 307 func1(self['a'], other['a'])
319 308
320 309 The return value is used as the final merged value.
321 310
322 311 As a convenience, merge() provides five (the most commonly needed)
323 312 pre-defined policies: preserve, update, add, add_flip and add_s. The
324 313 easiest explanation is their implementation::
325 314
326 315 preserve = lambda old,new: old
327 316 update = lambda old,new: new
328 317 add = lambda old,new: old + new
329 318 add_flip = lambda old,new: new + old # note change of order!
330 319 add_s = lambda old,new: old + ' ' + new # only for str!
331 320
332 321 You can use those four words (as strings) as keys instead
333 322 of defining them as functions, and the merge method will substitute
334 323 the appropriate functions for you.
335 324
336 325 For more complicated conflict resolution policies, you still need to
337 326 construct your own functions.
338 327
339 328 Examples
340 329 --------
341
342 330 This shows the default policy:
343 331
344 332 >>> s = Struct(a=10,b=30)
345 333 >>> s2 = Struct(a=20,c=40)
346 334 >>> s.merge(s2)
347 335 >>> sorted(s.items())
348 336 [('a', 10), ('b', 30), ('c', 40)]
349 337
350 338 Now, show how to specify a conflict dict:
351 339
352 340 >>> s = Struct(a=10,b=30)
353 341 >>> s2 = Struct(a=20,b=40)
354 342 >>> conflict = {'update':'a','add':'b'}
355 343 >>> s.merge(s2,conflict)
356 344 >>> sorted(s.items())
357 345 [('a', 20), ('b', 70)]
358 346 """
359 347
360 348 data_dict = dict(__loc_data__,**kw)
361 349
362 350 # policies for conflict resolution: two argument functions which return
363 351 # the value that will go in the new struct
364 352 preserve = lambda old,new: old
365 353 update = lambda old,new: new
366 354 add = lambda old,new: old + new
367 355 add_flip = lambda old,new: new + old # note change of order!
368 356 add_s = lambda old,new: old + ' ' + new
369 357
370 358 # default policy is to keep current keys when there's a conflict
371 359 conflict_solve = dict.fromkeys(self, preserve)
372 360
373 361 # the conflict_solve dictionary is given by the user 'inverted': we
374 362 # need a name-function mapping, it comes as a function -> names
375 363 # dict. Make a local copy (b/c we'll make changes), replace user
376 364 # strings for the three builtin policies and invert it.
377 365 if __conflict_solve:
378 366 inv_conflict_solve_user = __conflict_solve.copy()
379 367 for name, func in [('preserve',preserve), ('update',update),
380 368 ('add',add), ('add_flip',add_flip),
381 369 ('add_s',add_s)]:
382 370 if name in inv_conflict_solve_user.keys():
383 371 inv_conflict_solve_user[func] = inv_conflict_solve_user[name]
384 372 del inv_conflict_solve_user[name]
385 373 conflict_solve.update(self.__dict_invert(inv_conflict_solve_user))
386 374 for key in data_dict:
387 375 if key not in self:
388 376 self[key] = data_dict[key]
389 377 else:
390 378 self[key] = conflict_solve[key](self[key],data_dict[key])
391 379
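A compact sketch of ``Struct`` covering attribute access, ``allow_new_attr`` and ``merge`` (import path assumed to be ``IPython.utils.ipstruct``)::

    from IPython.utils.ipstruct import Struct

    s = Struct(a=10, b=30)
    s.c = 40                      # attribute assignment creates key 'c'
    s.allow_new_attr(False)       # typos now raise instead of adding keys

    # resolve conflicts per key: replace 'a', numerically add 'b'
    s.merge(Struct(a=20, b=40), {'update': 'a', 'add': 'b'})
    sorted(s.items())             # [('a', 20), ('b', 70), ('c', 40)]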
@@ -1,436 +1,440 b''
1 1 # encoding: utf-8
2 2 """
3 3 Utilities for path handling.
4 4 """
5 5
6 6 # Copyright (c) IPython Development Team.
7 7 # Distributed under the terms of the Modified BSD License.
8 8
9 9 import os
10 10 import sys
11 11 import errno
12 12 import shutil
13 13 import random
14 14 import glob
15 15 from warnings import warn
16 16
17 17 from IPython.utils.process import system
18 18 from IPython.utils.decorators import undoc
19 19
20 20 #-----------------------------------------------------------------------------
21 21 # Code
22 22 #-----------------------------------------------------------------------------
23 23 fs_encoding = sys.getfilesystemencoding()
24 24
25 25 def _writable_dir(path):
26 26 """Whether `path` is a directory, to which the user has write access."""
27 27 return os.path.isdir(path) and os.access(path, os.W_OK)
28 28
29 29 if sys.platform == 'win32':
30 30 def _get_long_path_name(path):
31 31 """Get a long path name (expand ~) on Windows using ctypes.
32 32
33 33 Examples
34 34 --------
35 35
36 36 >>> get_long_path_name('c:\\docume~1')
37 37 'c:\\\\Documents and Settings'
38 38
39 39 """
40 40 try:
41 41 import ctypes
42 42 except ImportError as e:
43 43 raise ImportError('you need to have ctypes installed for this to work') from e
44 44 _GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
45 45 _GetLongPathName.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p,
46 46 ctypes.c_uint ]
47 47
48 48 buf = ctypes.create_unicode_buffer(260)
49 49 rv = _GetLongPathName(path, buf, 260)
50 50 if rv == 0 or rv > 260:
51 51 return path
52 52 else:
53 53 return buf.value
54 54 else:
55 55 def _get_long_path_name(path):
56 56 """Dummy no-op."""
57 57 return path
58 58
59 59
60 60
61 61 def get_long_path_name(path):
62 62 """Expand a path into its long form.
63 63
64 64 On Windows this expands any ~ in the paths. On other platforms, it is
65 65 a null operation.
66 66 """
67 67 return _get_long_path_name(path)
68 68
69 69
70 70 def unquote_filename(name, win32=(sys.platform=='win32')):
71 71 """ On Windows, remove leading and trailing quotes from filenames.
72 72
73 73 This function has been deprecated and should not be used any more:
74 74 unquoting is now taken care of by :func:`IPython.utils.process.arg_split`.
75 75 """
76 76 warn("'unquote_filename' is deprecated since IPython 5.0 and should not "
77 77 "be used anymore", DeprecationWarning, stacklevel=2)
78 78 if win32:
79 79 if name.startswith(("'", '"')) and name.endswith(("'", '"')):
80 80 name = name[1:-1]
81 81 return name
82 82
83 83
84 84 def compress_user(path):
85 85 """Reverse of :func:`os.path.expanduser`
86 86 """
87 87 home = os.path.expanduser('~')
88 88 if path.startswith(home):
89 89 path = "~" + path[len(home):]
90 90 return path
91 91
92 92 def get_py_filename(name, force_win32=None):
93 93 """Return a valid python filename in the current directory.
94 94
95 95 If the given name is not a file, it adds '.py' and searches again.
96 96 Raises IOError with an informative message if the file isn't found.
97 97 """
98 98
99 99 name = os.path.expanduser(name)
100 100 if force_win32 is not None:
101 101 warn("The 'force_win32' argument to 'get_py_filename' is deprecated "
102 102 "since IPython 5.0 and should not be used anymore",
103 103 DeprecationWarning, stacklevel=2)
104 104 if not os.path.isfile(name) and not name.endswith('.py'):
105 105 name += '.py'
106 106 if os.path.isfile(name):
107 107 return name
108 108 else:
109 109 raise IOError('File `%r` not found.' % name)
110 110
111 111
112 def filefind(filename, path_dirs=None):
112 def filefind(filename: str, path_dirs=None) -> str:
113 113 """Find a file by looking through a sequence of paths.
114 114
115 115 This iterates through a sequence of paths looking for a file and returns
116 116 the full, absolute path of the first occurrence of the file. If no set of
117 117 path dirs is given, the filename is tested as is, after running through
118 118 :func:`expandvars` and :func:`expanduser`. Thus a simple call::
119 119
120 120 filefind('myfile.txt')
121 121
122 122 will find the file in the current working dir, but::
123 123
124 124 filefind('~/myfile.txt')
125 125
126 126 Will find the file in the users home directory. This function does not
127 127 automatically try any paths, such as the cwd or the user's home directory.
128 128
129 129 Parameters
130 130 ----------
131 131 filename : str
132 132 The filename to look for.
133 133 path_dirs : str, None or sequence of str
134 134 The sequence of paths to look for the file in. If None, the filename
135 135 need to be absolute or be in the cwd. If a string, the string is
136 136 put into a sequence and the searched. If a sequence, walk through
137 137 each element and join with ``filename``, calling :func:`expandvars`
138 138 and :func:`expanduser` before testing for existence.
139 139
140 140 Returns
141 141 -------
142 Raises :exc:`IOError` or returns absolute path to file.
142 path : str
143 returns absolute path to file.
144
145 Raises
146 ------
147 IOError
143 148 """
144 149
145 150 # If paths are quoted, abspath gets confused, strip them...
146 151 filename = filename.strip('"').strip("'")
147 152 # If the input is an absolute path, just check it exists
148 153 if os.path.isabs(filename) and os.path.isfile(filename):
149 154 return filename
150 155
151 156 if path_dirs is None:
152 157 path_dirs = ("",)
153 158 elif isinstance(path_dirs, str):
154 159 path_dirs = (path_dirs,)
155 160
156 161 for path in path_dirs:
157 162 if path == '.': path = os.getcwd()
158 163 testname = expand_path(os.path.join(path, filename))
159 164 if os.path.isfile(testname):
160 165 return os.path.abspath(testname)
161 166
162 167 raise IOError("File %r does not exist in any of the search paths: %r" %
163 168 (filename, path_dirs) )
164 169
165 170
166 171 class HomeDirError(Exception):
167 172 pass
168 173
169 174
170 175 def get_home_dir(require_writable=False) -> str:
171 176 """Return the 'home' directory, as a unicode string.
172 177
173 178 Uses os.path.expanduser('~'), and checks for writability.
174 179
175 180 See stdlib docs for how this is determined.
176 181 For Python <3.8, $HOME is first priority on *ALL* platforms.
177 182 For Python >=3.8 on Windows, %HOME% is no longer considered.
178 183
179 184 Parameters
180 185 ----------
181
182 186 require_writable : bool [default: False]
183 187 if True:
184 188 guarantees the return value is a writable directory, otherwise
185 189 raises HomeDirError
186 190 if False:
187 191 The path is resolved, but it is not guaranteed to exist or be writable.
188 192 """
189 193
190 194 homedir = os.path.expanduser('~')
191 195 # Next line will make things work even when /home/ is a symlink to
192 196 # /usr/home as it is on FreeBSD, for example
193 197 homedir = os.path.realpath(homedir)
194 198
195 199 if not _writable_dir(homedir) and os.name == 'nt':
196 200 # expanduser failed, use the registry to get the 'My Documents' folder.
197 201 try:
198 202 import winreg as wreg
199 203 with wreg.OpenKey(
200 204 wreg.HKEY_CURRENT_USER,
201 205 r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
202 206 ) as key:
203 207 homedir = wreg.QueryValueEx(key,'Personal')[0]
204 208 except:
205 209 pass
206 210
207 211 if (not require_writable) or _writable_dir(homedir):
208 212 assert isinstance(homedir, str), "Homedir should be unicode not bytes"
209 213 return homedir
210 214 else:
211 215 raise HomeDirError('%s is not a writable dir, '
212 216 'set $HOME environment variable to override' % homedir)
213 217
214 218 def get_xdg_dir():
215 219 """Return the XDG_CONFIG_HOME, if it is defined and exists, else None.
216 220
217 221 This is only for non-OS X posix (Linux,Unix,etc.) systems.
218 222 """
219 223
220 224 env = os.environ
221 225
222 226 if os.name == 'posix' and sys.platform != 'darwin':
223 227 # Linux, Unix, AIX, etc.
224 228 # use ~/.config if empty OR not set
225 229 xdg = env.get("XDG_CONFIG_HOME", None) or os.path.join(get_home_dir(), '.config')
226 230 if xdg and _writable_dir(xdg):
227 231 assert isinstance(xdg, str)
228 232 return xdg
229 233
230 234 return None
231 235
232 236
233 237 def get_xdg_cache_dir():
234 238 """Return the XDG_CACHE_HOME, if it is defined and exists, else None.
235 239
236 240 This is only for non-OS X posix (Linux,Unix,etc.) systems.
237 241 """
238 242
239 243 env = os.environ
240 244
241 245 if os.name == 'posix' and sys.platform != 'darwin':
242 246 # Linux, Unix, AIX, etc.
243 247 # use ~/.cache if empty OR not set
244 248 xdg = env.get("XDG_CACHE_HOME", None) or os.path.join(get_home_dir(), '.cache')
245 249 if xdg and _writable_dir(xdg):
246 250 assert isinstance(xdg, str)
247 251 return xdg
248 252
249 253 return None
250 254
251 255
252 256 @undoc
253 257 def get_ipython_dir():
254 258 warn("get_ipython_dir has moved to the IPython.paths module since IPython 4.0.", DeprecationWarning, stacklevel=2)
255 259 from IPython.paths import get_ipython_dir
256 260 return get_ipython_dir()
257 261
258 262 @undoc
259 263 def get_ipython_cache_dir():
260 264 warn("get_ipython_cache_dir has moved to the IPython.paths module since IPython 4.0.", DeprecationWarning, stacklevel=2)
261 265 from IPython.paths import get_ipython_cache_dir
262 266 return get_ipython_cache_dir()
263 267
264 268 @undoc
265 269 def get_ipython_package_dir():
266 270 warn("get_ipython_package_dir has moved to the IPython.paths module since IPython 4.0.", DeprecationWarning, stacklevel=2)
267 271 from IPython.paths import get_ipython_package_dir
268 272 return get_ipython_package_dir()
269 273
270 274 @undoc
271 275 def get_ipython_module_path(module_str):
272 276 warn("get_ipython_module_path has moved to the IPython.paths module since IPython 4.0.", DeprecationWarning, stacklevel=2)
273 277 from IPython.paths import get_ipython_module_path
274 278 return get_ipython_module_path(module_str)
275 279
276 280 @undoc
277 281 def locate_profile(profile='default'):
278 282 warn("locate_profile has moved to the IPython.paths module since IPython 4.0.", DeprecationWarning, stacklevel=2)
279 283 from IPython.paths import locate_profile
280 284 return locate_profile(profile=profile)
281 285
282 286 def expand_path(s):
283 287 """Expand $VARS and ~names in a string, like a shell
284 288
285 289 :Examples:
286 290
287 291 In [2]: os.environ['FOO']='test'
288 292
289 293 In [3]: expand_path('variable FOO is $FOO')
290 294 Out[3]: 'variable FOO is test'
291 295 """
292 296 # This is a pretty subtle hack. When expanduser is given a UNC path
293 297 # on Windows (\\server\share$\%username%), os.path.expandvars removes
294 298 # the $ to get (\\server\share\%username%); it seems to treat a lone $
295 299 # as an empty var. But we need the $ to remain there (it indicates
296 300 # a hidden share).
297 301 if os.name=='nt':
298 302 s = s.replace('$\\', 'IPYTHON_TEMP')
299 303 s = os.path.expandvars(os.path.expanduser(s))
300 304 if os.name=='nt':
301 305 s = s.replace('IPYTHON_TEMP', '$\\')
302 306 return s
303 307
304 308
305 309 def unescape_glob(string):
306 310 """Unescape glob pattern in `string`."""
307 311 def unescape(s):
308 312 for pattern in '*[]!?':
309 313 s = s.replace(r'\{0}'.format(pattern), pattern)
310 314 return s
311 315 return '\\'.join(map(unescape, string.split('\\\\')))
312 316
313 317
314 318 def shellglob(args):
315 319 """
316 320 Do glob expansion for each element in `args` and return a flattened list.
317 321
318 322 Unmatched glob pattern will remain as-is in the returned list.
319 323
320 324 """
321 325 expanded = []
322 326 # Do not unescape backslash in Windows as it is interpreted as
323 327 # path separator:
324 328 unescape = unescape_glob if sys.platform != 'win32' else lambda x: x
325 329 for a in args:
326 330 expanded.extend(glob.glob(a) or [unescape(a)])
327 331 return expanded
328 332
329 333
330 334 def target_outdated(target,deps):
331 335 """Determine whether a target is out of date.
332 336
333 337 target_outdated(target,deps) -> 1/0
334 338
335 339 deps: list of filenames which MUST exist.
336 340 target: single filename which may or may not exist.
337 341
338 342 If target doesn't exist or is older than any file listed in deps, return
339 343 true, otherwise return false.
340 344 """
341 345 try:
342 346 target_time = os.path.getmtime(target)
343 347 except os.error:
344 348 return 1
345 349 for dep in deps:
346 350 dep_time = os.path.getmtime(dep)
347 351 if dep_time > target_time:
348 352 #print "For target",target,"Dep failed:",dep # dbg
349 353 #print "times (dep,tar):",dep_time,target_time # dbg
350 354 return 1
351 355 return 0
352 356
353 357
354 358 def target_update(target,deps,cmd):
355 359 """Update a target with a given command given a list of dependencies.
356 360
357 361 target_update(target,deps,cmd) -> runs cmd if target is outdated.
358 362
359 363 This is just a wrapper around target_outdated() which calls the given
360 364 command if target is outdated."""
361 365
362 366 if target_outdated(target,deps):
363 367 system(cmd)
364 368
365 369
366 370 ENOLINK = 1998
367 371
368 372 def link(src, dst):
369 373 """Hard links ``src`` to ``dst``, returning 0 or errno.
370 374
371 375 Note that the special errno ``ENOLINK`` will be returned if ``os.link`` isn't
372 376 supported by the operating system.
373 377 """
374 378
375 379 if not hasattr(os, "link"):
376 380 return ENOLINK
377 381 link_errno = 0
378 382 try:
379 383 os.link(src, dst)
380 384 except OSError as e:
381 385 link_errno = e.errno
382 386 return link_errno
383 387
384 388
385 389 def link_or_copy(src, dst):
386 390 """Attempts to hardlink ``src`` to ``dst``, copying if the link fails.
387 391
388 392 Attempts to maintain the semantics of ``shutil.copy``.
389 393
390 394 Because ``os.link`` does not overwrite files, a unique temporary file
391 395 will be used if the target already exists, then that file will be moved
392 396 into place.
393 397 """
394 398
395 399 if os.path.isdir(dst):
396 400 dst = os.path.join(dst, os.path.basename(src))
397 401
398 402 link_errno = link(src, dst)
399 403 if link_errno == errno.EEXIST:
400 404 if os.stat(src).st_ino == os.stat(dst).st_ino:
401 405 # dst is already a hard link to the correct file, so we don't need
402 406 # to do anything else. If we try to link and rename the file
403 407 # anyway, we get duplicate files - see http://bugs.python.org/issue21876
404 408 return
405 409
406 410 new_dst = dst + "-temp-%04X" %(random.randint(1, 16**4), )
407 411 try:
408 412 link_or_copy(src, new_dst)
409 413 except:
410 414 try:
411 415 os.remove(new_dst)
412 416 except OSError:
413 417 pass
414 418 raise
415 419 os.rename(new_dst, dst)
416 420 elif link_errno != 0:
417 421 # Either link isn't supported, or the filesystem doesn't support
418 422 # linking, or 'src' and 'dst' are on different filesystems.
419 423 shutil.copy(src, dst)
420 424
421 425 def ensure_dir_exists(path, mode=0o755):
422 426 """ensure that a directory exists
423 427
424 428 If it doesn't exist, try to create it and protect against a race condition
425 429 if another process is doing the same.
426 430
427 431 The default permissions are 755, which differ from os.makedirs default of 777.
428 432 """
429 433 if not os.path.exists(path):
430 434 try:
431 435 os.makedirs(path, mode=mode)
432 436 except OSError as e:
433 437 if e.errno != errno.EEXIST:
434 438 raise
435 439 elif not os.path.isdir(path):
436 440 raise IOError("%r exists but is not a directory" % path)
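A short sketch tying a few of these path helpers together (import path assumed to be ``IPython.utils.path``; the file and directory names are illustrative)::

    from IPython.utils.path import ensure_dir_exists, filefind, link_or_copy

    ensure_dir_exists('build/cache')          # like mkdir -p, default mode 0o755

    # returns an absolute path, or raises IOError if nothing matches
    cfg = filefind('ipython_config.py', ['.', '~/.ipython'])

    # hardlink where possible, silently falling back to a copy
    link_or_copy(cfg, 'build/cache/ipython_config.py')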
@@ -1,771 +1,761 b''
1 1 # encoding: utf-8
2 2 """
3 3 Utilities for working with strings and text.
4 4
5 5 Inheritance diagram:
6 6
7 7 .. inheritance-diagram:: IPython.utils.text
8 8 :parts: 3
9 9 """
10 10
11 11 import os
12 12 import re
13 13 import sys
14 14 import textwrap
15 15 from string import Formatter
16 16 from pathlib import Path
17 17
18 18
19 19 # datetime.strftime date format for ipython
20 20 if sys.platform == 'win32':
21 21 date_format = "%B %d, %Y"
22 22 else:
23 23 date_format = "%B %-d, %Y"
24 24
25 25 class LSString(str):
26 26 """String derivative with a special access attributes.
27 27
28 28 These are normal strings, but with the special attributes:
29 29
30 30 .l (or .list) : value as list (split on newlines).
31 31 .n (or .nlstr): original value (the string itself).
32 32 .s (or .spstr): value as whitespace-separated string.
33 33 .p (or .paths): list of path objects (requires path.py package)
34 34
35 35 Any values which require transformations are computed only once and
36 36 cached.
37 37
38 38 Such strings are very useful to efficiently interact with the shell, which
39 39 typically only understands whitespace-separated options for commands."""
40 40
41 41 def get_list(self):
42 42 try:
43 43 return self.__list
44 44 except AttributeError:
45 45 self.__list = self.split('\n')
46 46 return self.__list
47 47
48 48 l = list = property(get_list)
49 49
50 50 def get_spstr(self):
51 51 try:
52 52 return self.__spstr
53 53 except AttributeError:
54 54 self.__spstr = self.replace('\n',' ')
55 55 return self.__spstr
56 56
57 57 s = spstr = property(get_spstr)
58 58
59 59 def get_nlstr(self):
60 60 return self
61 61
62 62 n = nlstr = property(get_nlstr)
63 63
64 64 def get_paths(self):
65 65 try:
66 66 return self.__paths
67 67 except AttributeError:
68 68 self.__paths = [Path(p) for p in self.split('\n') if os.path.exists(p)]
69 69 return self.__paths
70 70
71 71 p = paths = property(get_paths)
72 72
73 73 # FIXME: We need to reimplement type specific displayhook and then add this
74 74 # back as a custom printer. This should also be moved outside utils into the
75 75 # core.
76 76
77 77 # def print_lsstring(arg):
78 78 # """ Prettier (non-repr-like) and more informative printer for LSString """
79 79 # print "LSString (.p, .n, .l, .s available). Value:"
80 80 # print arg
81 81 #
82 82 #
83 83 # print_lsstring = result_display.register(LSString)(print_lsstring)
84 84
85 85
86 86 class SList(list):
87 87 """List derivative with a special access attributes.
88 88
89 89 These are normal lists, but with the special attributes:
90 90
91 91 * .l (or .list) : value as list (the list itself).
92 92 * .n (or .nlstr): value as a string, joined on newlines.
93 93 * .s (or .spstr): value as a string, joined on spaces.
94 94 * .p (or .paths): list of path objects (requires path.py package)
95 95
96 96 Any values which require transformations are computed only once and
97 97 cached."""
98 98
99 99 def get_list(self):
100 100 return self
101 101
102 102 l = list = property(get_list)
103 103
104 104 def get_spstr(self):
105 105 try:
106 106 return self.__spstr
107 107 except AttributeError:
108 108 self.__spstr = ' '.join(self)
109 109 return self.__spstr
110 110
111 111 s = spstr = property(get_spstr)
112 112
113 113 def get_nlstr(self):
114 114 try:
115 115 return self.__nlstr
116 116 except AttributeError:
117 117 self.__nlstr = '\n'.join(self)
118 118 return self.__nlstr
119 119
120 120 n = nlstr = property(get_nlstr)
121 121
122 122 def get_paths(self):
123 123 try:
124 124 return self.__paths
125 125 except AttributeError:
126 126 self.__paths = [Path(p) for p in self if os.path.exists(p)]
127 127 return self.__paths
128 128
129 129 p = paths = property(get_paths)
130 130
131 131 def grep(self, pattern, prune = False, field = None):
132 132 """ Return all strings matching 'pattern' (a regex or callable)
133 133
134 134 This is case-insensitive. If prune is true, return all items
135 135 NOT matching the pattern.
136 136
137 137 If field is specified, the match must occur in the specified
138 138 whitespace-separated field.
139 139
140 140 Examples::
141 141
142 142 a.grep( lambda x: x.startswith('C') )
143 143 a.grep('Cha.*log', prune=1)
144 144 a.grep('chm', field=-1)
145 145 """
146 146
147 147 def match_target(s):
148 148 if field is None:
149 149 return s
150 150 parts = s.split()
151 151 try:
152 152 tgt = parts[field]
153 153 return tgt
154 154 except IndexError:
155 155 return ""
156 156
157 157 if isinstance(pattern, str):
158 158 pred = lambda x : re.search(pattern, x, re.IGNORECASE)
159 159 else:
160 160 pred = pattern
161 161 if not prune:
162 162 return SList([el for el in self if pred(match_target(el))])
163 163 else:
164 164 return SList([el for el in self if not pred(match_target(el))])
165 165
166 166 def fields(self, *fields):
167 167 """ Collect whitespace-separated fields from string list
168 168
169 169 Allows quick awk-like usage of string lists.
170 170
171 171 Example data (in var a, created by 'a = !ls -l')::
172 172
173 173 -rwxrwxrwx 1 ville None 18 Dec 14 2006 ChangeLog
174 174 drwxrwxrwx+ 6 ville None 0 Oct 24 18:05 IPython
175 175
176 176 * ``a.fields(0)`` is ``['-rwxrwxrwx', 'drwxrwxrwx+']``
177 177 * ``a.fields(1,0)`` is ``['1 -rwxrwxrwx', '6 drwxrwxrwx+']``
178 178 (note the joining by space).
179 179 * ``a.fields(-1)`` is ``['ChangeLog', 'IPython']``
180 180
181 181 IndexErrors are ignored.
182 182
183 183 Without args, fields() just split()'s the strings.
184 184 """
185 185 if len(fields) == 0:
186 186 return [el.split() for el in self]
187 187
188 188 res = SList()
189 189 for el in [f.split() for f in self]:
190 190 lineparts = []
191 191
192 192 for fd in fields:
193 193 try:
194 194 lineparts.append(el[fd])
195 195 except IndexError:
196 196 pass
197 197 if lineparts:
198 198 res.append(" ".join(lineparts))
199 199
200 200 return res
201 201
202 202 def sort(self,field= None, nums = False):
203 203 """ sort by specified fields (see fields())
204 204
205 205 Example::
206 206
207 207 a.sort(1, nums = True)
208 208
209 209 Sorts a by second field, in numerical order (so that 21 > 3)
210 210
211 211 """
212 212
213 213 #decorate, sort, undecorate
214 214 if field is not None:
215 215 dsu = [[SList([line]).fields(field), line] for line in self]
216 216 else:
217 217 dsu = [[line, line] for line in self]
218 218 if nums:
219 219 for i in range(len(dsu)):
220 220 numstr = "".join([ch for ch in dsu[i][0] if ch.isdigit()])
221 221 try:
222 222 n = int(numstr)
223 223 except ValueError:
224 224 n = 0
225 225 dsu[i][0] = n
226 226
227 227
228 228 dsu.sort()
229 229 return SList([t[1] for t in dsu])
230 230
231 231
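A quick sketch of the awk-like ``SList`` workflow (in IPython such lists usually come from ``!``-captured shell output; the import path ``IPython.utils.text`` is assumed)::

    from IPython.utils.text import SList

    a = SList(['-rw-r--r-- 1 ville None 18 Dec 14 2006 ChangeLog',
               'drwxr-xr-x 6 ville None  0 Oct 24 18:05 IPython'])
    a.grep('change')          # case-insensitive match -> the ChangeLog line
    a.fields(-1)              # last whitespace field -> ['ChangeLog', 'IPython']
    a.sort(4, nums=True)      # sort by the size column, numerically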
232 232 # FIXME: We need to reimplement type specific displayhook and then add this
233 233 # back as a custom printer. This should also be moved outside utils into the
234 234 # core.
235 235
236 236 # def print_slist(arg):
237 237 # """ Prettier (non-repr-like) and more informative printer for SList """
238 238 # print "SList (.p, .n, .l, .s, .grep(), .fields(), sort() available):"
239 239 # if hasattr(arg, 'hideonce') and arg.hideonce:
240 240 # arg.hideonce = False
241 241 # return
242 242 #
243 243 # nlprint(arg) # This was a nested list printer, now removed.
244 244 #
245 245 # print_slist = result_display.register(SList)(print_slist)
246 246
247 247
248 248 def indent(instr,nspaces=4, ntabs=0, flatten=False):
249 249 """Indent a string a given number of spaces or tabstops.
250 250
251 251 indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.
252 252
253 253 Parameters
254 254 ----------
255
256 255 instr : basestring
257 256 The string to be indented.
258 257 nspaces : int (default: 4)
259 258 The number of spaces to be indented.
260 259 ntabs : int (default: 0)
261 260 The number of tabs to be indented.
262 261 flatten : bool (default: False)
263 262 Whether to scrub existing indentation. If True, all lines will be
264 263 aligned to the same indentation. If False, existing indentation will
265 264 be strictly increased.
266 265
267 266 Returns
268 267 -------
269
270 268 str|unicode : string indented by ntabs and nspaces.
271 269
272 270 """
273 271 if instr is None:
274 272 return
275 273 ind = '\t'*ntabs+' '*nspaces
276 274 if flatten:
277 275 pat = re.compile(r'^\s*', re.MULTILINE)
278 276 else:
279 277 pat = re.compile(r'^', re.MULTILINE)
280 278 outstr = re.sub(pat, ind, instr)
281 279 if outstr.endswith(os.linesep+ind):
282 280 return outstr[:-len(ind)]
283 281 else:
284 282 return outstr
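# --- Editor's usage sketch (illustrative; not part of the original diff). ---
# Shows plain indentation and the flatten=True mode, which first strips any
# existing indentation so all lines end up aligned.
#
#     >>> indent('a\nb', nspaces=2)
#     '  a\n  b'
#     >>> indent('  already\n    nested', nspaces=2, flatten=True)
#     '  already\n  nested'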
285 283
286 284
287 285 def list_strings(arg):
288 286 """Always return a list of strings, given a string or list of strings
289 287 as input.
290 288
291 289 Examples
292 290 --------
293 291 ::
294 292
295 293 In [7]: list_strings('A single string')
296 294 Out[7]: ['A single string']
297 295
298 296 In [8]: list_strings(['A single string in a list'])
299 297 Out[8]: ['A single string in a list']
300 298
301 299 In [9]: list_strings(['A','list','of','strings'])
302 300 Out[9]: ['A', 'list', 'of', 'strings']
303 301 """
304 302
305 303 if isinstance(arg, str):
306 304 return [arg]
307 305 else:
308 306 return arg
309 307
310 308
311 309 def marquee(txt='',width=78,mark='*'):
312 310 """Return the input string centered in a 'marquee'.
313 311
314 312 Examples
315 313 --------
316 314 ::
317 315
318 316 In [16]: marquee('A test',40)
319 317 Out[16]: '**************** A test ****************'
320 318
321 319 In [17]: marquee('A test',40,'-')
322 320 Out[17]: '---------------- A test ----------------'
323 321
324 322 In [18]: marquee('A test',40,' ')
325 323 Out[18]: ' A test '
326 324
327 325 """
328 326 if not txt:
329 327 return (mark*width)[:width]
330 328 nmark = (width-len(txt)-2)//len(mark)//2
331 329 if nmark < 0: nmark =0
332 330 marks = mark*nmark
333 331 return '%s %s %s' % (marks,txt,marks)
334 332
335 333
336 334 ini_spaces_re = re.compile(r'^(\s+)')
337 335
338 336 def num_ini_spaces(strng):
339 337 """Return the number of initial spaces in a string"""
340 338
341 339 ini_spaces = ini_spaces_re.match(strng)
342 340 if ini_spaces:
343 341 return ini_spaces.end()
344 342 else:
345 343 return 0
346 344
347 345
348 346 def format_screen(strng):
349 347 """Format a string for screen printing.
350 348
351 349 This removes some latex-type format codes."""
352 350 # Paragraph continue
353 351 par_re = re.compile(r'\\$',re.MULTILINE)
354 352 strng = par_re.sub('',strng)
355 353 return strng
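# --- Editor's usage sketch (illustrative; not part of the original diff). ---
# A trailing backslash (latex-style line continuation) is removed from each
# line; the newline itself is kept.
#
#     >>> format_screen('foo \\\nbar')
#     'foo \nbar'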
356 354
357 355
358 356 def dedent(text):
359 357 """Equivalent of textwrap.dedent that ignores unindented first line.
360 358
361 359 This means it will still dedent strings like:
362 360 '''foo
363 361 is a bar
364 362 '''
365 363
366 364 For use in wrap_paragraphs.
367 365 """
368 366
369 367 if text.startswith('\n'):
370 368 # text starts with blank line, don't ignore the first line
371 369 return textwrap.dedent(text)
372 370
373 371 # split first line
374 372 splits = text.split('\n',1)
375 373 if len(splits) == 1:
376 374 # only one line
377 375 return textwrap.dedent(text)
378 376
379 377 first, rest = splits
380 378 # dedent everything but the first line
381 379 rest = textwrap.dedent(rest)
382 380 return '\n'.join([first, rest])
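# --- Editor's usage sketch (illustrative; not part of the original diff). ---
# Unlike textwrap.dedent, which would leave this string unchanged because the
# first line has no indentation, this dedent ignores the first line.
#
#     >>> dedent('foo\n    bar\n    baz')
#     'foo\nbar\nbaz'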
383 381
384 382
385 383 def wrap_paragraphs(text, ncols=80):
386 384 """Wrap multiple paragraphs to fit a specified width.
387 385
388 386 This is equivalent to textwrap.wrap, but with support for multiple
389 387 paragraphs, as separated by empty lines.
390 388
391 389 Returns
392 390 -------
393
394 391 list of complete paragraphs, wrapped to fill `ncols` columns.
395 392 """
396 393 paragraph_re = re.compile(r'\n(\s*\n)+', re.MULTILINE)
397 394 text = dedent(text).strip()
398 395 paragraphs = paragraph_re.split(text)[::2] # every other entry is space
399 396 out_ps = []
400 397 indent_re = re.compile(r'\n\s+', re.MULTILINE)
401 398 for p in paragraphs:
402 399 # presume indentation that survives dedent is meaningful formatting,
403 400 # so don't fill unless text is flush.
404 401 if indent_re.search(p) is None:
405 402 # wrap paragraph
406 403 p = textwrap.fill(p, ncols)
407 404 out_ps.append(p)
408 405 return out_ps
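# --- Editor's usage sketch (illustrative; not part of the original diff). ---
# Paragraphs separated by blank lines are wrapped independently and returned
# as a list, one string per paragraph.
#
#     >>> wrap_paragraphs('first paragraph\n\nsecond paragraph', ncols=80)
#     ['first paragraph', 'second paragraph']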
409 406
410 407
411 408 def long_substr(data):
412 409 """Return the longest common substring in a list of strings.
413 410
414 411 Credit: http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
415 412 """
416 413 substr = ''
417 414 if len(data) > 1 and len(data[0]) > 0:
418 415 for i in range(len(data[0])):
419 416 for j in range(len(data[0])-i+1):
420 417 if j > len(substr) and all(data[0][i:i+j] in x for x in data):
421 418 substr = data[0][i:i+j]
422 419 elif len(data) == 1:
423 420 substr = data[0]
424 421 return substr
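# --- Editor's usage sketch (illustrative; not part of the original diff). ---
# The longest substring of the first string that occurs in every string is
# returned; here that is 'bcd'.
#
#     >>> long_substr(['abcdef', 'xbcdy'])
#     'bcd'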
425 422
426 423
427 424 def strip_email_quotes(text):
428 425 """Strip leading email quotation characters ('>').
429 426
430 427 Removes any combination of leading '>' interspersed with whitespace that
431 428 appears *identically* in all lines of the input text.
432 429
433 430 Parameters
434 431 ----------
435 432 text : str
436 433
437 434 Examples
438 435 --------
439 436
440 437 Simple uses::
441 438
442 439 In [2]: strip_email_quotes('> > text')
443 440 Out[2]: 'text'
444 441
445 442 In [3]: strip_email_quotes('> > text\\n> > more')
446 443 Out[3]: 'text\\nmore'
447 444
448 445 Note how only the common prefix that appears in all lines is stripped::
449 446
450 447 In [4]: strip_email_quotes('> > text\\n> > more\\n> more...')
451 448 Out[4]: '> text\\n> more\\nmore...'
452 449
453 450 So if any line has no quote marks ('>'), then none are stripped from any
454 451 of them::
455 452
456 453 In [5]: strip_email_quotes('> > text\\n> > more\\nlast different')
457 454 Out[5]: '> > text\\n> > more\\nlast different'
458 455 """
459 456 lines = text.splitlines()
460 457 matches = set()
461 458 for line in lines:
462 459 prefix = re.match(r'^(\s*>[ >]*)', line)
463 460 if prefix:
464 461 matches.add(prefix.group(1))
465 462 else:
466 463 break
467 464 else:
468 465 prefix = long_substr(list(matches))
469 466 if prefix:
470 467 strip = len(prefix)
471 468 text = '\n'.join([ ln[strip:] for ln in lines])
472 469 return text
473 470
474 471 def strip_ansi(source):
475 472 """
476 473 Remove ansi escape codes from text.
477 474
478 475 Parameters
479 476 ----------
480 477 source : str
481 478 Source to remove the ansi from
482 479 """
483 480 return re.sub(r'\033\[(\d|;)+?m', '', source)
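# --- Editor's usage sketch (illustrative; not part of the original diff). ---
# Color/formatting escape sequences of the form ESC[...m are removed; other
# text is left untouched.
#
#     >>> strip_ansi('\x1b[31mred\x1b[0m text')
#     'red text'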
484 481
485 482
486 483 class EvalFormatter(Formatter):
487 484 """A String Formatter that allows evaluation of simple expressions.
488 485
489 486 Note that this version interprets a : as specifying a format string (as per
490 487 standard string formatting), so if slicing is required, you must explicitly
491 488 create a slice.
492 489
493 490 This is to be used in templating cases, such as the parallel batch
494 491 script templates, where simple arithmetic on arguments is useful.
495 492
496 493 Examples
497 494 --------
498 495 ::
499 496
500 497 In [1]: f = EvalFormatter()
501 498 In [2]: f.format('{n//4}', n=8)
502 499 Out[2]: '2'
503 500
504 501 In [3]: f.format("{greeting[slice(2,4)]}", greeting="Hello")
505 502 Out[3]: 'll'
506 503 """
507 504 def get_field(self, name, args, kwargs):
508 505 v = eval(name, kwargs)
509 506 return v, name
510 507
511 508 #XXX: As of Python 3.4, the format string parsing no longer splits on a colon
512 509 # inside [], so EvalFormatter can handle slicing. Once we only support 3.4 and
513 510 # above, it should be possible to remove FullEvalFormatter.
514 511
515 512 class FullEvalFormatter(Formatter):
516 513 """A String Formatter that allows evaluation of simple expressions.
517 514
518 515 Any time a format key is not found in the kwargs,
519 516 it will be tried as an expression in the kwargs namespace.
520 517
521 518 Note that this version allows slicing using [1:2], so you cannot specify
522 519 a format string. Use :class:`EvalFormatter` to permit format strings.
523 520
524 521 Examples
525 522 --------
526 523 ::
527 524
528 525 In [1]: f = FullEvalFormatter()
529 526 In [2]: f.format('{n//4}', n=8)
530 527 Out[2]: '2'
531 528
532 529 In [3]: f.format('{list(range(5))[2:4]}')
533 530 Out[3]: '[2, 3]'
534 531
535 532 In [4]: f.format('{3*2}')
536 533 Out[4]: '6'
537 534 """
538 535 # copied from Formatter._vformat with minor changes to allow eval
539 536 # and replace the format_spec code with slicing
540 537 def vformat(self, format_string:str, args, kwargs)->str:
541 538 result = []
542 539 for literal_text, field_name, format_spec, conversion in \
543 540 self.parse(format_string):
544 541
545 542 # output the literal text
546 543 if literal_text:
547 544 result.append(literal_text)
548 545
549 546 # if there's a field, output it
550 547 if field_name is not None:
551 548 # this is some markup, find the object and do
552 549 # the formatting
553 550
554 551 if format_spec:
555 552 # override format spec, to allow slicing:
556 553 field_name = ':'.join([field_name, format_spec])
557 554
558 555 # eval the contents of the field for the object
559 556 # to be formatted
560 557 obj = eval(field_name, kwargs)
561 558
562 559 # do any conversion on the resulting object
563 560 obj = self.convert_field(obj, conversion)
564 561
565 562 # format the object and append to the result
566 563 result.append(self.format_field(obj, ''))
567 564
568 565 return ''.join(result)
569 566
570 567
571 568 class DollarFormatter(FullEvalFormatter):
572 569 """Formatter allowing Itpl style $foo replacement, for names and attribute
573 570 access only. Standard {foo} replacement also works, and allows full
574 571 evaluation of its arguments.
575 572
576 573 Examples
577 574 --------
578 575 ::
579 576
580 577 In [1]: f = DollarFormatter()
581 578 In [2]: f.format('{n//4}', n=8)
582 579 Out[2]: '2'
583 580
584 581 In [3]: f.format('23 * 76 is $result', result=23*76)
585 582 Out[3]: '23 * 76 is 1748'
586 583
587 584 In [4]: f.format('$a or {b}', a=1, b=2)
588 585 Out[4]: '1 or 2'
589 586 """
590 587 _dollar_pattern_ignore_single_quote = re.compile(r"(.*?)\$(\$?[\w\.]+)(?=([^']*'[^']*')*[^']*$)")
591 588 def parse(self, fmt_string):
592 589 for literal_txt, field_name, format_spec, conversion \
593 590 in Formatter.parse(self, fmt_string):
594 591
595 592 # Find $foo patterns in the literal text.
596 593 continue_from = 0
597 594 txt = ""
598 595 for m in self._dollar_pattern_ignore_single_quote.finditer(literal_txt):
599 596 new_txt, new_field = m.group(1,2)
600 597 # $$foo --> $foo
601 598 if new_field.startswith("$"):
602 599 txt += new_txt + new_field
603 600 else:
604 601 yield (txt + new_txt, new_field, "", None)
605 602 txt = ""
606 603 continue_from = m.end()
607 604
608 605 # Re-yield the {foo} style pattern
609 606 yield (txt + literal_txt[continue_from:], field_name, format_spec, conversion)
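# --- Editor's usage sketch (illustrative; not part of the original diff). ---
# Two behaviours of the $-pattern above: '$$name' escapes to a literal
# '$name', and '$name' inside single quotes is left alone.
#
#     >>> f = DollarFormatter()
#     >>> f.format('$$HOME is literal, $user is not', user='ville')
#     '$HOME is literal, ville is not'
#     >>> f.format("code: 'print($x)' and value $x", x=3)
#     "code: 'print($x)' and value 3"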
610 607
611 608 #-----------------------------------------------------------------------------
612 609 # Utils to columnize a list of string
613 610 #-----------------------------------------------------------------------------
614 611
615 612 def _col_chunks(l, max_rows, row_first=False):
616 613 """Yield successive max_rows-sized column chunks from l."""
617 614 if row_first:
618 615 ncols = (len(l) // max_rows) + (len(l) % max_rows > 0)
619 616 for i in range(ncols):
620 617 yield [l[j] for j in range(i, len(l), ncols)]
621 618 else:
622 619 for i in range(0, len(l), max_rows):
623 620 yield l[i:(i + max_rows)]
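# --- Editor's usage sketch (illustrative; not part of the original diff). ---
# Column-first chunking slices the list into consecutive runs of max_rows;
# row-first chunking interleaves the elements across the computed columns.
#
#     >>> list(_col_chunks(list('abcdefg'), 3))
#     [['a', 'b', 'c'], ['d', 'e', 'f'], ['g']]
#     >>> list(_col_chunks(list('abcdefg'), 3, row_first=True))
#     [['a', 'd', 'g'], ['b', 'e'], ['c', 'f']]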
624 621
625 622
626 623 def _find_optimal(rlist, row_first=False, separator_size=2, displaywidth=80):
627 624 """Calculate optimal info to columnize a list of string"""
628 625 for max_rows in range(1, len(rlist) + 1):
629 626 col_widths = list(map(max, _col_chunks(rlist, max_rows, row_first)))
630 627 sumlength = sum(col_widths)
631 628 ncols = len(col_widths)
632 629 if sumlength + separator_size * (ncols - 1) <= displaywidth:
633 630 break
634 631 return {'num_columns': ncols,
635 632 'optimal_separator_width': (displaywidth - sumlength) // (ncols - 1) if (ncols - 1) else 0,
636 633 'max_rows': max_rows,
637 634 'column_widths': col_widths
638 635 }
639 636
640 637
641 638 def _get_or_default(mylist, i, default=None):
642 639 """return list item number, or default if don't exist"""
643 640 if i >= len(mylist):
644 641 return default
645 642 else :
646 643 return mylist[i]
647 644
648 645
649 646 def compute_item_matrix(items, row_first=False, empty=None, *args, **kwargs) :
650 647 """Returns a nested list, and info to columnize items
651 648
652 649 Parameters
653 650 ----------
654
655 651 items
656 652 list of strings to columnize
657 653 row_first : (default False)
658 654 Whether to compute columns for a row-first matrix instead of
659 655 column-first (default).
660 656 empty : (default None)
661 657 default value to fill list if needed
662 658 separator_size : int (default=2)
663 659 How many characters will be used as separation between columns.
664 660 displaywidth : int (default=80)
665 661 The width of the area into which the columns should fit.
666 662
667 663 Returns
668 664 -------
669
670 665 strings_matrix
671
672 666 nested list of strings; the outermost list contains as many lists as
673 667 there are rows, and each inner list has as many elements as there are
674 668 columns. If the total number of elements in `items` does not equal
675 669 rows*columns, the last elements of some lists are filled with `None`.
676
677 670 dict_info
678 671 some info to make columnize easier:
679 672
680 673 num_columns
681 674 number of columns
682 675 max_rows
683 676 maximum number of rows (final number may be less)
684 677 column_widths
685 678 list of the width of each column
686 679 optimal_separator_width
687 680 best separator width between columns
688 681
689 682 Examples
690 683 --------
691 684 ::
692 685
693 686 In [1]: l = ['aaa','b','cc','d','eeeee','f','g','h','i','j','k','l']
694 687 In [2]: list, info = compute_item_matrix(l, displaywidth=12)
695 688 In [3]: list
696 689 Out[3]: [['aaa', 'f', 'k'], ['b', 'g', 'l'], ['cc', 'h', None], ['d', 'i', None], ['eeeee', 'j', None]]
697 690 In [4]: ideal = {'num_columns': 3, 'column_widths': [5, 1, 1], 'optimal_separator_width': 2, 'max_rows': 5}
698 691 In [5]: all((info[k] == ideal[k] for k in ideal.keys()))
699 692 Out[5]: True
700 693 """
701 694 info = _find_optimal(list(map(len, items)), row_first, *args, **kwargs)
702 695 nrow, ncol = info['max_rows'], info['num_columns']
703 696 if row_first:
704 697 return ([[_get_or_default(items, r * ncol + c, default=empty) for c in range(ncol)] for r in range(nrow)], info)
705 698 else:
706 699 return ([[_get_or_default(items, c * nrow + r, default=empty) for c in range(ncol)] for r in range(nrow)], info)
707 700
708 701
709 702 def columnize(items, row_first=False, separator=' ', displaywidth=80, spread=False):
710 703 """ Transform a list of strings into a single string with columns.
711 704
712 705 Parameters
713 706 ----------
714 707 items : sequence of strings
715 708 The strings to process.
716
717 709 row_first : (default False)
718 710 Whether to compute columns for a row-first matrix instead of
719 711 column-first (default).
720
721 712 separator : str, optional [default is two spaces]
722 713 The string that separates columns.
723
724 714 displaywidth : int, optional [default is 80]
725 715 Width of the display in number of characters.
726 716
727 717 Returns
728 718 -------
729 719 The formatted string.
730 720 """
731 721 if not items:
732 722 return '\n'
733 723 matrix, info = compute_item_matrix(items, row_first=row_first, separator_size=len(separator), displaywidth=displaywidth)
734 724 if spread:
735 725 separator = separator.ljust(int(info['optimal_separator_width']))
736 726 fmatrix = [filter(None, x) for x in matrix]
737 727 sjoin = lambda x : separator.join([ y.ljust(w, ' ') for y, w in zip(x, info['column_widths'])])
738 728 return '\n'.join(map(sjoin, fmatrix))+'\n'
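# --- Editor's usage sketch (illustrative; not part of the original diff). ---
# Five one-character items in a display 5 characters wide fit in two columns
# of three rows. With spread=True the separator is padded so the columns
# stretch to fill displaywidth.
#
#     >>> columnize(['a', 'b', 'c', 'd', 'e'], displaywidth=5)
#     'a  d\nb  e\nc\n'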
739 729
740 730
741 731 def get_text_list(list_, last_sep=' and ', sep=", ", wrap_item_with=""):
742 732 """
743 733 Return a string with a natural enumeration of items
744 734
745 735 >>> get_text_list(['a', 'b', 'c', 'd'])
746 736 'a, b, c and d'
747 737 >>> get_text_list(['a', 'b', 'c'], ' or ')
748 738 'a, b or c'
749 739 >>> get_text_list(['a', 'b', 'c'], ', ')
750 740 'a, b, c'
751 741 >>> get_text_list(['a', 'b'], ' or ')
752 742 'a or b'
753 743 >>> get_text_list(['a'])
754 744 'a'
755 745 >>> get_text_list([])
756 746 ''
757 747 >>> get_text_list(['a', 'b'], wrap_item_with="`")
758 748 '`a` and `b`'
759 749 >>> get_text_list(['a', 'b', 'c', 'd'], " = ", sep=" + ")
760 750 'a + b + c = d'
761 751 """
762 752 if len(list_) == 0:
763 753 return ''
764 754 if wrap_item_with:
765 755 list_ = ['%s%s%s' % (wrap_item_with, item, wrap_item_with) for
766 756 item in list_]
767 757 if len(list_) == 1:
768 758 return list_[0]
769 759 return '%s%s%s' % (
770 760 sep.join(i for i in list_[:-1]),
771 761 last_sep, list_[-1])
@@ -1,130 +1,127 b''
1 1 """Token-related utilities"""
2 2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 6 from collections import namedtuple
7 7 from io import StringIO
8 8 from keyword import iskeyword
9 9
10 10 import tokenize
11 11
12 12
13 13 Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
14 14
15 15 def generate_tokens(readline):
16 16 """wrap generate_tokens to catch EOF errors"""
17 17 try:
18 18 for token in tokenize.generate_tokens(readline):
19 19 yield token
20 20 except tokenize.TokenError:
21 21 # catch EOF error
22 22 return
23 23
24 24 def line_at_cursor(cell, cursor_pos=0):
25 25 """Return the line in a cell at a given cursor position
26 26
27 27 Used for calling line-based APIs that don't support multi-line input yet.
28 28
29 29 Parameters
30 30 ----------
31
32 31 cell: str
33 32 multiline block of text
34 33 cursor_pos: integer
35 34 the cursor position
36 35
37 36 Returns
38 37 -------
39
40 38 (line, offset): (string, integer)
41 39 The line with the current cursor, and the character offset of the start of the line.
42 40 """
43 41 offset = 0
44 42 lines = cell.splitlines(True)
45 43 for line in lines:
46 44 next_offset = offset + len(line)
47 45 if not line.endswith('\n'):
48 46 # If the last line doesn't have a trailing newline, treat it as if
49 47 # it does so that the cursor at the end of the line still counts
50 48 # as being on that line.
51 49 next_offset += 1
52 50 if next_offset > cursor_pos:
53 51 break
54 52 offset = next_offset
55 53 else:
56 54 line = ""
57 55 return (line, offset)
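# --- Editor's usage sketch (illustrative; not part of the original diff). ---
# With the cursor at offset 3 (on the 'c'), the second line and its starting
# offset are returned. Assumes this module is importable as
# IPython.utils.tokenutil.
#
#     >>> line_at_cursor('a\nbc\n', cursor_pos=3)
#     ('bc\n', 2)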
58 56
59 57 def token_at_cursor(cell, cursor_pos=0):
60 58 """Get the token at a given cursor
61 59
62 60 Used for introspection.
63 61
64 62 Function calls are prioritized, so the token for the callable will be returned
65 63 if the cursor is anywhere inside the call.
66 64
67 65 Parameters
68 66 ----------
69
70 67 cell : unicode
71 68 A block of Python code
72 69 cursor_pos : int
73 70 The location of the cursor in the block where the token should be found
74 71 """
75 72 names = []
76 73 tokens = []
77 74 call_names = []
78 75
79 76 offsets = {1: 0} # lines start at 1
80 77 for tup in generate_tokens(StringIO(cell).readline):
81 78
82 79 tok = Token(*tup)
83 80
84 81 # token, text, start, end, line = tup
85 82 start_line, start_col = tok.start
86 83 end_line, end_col = tok.end
87 84 if end_line + 1 not in offsets:
88 85 # keep track of offsets for each line
89 86 lines = tok.line.splitlines(True)
90 87 for lineno, line in enumerate(lines, start_line + 1):
91 88 if lineno not in offsets:
92 89 offsets[lineno] = offsets[lineno-1] + len(line)
93 90
94 91 offset = offsets[start_line]
95 92 # allow '|foo' to find 'foo' at the beginning of a line
96 93 boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
97 94 if offset + start_col >= boundary:
98 95 # current token starts after the cursor,
99 96 # don't consume it
100 97 break
101 98
102 99 if tok.token == tokenize.NAME and not iskeyword(tok.text):
103 100 if names and tokens and tokens[-1].token == tokenize.OP and tokens[-1].text == '.':
104 101 names[-1] = "%s.%s" % (names[-1], tok.text)
105 102 else:
106 103 names.append(tok.text)
107 104 elif tok.token == tokenize.OP:
108 105 if tok.text == '=' and names:
109 106 # don't inspect the lhs of an assignment
110 107 names.pop(-1)
111 108 if tok.text == '(' and names:
112 109 # if we are inside a function call, inspect the function
113 110 call_names.append(names[-1])
114 111 elif tok.text == ')' and call_names:
115 112 call_names.pop(-1)
116 113
117 114 tokens.append(tok)
118 115
119 116 if offsets[end_line] + end_col > cursor_pos:
120 117 # we found the cursor, stop reading
121 118 break
122 119
123 120 if call_names:
124 121 return call_names[-1]
125 122 elif names:
126 123 return names[-1]
127 124 else:
128 125 return ''
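# --- Editor's usage sketch (illustrative; not part of the original diff). ---
# Inside a call the callable's name wins; dotted names are joined so that
# attribute access is reported as a single token.
#
#     >>> token_at_cursor('func(a, b)', cursor_pos=7)
#     'func'
#     >>> token_at_cursor('obj.attr', cursor_pos=6)
#     'obj.attr'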
129 126
130 127