python3 syntax fixes on various scripts...
Min RK
@@ -1,133 +1,135
1 1 #!/usr/bin/env python
2 2 """Script to commit the doc build outputs into the github-pages repo.
3 3
4 4 Use:
5 5
6 6 gh-pages.py [tag]
7 7
8 8 If no tag is given, the current output of 'git describe' is used. If given,
9 9 that is how the resulting directory will be named.
10 10
11 11 In practice, you should use either actual clean tags from a current build or
12 12 something like 'current' as a stable URL for the most current version of the docs."""
13 13
14 14 #-----------------------------------------------------------------------------
15 15 # Imports
16 16 #-----------------------------------------------------------------------------
17 from __future__ import print_function
18
17 19 import os
18 20 import shutil
19 21 import sys
20 22 from os import chdir as cd
21 23 from os.path import join as pjoin
22 24
23 25 from subprocess import Popen, PIPE, CalledProcessError, check_call
24 26
25 27 #-----------------------------------------------------------------------------
26 28 # Globals
27 29 #-----------------------------------------------------------------------------
28 30
29 31 pages_dir = 'gh-pages'
30 32 html_dir = 'build/html'
31 33 pdf_dir = 'build/latex'
32 34 pages_repo = 'git@github.com:ipython/ipython-doc.git'
33 35
34 36 #-----------------------------------------------------------------------------
35 37 # Functions
36 38 #-----------------------------------------------------------------------------
37 39 def sh(cmd):
38 40 """Execute command in a subshell, return status code."""
39 41 return check_call(cmd, shell=True)
40 42
41 43
42 44 def sh2(cmd):
43 45 """Execute command in a subshell, return stdout.
44 46
45 47 Stderr is unbuffered from the subshell."""
46 48 p = Popen(cmd, stdout=PIPE, shell=True)
47 49 out = p.communicate()[0]
48 50 retcode = p.returncode
49 51 if retcode:
50 52 raise CalledProcessError(retcode, cmd)
51 53 else:
52 54 return out.rstrip()
53 55
54 56
55 57 def sh3(cmd):
56 58 """Execute command in a subshell, return stdout, stderr
57 59
58 60 If anything appears in stderr, print it out to sys.stderr"""
59 61 p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
60 62 out, err = p.communicate()
61 63 retcode = p.returncode
62 64 if retcode:
63 65 raise CalledProcessError(retcode, cmd)
64 66 else:
65 67 return out.rstrip(), err.rstrip()
66 68
67 69
68 70 def init_repo(path):
69 71 """clone the gh-pages repo if we haven't already."""
70 72 sh("git clone %s %s"%(pages_repo, path))
71 73 here = os.getcwdu()
72 74 cd(path)
73 75 sh('git checkout gh-pages')
74 76 cd(here)
75 77
76 78 #-----------------------------------------------------------------------------
77 79 # Script starts
78 80 #-----------------------------------------------------------------------------
79 81 if __name__ == '__main__':
80 82 # The tag can be given as a positional argument
81 83 try:
82 84 tag = sys.argv[1]
83 85 except IndexError:
84 86 tag = "dev"
85 87
86 88 startdir = os.getcwdu()
87 89 if not os.path.exists(pages_dir):
88 90 # init the repo
89 91 init_repo(pages_dir)
90 92 else:
91 93 # ensure up-to-date before operating
92 94 cd(pages_dir)
93 95 sh('git checkout gh-pages')
94 96 sh('git pull')
95 97 cd(startdir)
96 98
97 99 dest = pjoin(pages_dir, tag)
98 100
99 101 # don't `make html` here, because gh-pages already depends on html in Makefile
100 102 # sh('make html')
101 103 if tag != 'dev':
102 104 # only build pdf for non-dev targets
103 105 #sh2('make pdf')
104 106 pass
105 107
106 108 # This is pretty unforgiving: we unconditionally nuke the destination
107 109 # directory, and then copy the html tree in there
108 110 shutil.rmtree(dest, ignore_errors=True)
109 111 shutil.copytree(html_dir, dest)
110 112 if tag != 'dev':
111 113 #shutil.copy(pjoin(pdf_dir, 'ipython.pdf'), pjoin(dest, 'ipython.pdf'))
112 114 pass
113 115
114 116 try:
115 117 cd(pages_dir)
116 118 branch = sh2('git rev-parse --abbrev-ref HEAD').strip()
117 119 if branch != 'gh-pages':
118 120 e = 'On %r, git branch is %r, MUST be "gh-pages"' % (pages_dir,
119 121 branch)
120 122 raise RuntimeError(e)
121 123
122 124 sh('git add -A %s' % tag)
123 125 sh('git commit -m"Updated doc release: %s"' % tag)
124 print
125 print 'Most recent 3 commits:'
126 print()
127 print('Most recent 3 commits:')
126 128 sys.stdout.flush()
127 129 sh('git --no-pager log --oneline HEAD~3..')
128 130 finally:
129 131 cd(startdir)
130 132
131 print
132 print 'Now verify the build in: %r' % dest
133 print "If everything looks good, 'git push'"
133 print()
134 print('Now verify the build in: %r' % dest)
135 print("If everything looks good, 'git push'")
@@ -1,253 +1,253
1 1 # -*- coding: utf-8 -*-
2 2 #
3 3 # IPython documentation build configuration file.
4 4
5 5 # NOTE: This file has been edited manually from the auto-generated one from
6 6 # sphinx. Do NOT delete and re-generate. If any changes from sphinx are
7 7 # needed, generate a scratch one and merge by hand any new fields needed.
8 8
9 9 #
10 10 # This file is execfile()d with the current directory set to its containing dir.
11 11 #
12 12 # The contents of this file are pickled, so don't put values in the namespace
13 13 # that aren't pickleable (module imports are okay, they're removed automatically).
14 14 #
15 15 # All configuration values have a default value; values that are commented out
16 16 # serve to show the default value.
17 17
18 18 import sys, os
19 19
20 20 ON_RTD = os.environ.get('READTHEDOCS', None) == 'True'
21 21
22 22 if ON_RTD:
23 23 # Mock the presence of matplotlib, which we don't have on RTD
24 24 # see
25 25 # http://read-the-docs.readthedocs.org/en/latest/faq.html
26 26 tags.add('rtd')
27 27
28 28 # If your extensions are in another directory, add it here. If the directory
29 29 # is relative to the documentation root, use os.path.abspath to make it
30 30 # absolute, like shown here.
31 31 sys.path.insert(0, os.path.abspath('../sphinxext'))
32 32
33 33 # We load the ipython release info into a dict by explicit execution
34 34 iprelease = {}
35 35 execfile('../../IPython/core/release.py',iprelease)
36 36
37 37 # General configuration
38 38 # ---------------------
39 39
40 40 # Add any Sphinx extension module names here, as strings. They can be extensions
41 41 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
42 42 extensions = [
43 43 'matplotlib.sphinxext.mathmpl',
44 44 'matplotlib.sphinxext.only_directives',
45 45 'matplotlib.sphinxext.plot_directive',
46 46 'sphinx.ext.autodoc',
47 47 'sphinx.ext.autosummary',
48 48 'sphinx.ext.doctest',
49 49 'sphinx.ext.inheritance_diagram',
50 50 'sphinx.ext.intersphinx',
51 51 'IPython.sphinxext.ipython_console_highlighting',
52 52 'IPython.sphinxext.ipython_directive',
53 53 'numpydoc', # to preprocess docstrings
54 54 'github', # for easy GitHub links
55 55 'magics',
56 56 ]
57 57
58 58 if ON_RTD:
59 59 # Remove extensions not currently supported on RTD
60 60 extensions.remove('matplotlib.sphinxext.only_directives')
61 61 extensions.remove('matplotlib.sphinxext.mathmpl')
62 62 extensions.remove('matplotlib.sphinxext.plot_directive')
63 63 extensions.remove('IPython.sphinxext.ipython_directive')
64 64 extensions.remove('IPython.sphinxext.ipython_console_highlighting')
65 65
66 66 # Add any paths that contain templates here, relative to this directory.
67 67 templates_path = ['_templates']
68 68
69 69 # The suffix of source filenames.
70 70 source_suffix = '.rst'
71 71
72 72 if iprelease['_version_extra'] == 'dev':
73 73 rst_prolog = """
74 74 .. note::
75 75
76 76 This documentation is for a development version of IPython. There may be
77 77 significant differences from the latest stable release.
78 78
79 79 """
80 80
81 81 # The master toctree document.
82 82 master_doc = 'index'
83 83
84 84 # General substitutions.
85 85 project = 'IPython'
86 86 copyright = 'The IPython Development Team'
87 87
88 88 # ghissue config
89 89 github_project_url = "https://github.com/ipython/ipython"
90 90
91 91 # numpydoc config
92 92 numpydoc_show_class_members = False # Otherwise Sphinx emits thousands of warnings
93 93 numpydoc_class_members_toctree = False
94 94
95 95 # The default replacements for |version| and |release|, also used in various
96 96 # other places throughout the built documents.
97 97 #
98 98 # The full version, including alpha/beta/rc tags.
99 99 release = "%s" % iprelease['version']
100 100 # Just the X.Y.Z part, no '-dev'
101 101 version = iprelease['version'].split('-', 1)[0]
102 102
103 103
104 104 # There are two options for replacing |today|: either, you set today to some
105 105 # non-false value, then it is used:
106 106 #today = ''
107 107 # Else, today_fmt is used as the format for a strftime call.
108 108 today_fmt = '%B %d, %Y'
109 109
110 110 # List of documents that shouldn't be included in the build.
111 111 #unused_docs = []
112 112
113 113 # Exclude these glob-style patterns when looking for source files. They are
114 114 # relative to the source/ directory.
115 115 exclude_patterns = ['whatsnew/pr']
116 116
117 117
118 118 # If true, '()' will be appended to :func: etc. cross-reference text.
119 119 #add_function_parentheses = True
120 120
121 121 # If true, the current module name will be prepended to all description
122 122 # unit titles (such as .. function::).
123 123 #add_module_names = True
124 124
125 125 # If true, sectionauthor and moduleauthor directives will be shown in the
126 126 # output. They are ignored by default.
127 127 #show_authors = False
128 128
129 129 # The name of the Pygments (syntax highlighting) style to use.
130 130 pygments_style = 'sphinx'
131 131
132 132 # Set the default role so we can use `foo` instead of ``foo``
133 133 default_role = 'literal'
134 134
135 135 # Options for HTML output
136 136 # -----------------------
137 137
138 138 # The style sheet to use for HTML and HTML Help pages. A file of that name
139 139 # must exist either in Sphinx' static/ path, or in one of the custom paths
140 140 # given in html_static_path.
141 141 html_style = 'default.css'
142 142
143 143 # The name for this set of Sphinx documents. If None, it defaults to
144 144 # "<project> v<release> documentation".
145 145 #html_title = None
146 146
147 147 # The name of an image file (within the static path) to place at the top of
148 148 # the sidebar.
149 149 #html_logo = None
150 150
151 151 # Add any paths that contain custom static files (such as style sheets) here,
152 152 # relative to this directory. They are copied after the builtin static files,
153 153 # so a file named "default.css" will overwrite the builtin "default.css".
154 154 html_static_path = ['_static']
155 155
156 156 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
157 157 # using the given strftime format.
158 158 html_last_updated_fmt = '%b %d, %Y'
159 159
160 160 # If true, SmartyPants will be used to convert quotes and dashes to
161 161 # typographically correct entities.
162 162 #html_use_smartypants = True
163 163
164 164 # Custom sidebar templates, maps document names to template names.
165 165 #html_sidebars = {}
166 166
167 167 # Additional templates that should be rendered to pages, maps page names to
168 168 # template names.
169 169 html_additional_pages = {
170 170 'interactive/htmlnotebook': 'notebook_redirect.html',
171 171 'interactive/notebook': 'notebook_redirect.html',
172 172 'interactive/nbconvert': 'notebook_redirect.html',
173 173 'interactive/public_server': 'notebook_redirect.html',
174 174 }
175 175
176 176 # If false, no module index is generated.
177 177 #html_use_modindex = True
178 178
179 179 # If true, the reST sources are included in the HTML build as _sources/<name>.
180 180 #html_copy_source = True
181 181
182 182 # If true, an OpenSearch description file will be output, and all pages will
183 183 # contain a <link> tag referring to it. The value of this option must be the
184 184 # base URL from which the finished HTML is served.
185 185 #html_use_opensearch = ''
186 186
187 187 # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
188 188 #html_file_suffix = ''
189 189
190 190 # Output file base name for HTML help builder.
191 191 htmlhelp_basename = 'ipythondoc'
192 192
193 193 intersphinx_mapping = {'python': ('http://docs.python.org/2/', None),
194 194 'rpy2': ('http://rpy.sourceforge.net/rpy2/doc-2.4/html/', None)}
195 195
196 196 # Options for LaTeX output
197 197 # ------------------------
198 198
199 199 # The paper size ('letter' or 'a4').
200 200 latex_paper_size = 'letter'
201 201
202 202 # The font size ('10pt', '11pt' or '12pt').
203 203 latex_font_size = '11pt'
204 204
205 205 # Grouping the document tree into LaTeX files. List of tuples
206 206 # (source start file, target name, title, author, document class [howto/manual]).
207 207
208 208 latex_documents = [
209 209 ('index', 'ipython.tex', 'IPython Documentation',
210 ur"""The IPython Development Team""", 'manual', True),
210 u"""The IPython Development Team""", 'manual', True),
211 211 ('parallel/winhpc_index', 'winhpc_whitepaper.tex',
212 212 'Using IPython on Windows HPC Server 2008',
213 ur"Brian E. Granger", 'manual', True)
213 u"Brian E. Granger", 'manual', True)
214 214 ]
215 215
216 216 # The name of an image file (relative to this directory) to place at the top of
217 217 # the title page.
218 218 #latex_logo = None
219 219
220 220 # For "manual" documents, if this is true, then toplevel headings are parts,
221 221 # not chapters.
222 222 #latex_use_parts = False
223 223
224 224 # Additional stuff for the LaTeX preamble.
225 225 #latex_preamble = ''
226 226
227 227 # Documents to append as an appendix to all manuals.
228 228 #latex_appendices = []
229 229
230 230 # If false, no module index is generated.
231 231 latex_use_modindex = True
232 232
233 233
234 234 # Options for texinfo output
235 235 # --------------------------
236 236
237 237 texinfo_documents = [
238 238 (master_doc, 'ipython', 'IPython Documentation',
239 239 'The IPython Development Team',
240 240 'IPython',
241 241 'IPython Documentation',
242 242 'Programming',
243 243 1),
244 244 ]
245 245
246 246 modindex_common_prefix = ['IPython.']
247 247
248 248
249 249 # Cleanup
250 250 # -------
251 251 # delete release info to avoid pickling errors from sphinx
252 252
253 253 del iprelease
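
Note: `execfile()` on line 35 is a Python-2-only builtin, so this conf.py still cannot run under a Python 3 Sphinx. The explicit-execution idiom this commit applies to bintree.py would work here as well; a sketch::

    # hypothetical Python 3 replacement for
    #   execfile('../../IPython/core/release.py', iprelease)
    iprelease = {}
    with open('../../IPython/core/release.py') as f:
        code = compile(f.read(), '../../IPython/core/release.py', 'exec')
    exec(code, iprelease)
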
@@ -1,87 +1,88
1 1 #!/usr/bin/env python
2 2 """
3 3 Script for setting up and using [all]reduce with a binary-tree engine interconnect.
4 4
5 5 usage: `python bintree_script.py`
6 6
7 7 This spanning tree strategy ensures that a single node's mailbox will never
8 8 receive more than 2 messages at once. This is very important for scaling to large
9 9 clusters (e.g. 1000 nodes) since if you have many incoming messages of a couple
10 10 of megabytes you might saturate the network interface of a single node and
11 11 potentially its memory buffers if the messages are not consumed in a streamed
12 12 manner.
13 13
14 14 Note that the AllReduce scheme implemented with the spanning tree strategy
15 15 requires the aggregation function to be commutative and associative. This is
16 16 not required if you implement the naive gather / reduce / broadcast strategy,
17 17 where you can reorder the partial data before performing the reduce.
18 18 """
19 from __future__ import print_function
19 20
20 21 from IPython.parallel import Client, Reference
21 22
22 23
23 24 # connect client and create views
24 25 rc = Client()
25 26 rc.block=True
26 27 ids = rc.ids
27 28
28 29 root_id = ids[0]
29 30 root = rc[root_id]
30 31
31 32 view = rc[:]
32 33
33 34 # run bintree.py script defining bintree functions, etc.
34 execfile('bintree.py')
35 exec(compile(open('bintree.py').read(), 'bintree.py', 'exec'))
35 36
36 37 # generate binary tree of parents
37 38 btree = bintree(ids)
38 39
39 print "setting up binary tree interconnect:"
40 print("setting up binary tree interconnect:")
40 41 print_bintree(btree)
41 42
42 43 view.run('bintree.py')
43 44 view.scatter('id', ids, flatten=True)
44 45 view['root_id'] = root_id
45 46
46 47 # create the Communicator objects on the engines
47 48 view.execute('com = BinaryTreeCommunicator(id, root = id==root_id )')
48 49 pub_url = root.apply_sync(lambda : com.pub_url)
49 50
50 51 # gather the connection information into a dict
51 52 ar = view.apply_async(lambda : com.info)
52 53 peers = ar.get_dict()
53 54 # this is a dict, keyed by engine ID, of the connection info for the EngineCommunicators
54 55
55 56 # connect the engines to each other:
56 57 def connect(com, peers, tree, pub_url, root_id):
57 58 """this function will be called on the engines"""
58 59 com.connect(peers, tree, pub_url, root_id)
59 60
60 61 view.apply_sync(connect, Reference('com'), peers, btree, pub_url, root_id)
61 62
62 63 # functions that can be used for reductions
63 64 # max and min builtins can be used as well
64 65 def add(a,b):
65 66 """cumulative sum reduction"""
66 67 return a+b
67 68
68 69 def mul(a,b):
69 70 """cumulative product reduction"""
70 71 return a*b
71 72
72 73 view['add'] = add
73 74 view['mul'] = mul
74 75
75 76 # scatter some data
76 data = range(1000)
77 data = list(range(1000))
77 78 view.scatter('data', data)
78 79
79 80 # perform cumulative sum via allreduce
80 81 view.execute("data_sum = com.allreduce(add, data, flat=False)")
81 print "allreduce sum of data on all engines:", view['data_sum']
82 print("allreduce sum of data on all engines:", view['data_sum'])
82 83
83 84 # perform cumulative sum *without* final broadcast
84 85 # when not broadcasting with allreduce, the final result resides on the root node:
85 86 view.execute("ids_sum = com.reduce(add, id, flat=True)")
86 print "reduce sum of engine ids (not broadcast):", root['ids_sum']
87 print "partial result on each engine:", view['ids_sum']
87 print("reduce sum of engine ids (not broadcast):", root['ids_sum'])
88 print("partial result on each engine:", view['ids_sum'])
@@ -1,205 +1,205
1 1 #!/usr/bin/env python
2 2 """
2 2 A simple Python program for solving a 2D wave equation in parallel.
4 4 Domain partitioning and inter-processor communication
5 5 are done by an object of class MPIRectPartitioner2D
6 6 (which is a subclass of RectPartitioner2D and uses MPI via mpi4py)
7 7
8 8 An example of running the program is (8 processors, 4x2 partition,
9 9 400x100 grid cells)::
10 10
11 11 $ ipcluster start --engines=MPIExec -n 8 # start 8 engines with mpiexec
12 12 $ python parallelwave-mpi.py --grid 400 100 --partition 4 2
13 13
14 14 See also parallelwave, which runs the same program, but uses ZeroMQ rather
15 15 than MPI (via mpi4py) for the inter-engine communication.
16 16
17 17 Authors
18 18 -------
19 19
20 20 * Xing Cai
21 21 * Min Ragan-Kelley
22 22
23 23 """
24 24
25 25 import sys
26 26 import time
27 27
28 28 from numpy import exp, zeros, newaxis, sqrt
29 29
30 30 from IPython.external import argparse
31 31 from IPython.parallel import Client, Reference
32 32
33 33 def setup_partitioner(index, num_procs, gnum_cells, parts):
34 34 """create a partitioner in the engine namespace"""
35 35 global partitioner
36 36 p = MPIRectPartitioner2D(my_id=index, num_procs=num_procs)
37 37 p.redim(global_num_cells=gnum_cells, num_parts=parts)
38 38 p.prepare_communication()
39 39 # put the partitioner into the global namespace:
40 40 partitioner=p
41 41
42 42 def setup_solver(*args, **kwargs):
43 43 """create a WaveSolver in the engine namespace"""
44 44 global solver
45 45 solver = WaveSolver(*args, **kwargs)
46 46
47 47 def wave_saver(u, x, y, t):
48 48 """save the wave log"""
49 49 global u_hist
50 50 global t_hist
51 51 t_hist.append(t)
52 52 u_hist.append(1.0*u)
53 53
54 54
55 55 # main program:
56 56 if __name__ == '__main__':
57 57
58 58 parser = argparse.ArgumentParser()
59 59 paa = parser.add_argument
60 60 paa('--grid', '-g',
61 61 type=int, nargs=2, default=[100,100], dest='grid',
62 62 help="Cells in the grid, e.g. --grid 100 200")
63 63 paa('--partition', '-p',
64 64 type=int, nargs=2, default=None,
65 65 help="Process partition grid, e.g. --partition 4 2 for 4x2")
66 66 paa('-c',
67 67 type=float, default=1.,
68 68 help="Wave speed (I think)")
69 69 paa('-Ly',
70 70 type=float, default=1.,
71 71 help="system size (in y)")
72 72 paa('-Lx',
73 73 type=float, default=1.,
74 74 help="system size (in x)")
75 75 paa('-t', '--tstop',
76 76 type=float, default=1.,
77 77 help="Time units to run")
78 78 paa('--profile',
79 79 type=unicode, default=u'default',
80 80 help="Specify the ipcluster profile for the client to connect to.")
81 81 paa('--save',
82 82 action='store_true',
83 83 help="Add this flag to save the time/wave history during the run.")
84 84 paa('--scalar',
85 85 action='store_true',
86 86 help="Also run with scalar interior implementation, to see vector speedup.")
87 87
88 88 ns = parser.parse_args()
89 89 # set up arguments
90 90 grid = ns.grid
91 91 partition = ns.partition
92 92 Lx = ns.Lx
93 93 Ly = ns.Ly
94 94 c = ns.c
95 95 tstop = ns.tstop
96 96 if ns.save:
97 97 user_action = wave_saver
98 98 else:
99 99 user_action = None
100 100
101 101 num_cells = 1.0*(grid[0]-1)*(grid[1]-1)
102 102 final_test = True
103 103
104 104 # create the Client
105 105 rc = Client(profile=ns.profile)
106 106 num_procs = len(rc.ids)
107 107
108 108 if partition is None:
109 109 partition = [1,num_procs]
110 110
111 111 assert partition[0]*partition[1] == num_procs, "can't map partition %s to %i engines"%(partition, num_procs)
112 112
113 113 view = rc[:]
114 print "Running %s system on %s processes until %f"%(grid, partition, tstop)
114 print("Running %s system on %s processes until %f" % (grid, partition, tstop))
115 115
116 116 # functions defining initial/boundary/source conditions
117 117 def I(x,y):
118 118 from numpy import exp
119 119 return 1.5*exp(-100*((x-0.5)**2+(y-0.5)**2))
120 120 def f(x,y,t):
121 121 return 0.0
122 122 # from numpy import exp,sin
123 123 # return 10*exp(-(x - sin(100*t))**2)
124 124 def bc(x,y,t):
125 125 return 0.0
126 126
127 127 # initial imports, setup rank
128 128 view.execute('\n'.join([
129 129 "from mpi4py import MPI",
130 130 "import numpy",
131 131 "mpi = MPI.COMM_WORLD",
132 132 "my_id = MPI.COMM_WORLD.Get_rank()"]), block=True)
133 133
134 134 # initialize t_hist/u_hist for saving the state at each step (optional)
135 135 view['t_hist'] = []
136 136 view['u_hist'] = []
137 137
138 138 # set vector/scalar implementation details
139 139 impl = {}
140 140 impl['ic'] = 'vectorized'
141 141 impl['inner'] = 'scalar'
142 142 impl['bc'] = 'vectorized'
143 143
144 144 # execute some files so that the classes we need will be defined on the engines:
145 145 view.run('RectPartitioner.py')
146 146 view.run('wavesolver.py')
147 147
148 148 # setup remote partitioner
149 149 # note that Reference means that the argument passed to setup_partitioner will be the
150 150 # object named 'my_id' in the engine's namespace
151 151 view.apply_sync(setup_partitioner, Reference('my_id'), num_procs, grid, partition)
152 152 # wait for initial communication to complete
153 153 view.execute('mpi.barrier()')
154 154 # setup remote solvers
155 155 view.apply_sync(setup_solver, I,f,c,bc,Lx,Ly,partitioner=Reference('partitioner'), dt=0,implementation=impl)
156 156
157 157 # lambda for calling solver.solve:
158 158 _solve = lambda *args, **kwargs: solver.solve(*args, **kwargs)
159 159
160 160 if ns.scalar:
161 161 impl['inner'] = 'scalar'
162 162 # run first with element-wise Python operations for each cell
163 163 t0 = time.time()
164 164 ar = view.apply_async(_solve, tstop, dt=0, verbose=True, final_test=final_test, user_action=user_action)
165 165 if final_test:
166 166 # this sum is performed element-wise as results finish
167 167 s = sum(ar)
168 168 # the L2 norm (RMS) of the result:
169 169 norm = sqrt(s/num_cells)
170 170 else:
171 171 norm = -1
172 172 t1 = time.time()
173 print 'scalar inner-version, Wtime=%g, norm=%g'%(t1-t0, norm)
173 print('scalar inner-version, Wtime=%g, norm=%g' % (t1-t0, norm))
174 174
175 175 impl['inner'] = 'vectorized'
176 176 # setup new solvers
177 177 view.apply_sync(setup_solver, I,f,c,bc,Lx,Ly,partitioner=Reference('partitioner'), dt=0,implementation=impl)
178 178 view.execute('mpi.barrier()')
179 179
180 180 # run again with numpy vectorized inner-implementation
181 181 t0 = time.time()
182 182 ar = view.apply_async(_solve, tstop, dt=0, verbose=True, final_test=final_test, user_action=user_action)
183 183 if final_test:
184 184 # this sum is performed element-wise as results finish
185 185 s = sum(ar)
186 186 # the L2 norm (RMS) of the result:
187 187 norm = sqrt(s/num_cells)
188 188 else:
189 189 norm = -1
190 190 t1 = time.time()
191 print 'vector inner-version, Wtime=%g, norm=%g'%(t1-t0, norm)
191 print('vector inner-version, Wtime=%g, norm=%g' % (t1-t0, norm))
192 192
193 193 # if ns.save is True, then u_hist stores the history of u as a list
194 194 # If the partition scheme is Nx1, then u can be reconstructed via 'gather':
195 195 if ns.save and partition[-1] == 1:
196 196 import matplotlib.pyplot as plt
197 197 view.execute('u_last=u_hist[-1]')
198 198 # map mpi IDs to IPython IDs, which may not match
199 199 ranks = view['my_id']
200 200 targets = range(len(ranks))
201 201 for idx in range(len(ranks)):
202 202 targets[idx] = ranks.index(idx)
203 203 u_last = rc[targets].gather('u_last', block=True)
204 204 plt.pcolor(u_last)
205 205 plt.show()
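
Note: one Python-2-ism survives near the end: `targets = range(len(ranks))` is assigned into element-wise, which breaks on Python 3 where range() is not a list. The same rank-to-engine mapping as a list comprehension (a sketch with made-up ranks; in the script, `ranks` comes from `view['my_id']`)::

    ranks = [2, 0, 3, 1]  # MPI rank reported by each engine, by engine index
    # targets[rank] -> index of the engine that holds that MPI rank
    targets = [ranks.index(rank) for rank in range(len(ranks))]
    assert targets == [1, 3, 0, 2]
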
@@ -1,28 +1,28
1 1 #!/usr/bin/env python
2 2 """IPython release build script.
3 3 """
4 4
5 5 import os
6 6 from shutil import rmtree
7 7
8 8 from toollib import *
9 9
10 10 # Get main ipython dir, this will raise if it doesn't pass some checks
11 11 ipdir = get_ipdir()
12 12 cd(ipdir)
13 13
14 14 # Load release info
15 execfile(pjoin('IPython', 'core', 'release.py'))
15 execfile(pjoin('IPython', 'core', 'release.py'), globals())
16 16
17 17 # Check that everything compiles
18 18 compile_tree()
19 19
20 20 # Cleanup
21 21 for d in ['build', 'dist', pjoin('docs', 'build'), pjoin('docs', 'dist'),
22 22 pjoin('docs', 'source', 'api', 'generated')]:
23 23 if os.path.isdir(d):
24 24 rmtree(d)
25 25
26 26 # Build source and binary distros
27 27 sh(sdists)
28 28 sh(wheels)
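
Note: this script still calls `execfile()`, which does not exist on Python 3; presumably the star import from toollib supplies a shim (an assumption, not verified here). A typical shim mirrors the exec(compile(...)) idiom used elsewhere in this commit::

    try:
        execfile  # Python 2: a builtin
    except NameError:
        def execfile(fname, globs, locs=None):
            # Python 3 stand-in: compile and execute the file's contents
            with open(fname) as f:
                exec(compile(f.read(), fname, 'exec'), globs, locs)
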
@@ -1,54 +1,55
1 1 #!/usr/bin/env python
2 2 """Utility to look for hard tabs and \r characters in all sources.
3 3
4 4 Usage:
5 5
6 6 ./check_sources.py
7 7
8 8 It prints summaries and, if chosen, line-by-line info of where \\t or \\r
9 9 characters can be found in our source tree.
10 10 """
11 from __future__ import print_function
11 12
12 13 # Config
13 14 # If true, all lines that have tabs are printed, with line number
14 15 full_report_tabs = True
15 16 # If true, all lines that have carriage returns are printed, with line number
16 17 full_report_rets = False
17 18
18 19 # Code begins
19 20 from IPython.external.path import path
20 21
21 22 rets = []
22 23 tabs = []
23 24
24 25 for f in path('..').walkfiles('*.py'):
25 26 errs = ''
26 27 cont = f.bytes()
27 28 if '\t' in cont:
28 29 errs+='t'
29 30 tabs.append(f)
30 31
31 32 if '\r' in cont:
32 33 errs+='r'
33 34 rets.append(f)
34 35
35 36 if errs:
36 print "%3s" % errs, f
37 print("%3s" % errs, f)
37 38
38 39 if 't' in errs and full_report_tabs:
39 40 for ln,line in enumerate(f.lines()):
40 41 if '\t' in line:
41 print 'TAB:',ln,':',line,
42 print('TAB:',ln,':',line, end=' ')
42 43
43 44 if 'r' in errs and full_report_rets:
44 45 for ln,line in enumerate(open(f.abspath(),'rb')):
45 46 if '\r' in line:
46 print 'RET:',ln,':',line,
47 print('RET:',ln,':',line, end=' ')
47 48
48 49 # Summary at the end, to call cleanup tools if necessary
49 50 if tabs:
50 print 'Hard tabs found. These can be cleaned with untabify:'
51 for f in tabs: print f,
51 print('Hard tabs found. These can be cleaned with untabify:')
52 for f in tabs: print(f, end=' ')
52 53 if rets:
53 print 'Carriage returns (\\r) found in:'
54 for f in rets: print f,
54 print('Carriage returns (\\r) found in:')
55 for f in rets: print(f, end=' ')
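
Note: the walk itself still relies on `IPython.external.path`. On Python 3.4+ the same byte-level scan can be written with the standard library only — a sketch, not part of this commit::

    from pathlib import Path

    for f in sorted(Path('..').rglob('*.py')):
        data = f.read_bytes()
        errs = ('t' if b'\t' in data else '') + ('r' if b'\r' in data else '')
        if errs:
            print("%3s" % errs, f)
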