diff --git a/IPython/Release.py b/IPython/Release.py index c77a643..b744ac2 100644 --- a/IPython/Release.py +++ b/IPython/Release.py @@ -23,7 +23,7 @@ name = 'ipython' # bdist_deb does not accept underscores (a Debian convention). development = False # change this to False to do a release -version_base = '0.9.rc1' +version_base = '0.9' branch = 'ipython' revision = '1124' diff --git a/IPython/frontend/wx/console_widget.py b/IPython/frontend/wx/console_widget.py index d61e84a..30ec0b8 100644 --- a/IPython/frontend/wx/console_widget.py +++ b/IPython/frontend/wx/console_widget.py @@ -36,7 +36,7 @@ import re _DEFAULT_SIZE = 10 if sys.platform == 'darwin': - _DEFAULT_STYLE = 12 + _DEFAULT_SIZE = 12 _DEFAULT_STYLE = { 'stdout' : 'fore:#0000FF', diff --git a/docs/source/changes.txt b/docs/source/changes.txt index ffa88a6..e2361b8 100644 --- a/docs/source/changes.txt +++ b/docs/source/changes.txt @@ -27,6 +27,12 @@ Release 0.9 New features ------------ +* All furl files and security certificates are now put in a read-only directory + named ~/.ipython/security. + +* A single function :func:`get_ipython_dir`, in :mod:`IPython.genutils`, now + determines the user's IPython directory in a robust manner. + * Laurent's WX application has been given a top-level script called ipython-wx, and it has received numerous fixes. We expect this code to be architecturally better integrated with Gael's WX 'ipython widget' over the @@ -58,95 +64,129 @@ New features time and report problems), but it now works for the developers. We are working hard on continuing to improve it, as this was probably IPython's major Achilles heel (the lack of proper test coverage made it effectively - impossible to do large-scale refactoring). - - * The notion of a task has been completely reworked. An `ITask` interface has - been created. This interface defines the methods that tasks need to implement. - These methods are now responsible for things like submitting tasks and processing - results. There are two basic task types: :class:`IPython.kernel.task.StringTask` - (this is the old `Task` object, but renamed) and the new - :class:`IPython.kernel.task.MapTask`, which is based on a function. - * A new interface, :class:`IPython.kernel.mapper.IMapper` has been defined to - standardize the idea of a `map` method. This interface has a single - `map` method that has the same syntax as the built-in `map`. We have also defined - a `mapper` factory interface that creates objects that implement - :class:`IPython.kernel.mapper.IMapper` for different controllers. Both - the multiengine and task controller now have mapping capabilties. - * The parallel function capabilities have been reworks. The major changes are that - i) there is now an `@parallel` magic that creates parallel functions, ii) - the syntax for mulitple variable follows that of `map`, iii) both the - multiengine and task controller now have a parallel function implementation. - * All of the parallel computing capabilities from `ipython1-dev` have been merged into - IPython proper. This resulted in the following new subpackages: - :mod:`IPython.kernel`, :mod:`IPython.kernel.core`, :mod:`IPython.config`, - :mod:`IPython.tools` and :mod:`IPython.testing`. - * As part of merging in the `ipython1-dev` stuff, the `setup.py` script and friends - have been completely refactored. Now we are checking for dependencies using - the approach that matplotlib uses. - * The documentation has been completely reorganized to accept the documentation - from `ipython1-dev`. 
- * We have switched to using Foolscap for all of our network protocols in - :mod:`IPython.kernel`. This gives us secure connections that are both encrypted - and authenticated. - * We have a brand new `COPYING.txt` files that describes the IPython license - and copyright. The biggest change is that we are putting "The IPython - Development Team" as the copyright holder. We give more details about exactly - what this means in this file. All developer should read this and use the new - banner in all IPython source code files. - * sh profile: ./foo runs foo as system command, no need to do !./foo anymore - * String lists now support 'sort(field, nums = True)' method (to easily - sort system command output). Try it with 'a = !ls -l ; a.sort(1, nums=1)' - * '%cpaste foo' now assigns the pasted block as string list, instead of string - * The ipcluster script now run by default with no security. This is done because - the main usage of the script is for starting things on localhost. Eventually - when ipcluster is able to start things on other hosts, we will put security - back. - * 'cd --foo' searches directory history for string foo, and jumps to that dir. - Last part of dir name is checked first. If no matches for that are found, - look at the whole path. + impossible to do large-scale refactoring). The full test suite can now + be run using the :command:`iptest` command line program. + +* The notion of a task has been completely reworked. An `ITask` interface has + been created. This interface defines the methods that tasks need to implement. + These methods are now responsible for things like submitting tasks and processing + results. There are two basic task types: :class:`IPython.kernel.task.StringTask` + (this is the old `Task` object, but renamed) and the new + :class:`IPython.kernel.task.MapTask`, which is based on a function. + +* A new interface, :class:`IPython.kernel.mapper.IMapper` has been defined to + standardize the idea of a `map` method. This interface has a single + `map` method that has the same syntax as the built-in `map`. We have also defined + a `mapper` factory interface that creates objects that implement + :class:`IPython.kernel.mapper.IMapper` for different controllers. Both + the multiengine and task controller now have mapping capabilities. + +* The parallel function capabilities have been reworked. The major changes are that + i) there is now an `@parallel` magic that creates parallel functions, ii) + the syntax for multiple variables follows that of `map`, iii) both the + multiengine and task controller now have a parallel function implementation. + +* All of the parallel computing capabilities from `ipython1-dev` have been merged into + IPython proper. This resulted in the following new subpackages: + :mod:`IPython.kernel`, :mod:`IPython.kernel.core`, :mod:`IPython.config`, + :mod:`IPython.tools` and :mod:`IPython.testing`. + +* As part of merging in the `ipython1-dev` stuff, the `setup.py` script and friends + have been completely refactored. Now we are checking for dependencies using + the approach that matplotlib uses. + +* The documentation has been completely reorganized to accept the documentation + from `ipython1-dev`. + +* We have switched to using Foolscap for all of our network protocols in + :mod:`IPython.kernel`. This gives us secure connections that are both encrypted + and authenticated. + +* We have a brand new `COPYING.txt` file that describes the IPython license + and copyright. 
The biggest change is that we are putting "The IPython + Development Team" as the copyright holder. We give more details about exactly + what this means in this file. All developers should read this and use the new + banner in all IPython source code files. + +* sh profile: ./foo runs foo as system command, no need to do !./foo anymore + +* String lists now support 'sort(field, nums = True)' method (to easily + sort system command output). Try it with 'a = !ls -l ; a.sort(1, nums=1)' + +* '%cpaste foo' now assigns the pasted block as string list, instead of string + +* The ipcluster script now runs by default with no security. This is done because + the main usage of the script is for starting things on localhost. Eventually + when ipcluster is able to start things on other hosts, we will put security + back. + +* 'cd --foo' searches directory history for string foo, and jumps to that dir. + Last part of dir name is checked first. If no matches for that are found, + look at the whole path. Bug fixes --------- - * The colors escapes in the multiengine client are now turned off on win32 as they - don't print correctly. - * The :mod:`IPython.kernel.scripts.ipengine` script was exec'ing mpi_import_statement - incorrectly, which was leading the engine to crash when mpi was enabled. - * A few subpackages has missing `__init__.py` files. - * The documentation is only created is Sphinx is found. Previously, the `setup.py` - script would fail if it was missing. - * Greedy 'cd' completion has been disabled again (it was enabled in 0.8.4) +* The Windows installer has been fixed. Now all IPython scripts have ``.bat`` + versions created. Also, the Start Menu shortcuts have been updated. + +* The color escapes in the multiengine client are now turned off on win32 as they + don't print correctly. + +* The :mod:`IPython.kernel.scripts.ipengine` script was exec'ing mpi_import_statement + incorrectly, which was leading the engine to crash when mpi was enabled. + +* A few subpackages were missing `__init__.py` files. + +* The documentation is only created if Sphinx is found. Previously, the `setup.py` + script would fail if it was missing. + +* Greedy 'cd' completion has been disabled again (it was enabled in 0.8.4) Backwards incompatible changes ------------------------------ +* The ``clusterfile`` option of the :command:`ipcluster` command has been + removed, as it was not working; it will be replaced soon by something much + more robust. + +* The :mod:`IPython.kernel` configuration now properly finds the user's + IPython directory. + * In ipapi, the :func:`make_user_ns` function has been replaced with :func:`make_user_namespaces`, to support dict subclasses in namespace creation. - * :class:`IPython.kernel.client.Task` has been renamed - :class:`IPython.kernel.client.StringTask` to make way for new task types. - * The keyword argument `style` has been renamed `dist` in `scatter`, `gather` - and `map`. - * Renamed the values that the rename `dist` keyword argument can have from - `'basic'` to `'b'`. - * IPython has a larger set of dependencies if you want all of its capabilities. - See the `setup.py` script for details. - * The constructors for :class:`IPython.kernel.client.MultiEngineClient` and - :class:`IPython.kernel.client.TaskClient` no longer take the (ip,port) tuple. - Instead they take the filename of a file that contains the FURL for that - client. If the FURL file is in your IPYTHONDIR, it will be found automatically - and the constructor can be left empty. 
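To make the FURL-based client construction just described concrete, here is a
minimal sketch (the explicit FURL path and the ``square`` helper are
hypothetical, and the calls assume the 0.9 :mod:`IPython.kernel.client`
interface as described in these entries)::

    from IPython.kernel import client

    # If the FURL file is in your IPYTHONDIR, no argument is needed.
    mec = client.MultiEngineClient()

    # Otherwise, pass the FURL filename explicitly (hypothetical path).
    tc = client.TaskClient('/path/to/ipcontroller-tc.furl')

    def square(x):
        return x * x

    # The new `map` method follows the syntax of the built-in map.
    print mec.map(square, range(8))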
- * The asynchronous clients in :mod:`IPython.kernel.asyncclient` are now created - using the factory functions :func:`get_multiengine_client` and - :func:`get_task_client`. These return a `Deferred` to the actual client. - * The command line options to `ipcontroller` and `ipengine` have changed to - reflect the new Foolscap network protocol and the FURL files. Please see the - help for these scripts for details. - * The configuration files for the kernel have changed because of the Foolscap stuff. - If you were using custom config files before, you should delete them and regenerate - new ones. +* :class:`IPython.kernel.client.Task` has been renamed + :class:`IPython.kernel.client.StringTask` to make way for new task types. + +* The keyword argument `style` has been renamed to `dist` in `scatter`, `gather` + and `map`. + +* Renamed the values that the renamed `dist` keyword argument can have from + `'basic'` to `'b'`. + +* IPython has a larger set of dependencies if you want all of its capabilities. + See the `setup.py` script for details. + +* The constructors for :class:`IPython.kernel.client.MultiEngineClient` and + :class:`IPython.kernel.client.TaskClient` no longer take the (ip,port) tuple. + Instead they take the filename of a file that contains the FURL for that + client. If the FURL file is in your IPYTHONDIR, it will be found automatically + and the constructor can be left empty. + +* The asynchronous clients in :mod:`IPython.kernel.asyncclient` are now created + using the factory functions :func:`get_multiengine_client` and + :func:`get_task_client`. These return a `Deferred` to the actual client. + +* The command line options to `ipcontroller` and `ipengine` have changed to + reflect the new Foolscap network protocol and the FURL files. Please see the + help for these scripts for details. + +* The configuration files for the kernel have changed because of the Foolscap stuff. + If you were using custom config files before, you should delete them and regenerate + new ones. Changes merged in from IPython1 ------------------------------- @@ -154,76 +194,97 @@ Changes merged in from IPython1 New features ............ - * Much improved ``setup.py`` and ``setupegg.py`` scripts. Because Twisted - and zope.interface are now easy installable, we can declare them as dependencies - in our setupegg.py script. - * IPython is now compatible with Twisted 2.5.0 and 8.x. - * Added a new example of how to use :mod:`ipython1.kernel.asynclient`. - * Initial draft of a process daemon in :mod:`ipython1.daemon`. This has not - been merged into IPython and is still in `ipython1-dev`. - * The ``TaskController`` now has methods for getting the queue status. - * The ``TaskResult`` objects not have information about how long the task - took to run. - * We are attaching additional attributes to exceptions ``(_ipython_*)`` that - we use to carry additional info around. - * New top-level module :mod:`asyncclient` that has asynchronous versions (that - return deferreds) of the client classes. This is designed to users who want - to run their own Twisted reactor - * All the clients in :mod:`client` are now based on Twisted. This is done by - running the Twisted reactor in a separate thread and using the - :func:`blockingCallFromThread` function that is in recent versions of Twisted. - * Functions can now be pushed/pulled to/from engines using - :meth:`MultiEngineClient.push_function` and :meth:`MultiEngineClient.pull_function`. 
- * Gather/scatter are now implemented in the client to reduce the work load - of the controller and improve performance. - * Complete rewrite of the IPython docuementation. All of the documentation - from the IPython website has been moved into docs/source as restructured - text documents. PDF and HTML documentation are being generated using - Sphinx. - * New developer oriented documentation: development guidelines and roadmap. - * Traditional ``ChangeLog`` has been changed to a more useful ``changes.txt`` file - that is organized by release and is meant to provide something more relevant - for users. +* Much improved ``setup.py`` and ``setupegg.py`` scripts. Because Twisted + and zope.interface are now easily installable, we can declare them as dependencies + in our setupegg.py script. + +* IPython is now compatible with Twisted 2.5.0 and 8.x. + +* Added a new example of how to use :mod:`ipython1.kernel.asynclient`. + +* Initial draft of a process daemon in :mod:`ipython1.daemon`. This has not + been merged into IPython and is still in `ipython1-dev`. + +* The ``TaskController`` now has methods for getting the queue status. + +* The ``TaskResult`` objects now have information about how long the task + took to run. + +* We are attaching additional attributes to exceptions ``(_ipython_*)`` that + we use to carry additional info around. + +* New top-level module :mod:`asyncclient` that has asynchronous versions (that + return deferreds) of the client classes. This is designed for users who want + to run their own Twisted reactor. + +* All the clients in :mod:`client` are now based on Twisted. This is done by + running the Twisted reactor in a separate thread and using the + :func:`blockingCallFromThread` function that is in recent versions of Twisted. + +* Functions can now be pushed/pulled to/from engines using + :meth:`MultiEngineClient.push_function` and :meth:`MultiEngineClient.pull_function`. + +* Gather/scatter are now implemented in the client to reduce the work load + of the controller and improve performance. + +* Complete rewrite of the IPython documentation. All of the documentation + from the IPython website has been moved into docs/source as restructured + text documents. PDF and HTML documentation are being generated using + Sphinx. + +* New developer-oriented documentation: development guidelines and roadmap. + +* Traditional ``ChangeLog`` has been changed to a more useful ``changes.txt`` file + that is organized by release and is meant to provide something more relevant + for users. Bug fixes ......... - * Created a proper ``MANIFEST.in`` file to create source distributions. - * Fixed a bug in the ``MultiEngine`` interface. Previously, multi-engine - actions were being collected with a :class:`DeferredList` with - ``fireononeerrback=1``. This meant that methods were returning - before all engines had given their results. This was causing extremely odd - bugs in certain cases. To fix this problem, we have 1) set - ``fireononeerrback=0`` to make sure all results (or exceptions) are in - before returning and 2) introduced a :exc:`CompositeError` exception - that wraps all of the engine exceptions. This is a huge change as it means - that users will have to catch :exc:`CompositeError` rather than the actual - exception. +* Created a proper ``MANIFEST.in`` file to create source distributions. + +* Fixed a bug in the ``MultiEngine`` interface. Previously, multi-engine + actions were being collected with a :class:`DeferredList` with + ``fireononeerrback=1``. 
This meant that methods were returning + before all engines had given their results. This was causing extremely odd + bugs in certain cases. To fix this problem, we have 1) set + ``fireononeerrback=0`` to make sure all results (or exceptions) are in + before returning and 2) introduced a :exc:`CompositeError` exception + that wraps all of the engine exceptions. This is a huge change as it means + that users will have to catch :exc:`CompositeError` rather than the actual + exception. Backwards incompatible changes .............................. - * All names have been renamed to conform to the lowercase_with_underscore - convention. This will require users to change references to all names like - ``queueStatus`` to ``queue_status``. - * Previously, methods like :meth:`MultiEngineClient.push` and - :meth:`MultiEngineClient.push` used ``*args`` and ``**kwargs``. This was - becoming a problem as we weren't able to introduce new keyword arguments into - the API. Now these methods simple take a dict or sequence. This has also allowed - us to get rid of the ``*All`` methods like :meth:`pushAll` and :meth:`pullAll`. - These things are now handled with the ``targets`` keyword argument that defaults - to ``'all'``. - * The :attr:`MultiEngineClient.magicTargets` has been renamed to - :attr:`MultiEngineClient.targets`. - * All methods in the MultiEngine interface now accept the optional keyword argument - ``block``. - * Renamed :class:`RemoteController` to :class:`MultiEngineClient` and - :class:`TaskController` to :class:`TaskClient`. - * Renamed the top-level module from :mod:`api` to :mod:`client`. - * Most methods in the multiengine interface now raise a :exc:`CompositeError` exception - that wraps the user's exceptions, rather than just raising the raw user's exception. - * Changed the ``setupNS`` and ``resultNames`` in the ``Task`` class to ``push`` - and ``pull``. +* All names have been renamed to conform to the lowercase_with_underscore + convention. This will require users to change references to all names like + ``queueStatus`` to ``queue_status``. + +* Previously, methods like :meth:`MultiEngineClient.push` and + :meth:`MultiEngineClient.pull` used ``*args`` and ``**kwargs``. This was + becoming a problem as we weren't able to introduce new keyword arguments into + the API. Now these methods simply take a dict or sequence. This has also allowed + us to get rid of the ``*All`` methods like :meth:`pushAll` and :meth:`pullAll`. + These things are now handled with the ``targets`` keyword argument that defaults + to ``'all'``. + +* The :attr:`MultiEngineClient.magicTargets` has been renamed to + :attr:`MultiEngineClient.targets`. + +* All methods in the MultiEngine interface now accept the optional keyword argument + ``block``. + +* Renamed :class:`RemoteController` to :class:`MultiEngineClient` and + :class:`TaskController` to :class:`TaskClient`. + +* Renamed the top-level module from :mod:`api` to :mod:`client`. + +* Most methods in the multiengine interface now raise a :exc:`CompositeError` exception + that wraps the user's exceptions, rather than just raising the raw user's exception. + +* Changed the ``setupNS`` and ``resultNames`` in the ``Task`` class to ``push`` + and ``pull``. Release 0.8.4 ============= diff --git a/docs/source/conf.py b/docs/source/conf.py index ca5a31d..4b2c078 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -151,10 +151,7 @@ latex_font_size = '11pt' # (source start file, target name, title, author, document class [howto/manual]). 
latex_documents = [ ('index', 'ipython.tex', 'IPython Documentation', - ur"""Brian Granger, Fernando Pérez and Ville Vainio\\ - \ \\ - With contributions from:\\ - Benjamin Ragan-Kelley and Barry Wark.""", + ur"""The IPython Development Team""", 'manual'), ] diff --git a/docs/source/credits.txt b/docs/source/credits.txt index e9eaf9e..d372531 100644 --- a/docs/source/credits.txt +++ b/docs/source/credits.txt @@ -4,24 +4,25 @@ Credits ======= -IPython is mainly developed by Fernando Pérez -, but the project was born from mixing in -Fernando's code with the IPP project by Janko Hauser - and LazyPython by Nathan Gray -. For all IPython-related requests, please -contact Fernando. +IPython is led by Fernando Pérez. As of early 2006, the following developers have joined the core team: - * [Robert Kern] : co-mentored the 2005 - Google Summer of Code project to develop python interactive - notebooks (XML documents) and graphical interface. This project - was awarded to the students Tzanko Matev and - Toni Alatalo - * [Brian Granger] : extending IPython to allow - support for interactive parallel computing. - * [Ville Vainio] : Ville is the new - maintainer for the main trunk of IPython after version 0.7.1. +* [Robert Kern] : co-mentored the 2005 + Google Summer of Code project to develop python interactive + notebooks (XML documents) and graphical interface. This project + was awarded to the students Tzanko Matev and + Toni Alatalo . + +* [Brian Granger] : extending IPython to allow + support for interactive parallel computing. + +* [Benjamin (Min) Ragan-Kelley]: key work on IPython's parallel + computing infrastructure. + +* [Ville Vainio] : Ville has made many improvements + to the core of IPython and was the maintainer of the main IPython + trunk from version 0.7.1 to 0.8.4. The IPython project is also very grateful to: @@ -54,86 +55,134 @@ And last but not least, all the kind IPython users who have emailed new code, bug reports, fixes, comments and ideas. A brief list follows; please let me know if I have omitted your name by accident: - * [Jack Moffit] Bug fixes, including the infamous - color problem. This bug alone caused many lost hours and - frustration, many thanks to him for the fix. I've always been a - fan of Ogg & friends, now I have one more reason to like these folks. - Jack is also contributing with Debian packaging and many other - things. - * [Alexander Schmolck] Emacs work, bug - reports, bug fixes, ideas, lots more. The ipython.el mode for - (X)Emacs is Alex's code, providing full support for IPython under - (X)Emacs. - * [Andrea Riciputi] Mac OSX - information, Fink package management. - * [Gary Bishop] Bug reports, and patches to work - around the exception handling idiosyncracies of WxPython. Readline - and color support for Windows. - * [Jeffrey Collins] Bug reports. Much - improved readline support, including fixes for Python 2.3. - * [Dryice Liu] FreeBSD port. - * [Mike Heeter] - * [Christopher Hart] PDB integration. - * [Milan Zamazal] Emacs info. - * [Philip Hisley] - * [Holger Krekel] Tab completion, lots - more. - * [Robin Siebler] - * [Ralf Ahlbrink] - * [Thorsten Kampe] - * [Fredrik Kant] Windows setup. - * [Syver Enstad] Windows setup. - * [Richard] Global embedding. - * [Hayden Callow] Gnuplot.py 1.6 - compatibility. - * [Leonardo Santagada] Fixes for Windows - installation. - * [Christopher Armstrong] Bugfixes. - * [Francois Pinard] Code and - documentation fixes. - * [Cory Dodt] Bug reports and Windows - ideas. Patches for Windows installer. 
- * [Olivier Aubert] New magics. - * [King C. Shu] Autoindent patch. - * [Chris Drexler] Readline packages for - Win32/CygWin. - * [Gustavo Cordova Avila] EvalDict code for - nice, lightweight string interpolation. - * [Kasper Souren] Bug reports, ideas. - * [Gever Tulley] Code contributions. - * [Ralf Schmitt] Bug reports & fixes. - * [Oliver Sander] Bug reports. - * [Rod Holland] Bug reports and fixes to - logging module. - * [Daniel 'Dang' Griffith] - Fixes, enhancement suggestions for system shell use. - * [Viktor Ransmayr] Tests and - reports on Windows installation issues. Contributed a true Windows - binary installer. - * [Mike Salib] Help fixing a subtle bug related - to traceback printing. - * [W.J. van der Laan] Bash-like - prompt specials. - * [Antoon Pardon] Critical fix for - the multithreaded IPython. - * [John Hunter] Matplotlib - author, helped with all the development of support for matplotlib - in IPyhton, including making necessary changes to matplotlib itself. - * [Matthew Arnison] Bug reports, '%run -d' idea. - * [Prabhu Ramachandran] Help - with (X)Emacs support, threading patches, ideas... - * [Norbert Tretkowski] help with Debian - packaging and distribution. - * [George Sakkis] New matcher for - tab-completing named arguments of user-defined functions. - * [Jörgen Stenarson] Wildcard - support implementation for searching namespaces. - * [Vivian De Smedt] Debugger enhancements, - so that when pdb is activated from within IPython, coloring, tab - completion and other features continue to work seamlessly. - * [Scott Tsai] Support for automatic - editor invocation on syntax errors (see - http://www.scipy.net/roundup/ipython/issue36). - * [Alexander Belchenko] Improvements for win32 - paging system. - * [Will Maier] Official OpenBSD port. \ No newline at end of file +* Dan Milstein . A bold refactoring of the + core prefilter stuff in the IPython interpreter. + +* [Jack Moffit] Bug fixes, including the infamous + color problem. This bug alone caused many lost hours and + frustration, many thanks to him for the fix. I've always been a + fan of Ogg & friends, now I have one more reason to like these folks. + Jack is also contributing with Debian packaging and many other + things. + +* [Alexander Schmolck] Emacs work, bug + reports, bug fixes, ideas, lots more. The ipython.el mode for + (X)Emacs is Alex's code, providing full support for IPython under + (X)Emacs. + +* [Andrea Riciputi] Mac OSX + information, Fink package management. + +* [Gary Bishop] Bug reports, and patches to work + around the exception handling idiosyncrasies of WxPython. Readline + and color support for Windows. + +* [Jeffrey Collins] Bug reports. Much + improved readline support, including fixes for Python 2.3. + +* [Dryice Liu] FreeBSD port. + +* [Mike Heeter] + +* [Christopher Hart] PDB integration. + +* [Milan Zamazal] Emacs info. + +* [Philip Hisley] + +* [Holger Krekel] Tab completion, lots + more. + +* [Robin Siebler] + +* [Ralf Ahlbrink] + +* [Thorsten Kampe] + +* [Fredrik Kant] Windows setup. + +* [Syver Enstad] Windows setup. + +* [Richard] Global embedding. + +* [Hayden Callow] Gnuplot.py 1.6 + compatibility. + +* [Leonardo Santagada] Fixes for Windows + installation. + +* [Christopher Armstrong] Bugfixes. + +* [Francois Pinard] Code and + documentation fixes. + +* [Cory Dodt] Bug reports and Windows + ideas. Patches for Windows installer. + +* [Olivier Aubert] New magics. + +* [King C. Shu] Autoindent patch. + +* [Chris Drexler] Readline packages for + Win32/CygWin. 
+
+* [Gustavo Cordova Avila] EvalDict code for
+ nice, lightweight string interpolation.
+
+* [Kasper Souren] Bug reports, ideas.
+
+* [Gever Tulley] Code contributions.
+
+* [Ralf Schmitt] Bug reports & fixes.
+
+* [Oliver Sander] Bug reports.
+
+* [Rod Holland] Bug reports and fixes to
+ logging module.
+
+* [Daniel 'Dang' Griffith]
+ Fixes, enhancement suggestions for system shell use.
+
+* [Viktor Ransmayr] Tests and
+ reports on Windows installation issues. Contributed a true Windows
+ binary installer.
+
+* [Mike Salib] Help fixing a subtle bug related
+ to traceback printing.
+
+* [W.J. van der Laan] Bash-like
+ prompt specials.
+
+* [Antoon Pardon] Critical fix for
+ the multithreaded IPython.
+
+* [John Hunter] Matplotlib
+ author, helped with all the development of support for matplotlib
+ in IPython, including making necessary changes to matplotlib itself.
+
+* [Matthew Arnison] Bug reports, '%run -d' idea.
+
+* [Prabhu Ramachandran] Help
+ with (X)Emacs support, threading patches, ideas...
+
+* [Norbert Tretkowski] Help with Debian
+ packaging and distribution.
+
+* [George Sakkis] New matcher for
+ tab-completing named arguments of user-defined functions.
+
+* [Jörgen Stenarson] Wildcard
+ support implementation for searching namespaces.
+
+* [Vivian De Smedt] Debugger enhancements,
+ so that when pdb is activated from within IPython, coloring, tab
+ completion and other features continue to work seamlessly.
+
+* [Scott Tsai] Support for automatic
+ editor invocation on syntax errors (see
+ http://www.scipy.net/roundup/ipython/issue36).
+
+* [Alexander Belchenko] Improvements for win32
+ paging system.
+
+* [Will Maier] Official OpenBSD port. \ No newline at end of file diff --git a/docs/source/development/index.txt b/docs/source/development/index.txt index 0a8feef..c3796e0 100644 --- a/docs/source/development/index.txt +++ b/docs/source/development/index.txt @@ -7,3 +7,4 @@ Development development.txt roadmap.txt + notification_blueprint.txt diff --git a/docs/source/development/notification_blueprint.txt b/docs/source/development/notification_blueprint.txt index 2d2e372..ffb8c73 100644 --- a/docs/source/development/notification_blueprint.txt +++ b/docs/source/development/notification_blueprint.txt @@ -1,4 +1,4 @@ -.. Notification: +.. _notification: ========================================== IPython.kernel.core.notification blueprint ========================================== @@ -11,37 +11,39 @@ The :mod:`IPython.kernel.core.notification` module will provide a simple impleme Functional Requirements ======================= The notification center must: - * Provide synchronous notification of events to all registered observers. - * Provide typed or labeled notification types - * Allow observers to register callbacks for individual or all notification types - * Allow observers to register callbacks for events from individual or all notifying objects - * Notification to the observer consists of the notification type, notifying object and user-supplied extra information [implementation: as keyword parameters to the registered callback] - * Perform as O(1) in the case of no registered observers. - * Permit out-of-process or cross-network extension. - + * Provide synchronous notification of events to all registered observers. 
 + * Provide typed or labeled notification types
 + * Allow observers to register callbacks for individual or all notification types
 + * Allow observers to register callbacks for events from individual or all notifying objects
 + * Notification to the observer consists of the notification type, notifying object and user-supplied extra information [implementation: as keyword parameters to the registered callback]
 + * Perform as O(1) in the case of no registered observers.
 + * Permit out-of-process or cross-network extension.
 + 
What's not included
==============================================================
As written, the :mod:`IPython.kernel.core.notification` module does not:
 - * Provide out-of-process or network notifications [these should be handled by a separate, Twisted aware module in :mod:`IPython.kernel`].
 - * Provide zope.interface-style interfaces for the notification system [these should also be provided by the :mod:`IPython.kernel` module]
 - 
 + * Provide out-of-process or network notifications [these should be handled by a separate, Twisted aware module in :mod:`IPython.kernel`].
 + * Provide zope.interface-style interfaces for the notification system [these should also be provided by the :mod:`IPython.kernel` module]
 + 
Use Cases
=========
The following use cases describe the main intended uses of the notification module and illustrate the main success scenario for each use case:
 - 1. Dwight Schroot is writing a frontend for the IPython project. His frontend is stuck in the stone age and must communicate synchronously with an IPython.kernel.core.Interpreter instance. Because code is executed in blocks by the Interpreter, Dwight's UI freezes every time he executes a long block of code. To keep track of the progress of his long running block, Dwight adds the following code to his frontend's set-up code::
 -      from IPython.kernel.core.notification import NotificationCenter
 -      center = NotificationCenter.sharedNotificationCenter
 -      center.registerObserver(self, type=IPython.kernel.core.Interpreter.STDOUT_NOTIFICATION_TYPE, notifying_object=self.interpreter, callback=self.stdout_notification)
 -      
 - and elsewhere in his front end::
 -      def stdout_notification(self, type, notifying_object, out_string=None):
 -          self.writeStdOut(out_string)
 -      
 - If everything works, the Interpreter will (according to its published API) fire a notification via the :data:`IPython.kernel.core.notification.sharedCenter` of type :const:`STD_OUT_NOTIFICATION_TYPE` before writing anything to stdout [it's up to the Intereter implementation to figure out when to do this]. The notificaiton center will then call the registered callbacks for that event type (in this case, Dwight's frontend's stdout_notification method). Again, according to its API, the Interpreter provides an additional keyword argument when firing the notificaiton of out_string, a copy of the string it will write to stdout.
 - 
 - Like magic, Dwight's frontend is able to provide output, even during long-running calculations. Now if Jim could just convince Dwight to use Twisted...
 - 
 - 2. Boss Hog is writing a frontend for the IPython project. Because Boss Hog is stuck in the stone age, his frontend will be written in a new Fortran-like dialect of python and will run only from the command line. Because he doesn't need any fancy notification system and is used to worrying about every cycle on his rat-wheel powered mini, Boss Hog is adamant that the new notification system not produce any performance penalty. 
As they say in Hazard county, there's no such thing as a free lunch. If he wanted zero overhead, he should have kept using IPython 0.8. Instead, those tricky Duke boys slide in a suped-up bridge-out jumpin' awkwardly confederate-lovin' notification module that imparts only a constant (and small) performance penalty when the Interpreter (or any other object) fires an event for which there are no registered observers. Of course, the same notificaiton-enabled Interpreter can then be used in frontends that require notifications, thus saving the IPython project from a nasty civil war.
 - 
 - 3. Barry is wrting a frontend for the IPython project. Because Barry's front end is the *new hotness*, it uses an asynchronous event model to communicate with a Twisted :mod:`~IPython.kernel.engineservice` that communicates with the IPython :class:`~IPython.kernel.core.interpreter.Interpreter`. Using the :mod:`IPython.kernel.notification` module, an asynchronous wrapper on the :mod:`IPython.kernel.core.notification` module, Barry's frontend can register for notifications from the interpreter that are delivered asynchronously. Even if Barry's frontend is running on a separate process or even host from the Interpreter, the notifications are delivered, as if by dark and twisted magic. Just like Dwight's frontend, Barry's frontend can now recieve notifications of e.g. writing to stdout/stderr, opening/closing an external file, an exception in the executing code, etc. \ No newline at end of file
 + 1. Dwight Schroot is writing a frontend for the IPython project. His frontend is stuck in the stone age and must communicate synchronously with an IPython.kernel.core.Interpreter instance. Because code is executed in blocks by the Interpreter, Dwight's UI freezes every time he executes a long block of code. To keep track of the progress of his long running block, Dwight adds the following code to his frontend's set-up code::
 + 
 +      from IPython.kernel.core.notification import NotificationCenter
 +      center = NotificationCenter.sharedNotificationCenter
 +      center.registerObserver(self, type=IPython.kernel.core.Interpreter.STDOUT_NOTIFICATION_TYPE, notifying_object=self.interpreter, callback=self.stdout_notification)
 + 
 + and elsewhere in his front end::
 + 
 +      def stdout_notification(self, type, notifying_object, out_string=None):
 +          self.writeStdOut(out_string)
 + 
 + If everything works, the Interpreter will (according to its published API) fire a notification via the :data:`IPython.kernel.core.notification.sharedCenter` of type :const:`STDOUT_NOTIFICATION_TYPE` before writing anything to stdout [it's up to the Interpreter implementation to figure out when to do this]. The notification center will then call the registered callbacks for that event type (in this case, Dwight's frontend's stdout_notification method). Again, according to its API, the Interpreter provides an additional keyword argument when firing the notification of out_string, a copy of the string it will write to stdout.
 + 
 + Like magic, Dwight's frontend is able to provide output, even during long-running calculations. Now if Jim could just convince Dwight to use Twisted...
 + 
 + 2. Boss Hog is writing a frontend for the IPython project. Because Boss Hog is stuck in the stone age, his frontend will be written in a new Fortran-like dialect of python and will run only from the command line. 
Because he doesn't need any fancy notification system and is used to worrying about every cycle on his rat-wheel powered mini, Boss Hog is adamant that the new notification system not produce any performance penalty. As they say in Hazard county, there's no such thing as a free lunch. If he wanted zero overhead, he should have kept using IPython 0.8. Instead, those tricky Duke boys slide in a souped-up bridge-out jumpin' awkwardly confederate-lovin' notification module that imparts only a constant (and small) performance penalty when the Interpreter (or any other object) fires an event for which there are no registered observers. Of course, the same notification-enabled Interpreter can then be used in frontends that require notifications, thus saving the IPython project from a nasty civil war.
 + 
 + 3. Barry is writing a frontend for the IPython project. Because Barry's front end is the *new hotness*, it uses an asynchronous event model to communicate with a Twisted :mod:`~IPython.kernel.engineservice` that communicates with the IPython :class:`~IPython.kernel.core.interpreter.Interpreter`. Using the :mod:`IPython.kernel.notification` module, an asynchronous wrapper on the :mod:`IPython.kernel.core.notification` module, Barry's frontend can register for notifications from the interpreter that are delivered asynchronously. Even if Barry's frontend is running on a separate process or even host from the Interpreter, the notifications are delivered, as if by dark and twisted magic. Just like Dwight's frontend, Barry's frontend can now receive notifications of e.g. writing to stdout/stderr, opening/closing an external file, an exception in the executing code, etc. \ No newline at end of file
diff --git a/docs/source/development/roadmap.txt b/docs/source/development/roadmap.txt index f6ee969..f74372e 100644 --- a/docs/source/development/roadmap.txt +++ b/docs/source/development/roadmap.txt @@ -32,16 +32,21 @@ IPython is implemented using a distributed set of processes that communicate usi We need to build a system that makes it trivial for users to start and manage IPython processes. This system should have the following properties: - * It should possible to do everything through an extremely simple API that users - can call from their own Python script. No shell commands should be needed. - * This simple API should be configured using standard .ini files. - * The system should make it possible to start processes using a number of different - approaches: SSH, PBS/Torque, Xgrid, Windows Server, mpirun, etc. - * The controller and engine processes should each have a daemon for monitoring, - signaling and clean up. - * The system should be secure. - * The system should work under all the major operating systems, including - Windows. +* It should be possible to do everything through an extremely simple API that users + can call from their own Python script. No shell commands should be needed. + +* This simple API should be configured using standard .ini files. + +* The system should make it possible to start processes using a number of different + approaches: SSH, PBS/Torque, Xgrid, Windows Server, mpirun, etc. + +* The controller and engine processes should each have a daemon for monitoring, + signaling and clean up. + +* The system should be secure. + +* The system should work under all the major operating systems, including + Windows. Initial work has begun on the daemon infrastructure, and some of the needed logic is contained in the ipcluster script. 
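For concreteness, a minimal sketch of the workflow the current pieces already
support (the ``-n`` option of the :command:`ipcluster` script and the automatic
FURL lookup are assumptions based on the 0.9 behavior described in the
changelog, and may differ between releases)::

    # Start a controller and four local engines first, e.g. from a shell:
    #   $ ipcluster -n 4          (flags assumed; see ipcluster --help)
    from IPython.kernel import client

    mec = client.MultiEngineClient()   # finds the FURL file automatically
    mec.execute('import os; pid = os.getpid()')
    print mec.pull('pid')              # one pid per engine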
@@ -57,12 +62,15 @@ Security Currently, IPython has no built-in security or security model. Because we would like IPython to be usable on public computer systems and over wide area networks, we need to come up with a robust solution for security. Here are some of the specific things that need to be included: - * User authentication between all processes (engines, controller and clients). - * Optional TSL/SSL based encryption of all communication channels. - * A good way of picking network ports so multiple users on the same system can - run their own controller and engines without interfering with those of others. - * A clear model for security that enables users to evaluate the security risks - associated with using IPython in various manners. +* User authentication between all processes (engines, controller and clients). + +* Optional TLS/SSL based encryption of all communication channels. + +* A good way of picking network ports so multiple users on the same system can + run their own controller and engines without interfering with those of others. + +* A clear model for security that enables users to evaluate the security risks + associated with using IPython in various manners. For the implementation of this, we plan on using Twisted's support for SSL and authentication. One thing that we really should look at is the `Foolscap`_ network protocol, which provides many of these things out of the box. @@ -70,6 +78,9 @@ For the implementation of this, we plan on using Twisted's support for SSL and a The security work needs to be done in conjunction with other network protocol stuff. +As of the 0.9 release of IPython, we are using Foolscap and we have implemented +a full security model. + Latent performance issues ------------------------- @@ -82,7 +93,7 @@ Currently, we have a number of performance issues that are waiting to bite users * Currently, the client to controller connections are done through XML-RPC using HTTP 1.0. This is very inefficient as XML-RPC is a very verbose protocol and each request must be handled with a new connection. We need to move these network - connections over to PB or Foolscap. + connections over to PB or Foolscap. Done! * We currently don't have a good way of handling large objects in the controller. The biggest problem is that because we don't have any way of streaming objects, we get lots of temporary copies in the low-level buffers. We need to implement diff --git a/docs/source/faq.txt b/docs/source/faq.txt index d6aa0a5..321cb06 100644 --- a/docs/source/faq.txt +++ b/docs/source/faq.txt @@ -16,10 +16,13 @@ Will IPython speed my Python code up? Yes and no. When converting a serial code to run in parallel, there are often many difficult questions that need to be answered, such as: - * How should data be decomposed onto the set of processors? - * What are the data movement patterns? - * Can the algorithm be structured to minimize data movement? - * Is dynamic load balancing important? +* How should data be decomposed onto the set of processors? + +* What are the data movement patterns? + +* Can the algorithm be structured to minimize data movement? + +* Is dynamic load balancing important? We can't answer such questions for you. This is the hard (but fun) work of parallel computing. But, once you understand these things IPython will make it easier for you to @@ -28,9 +31,7 @@ resulting parallel code interactively. 
With that said, if your problem is trivial to parallelize, IPython has a number of different interfaces that will enable you to parallelize things in almost no time at -all. A good place to start is the ``map`` method of our `multiengine interface`_. - -.. _multiengine interface: ./parallel_multiengine +all. A good place to start is the ``map`` method of our :class:`MultiEngineClient`. What is the best way to use MPI from Python? -------------------------------------------- What about all the other parallel computing packages in Python? Some of the unique characteristics of IPython are: - * IPython is the only architecture that abstracts out the notion of a - parallel computation in such a way that new models of parallel computing - can be explored quickly and easily. If you don't like the models we - provide, you can simply create your own using the capabilities we provide. - * IPython is asynchronous from the ground up (we use `Twisted`_). - * IPython's architecture is designed to avoid subtle problems - that emerge because of Python's global interpreter lock (GIL). - * While IPython'1 architecture is designed to support a wide range - of novel parallel computing models, it is fully interoperable with - traditional MPI applications. - * IPython has been used and tested extensively on modern supercomputers. - * IPython's networking layers are completely modular. Thus, is - straightforward to replace our existing network protocols with - high performance alternatives (ones based upon Myranet/Infiniband). - * IPython is designed from the ground up to support collaborative - parallel computing. This enables multiple users to actively develop - and run the *same* parallel computation. - * Interactivity is a central goal for us. While IPython does not have - to be used interactivly, is can be. - +* IPython is the only architecture that abstracts out the notion of a + parallel computation in such a way that new models of parallel computing + can be explored quickly and easily. If you don't like the models we + provide, you can simply create your own using the capabilities we provide. + +* IPython is asynchronous from the ground up (we use `Twisted`_). + +* IPython's architecture is designed to avoid subtle problems + that emerge because of Python's global interpreter lock (GIL). + +* While IPython's architecture is designed to support a wide range + of novel parallel computing models, it is fully interoperable with + traditional MPI applications. + +* IPython has been used and tested extensively on modern supercomputers. + +* IPython's networking layers are completely modular. Thus, it is + straightforward to replace our existing network protocols with + high performance alternatives (ones based upon Myrinet/InfiniBand). + +* IPython is designed from the ground up to support collaborative + parallel computing. This enables multiple users to actively develop + and run the *same* parallel computation. + +* Interactivity is a central goal for us. While IPython does not have + to be used interactively, it can be. + .. _Twisted: http://www.twistedmatrix.com Why is the IPython controller a bottleneck in my parallel calculation? @@ -71,13 +79,17 @@ too much data is being pushed and pulled to and from the engines. If your algorithm is structured in this way, you really should think about alternative ways of handling the data movement. Here are some ideas: - 1. Have the engines write data to files on the locals disks of the engines. - 2. 
Have the engines write data to files on a file system that is shared by - the engines. - 3. Have the engines write data to a database that is shared by the engines. - 4. Simply keep data in the persistent memory of the engines and move the - computation to the data (rather than the data to the computation). - 5. See if you can pass data directly between engines using MPI. +1. Have the engines write data to files on the local disks of the engines. + +2. Have the engines write data to files on a file system that is shared by + the engines. + +3. Have the engines write data to a database that is shared by the engines. + +4. Simply keep data in the persistent memory of the engines and move the + computation to the data (rather than the data to the computation). + +5. See if you can pass data directly between engines using MPI. Isn't Python slow to be used for high-performance parallel computing? --------------------------------------------------------------------- diff --git a/docs/source/history.txt b/docs/source/history.txt index 29f2596..439f8e4 100644 --- a/docs/source/history.txt +++ b/docs/source/history.txt @@ -7,50 +7,32 @@ History Origins ======= -The current IPython system grew out of the following three projects: - - * [ipython] by Fernando Pérez. I was working on adding - Mathematica-type prompts and a flexible configuration system - (something better than $PYTHONSTARTUP) to the standard Python - interactive interpreter. - * [IPP] by Janko Hauser. Very well organized, great usability. Had - an old help system. IPP was used as the 'container' code into - which I added the functionality from ipython and LazyPython. - * [LazyPython] by Nathan Gray. Simple but very powerful. The quick - syntax (auto parens, auto quotes) and verbose/colored tracebacks - were all taken from here. - -When I found out about IPP and LazyPython I tried to join all three -into a unified system. I thought this could provide a very nice -working environment, both for regular programming and scientific -computing: shell-like features, IDL/Matlab numerics, Mathematica-type -prompt history and great object introspection and help facilities. I -think it worked reasonably well, though it was a lot more work than I -had initially planned. - - -Current status -============== - -The above listed features work, and quite well for the most part. But -until a major internal restructuring is done (see below), only bug -fixing will be done, no other features will be added (unless very minor -and well localized in the cleaner parts of the code). - -IPython consists of some 18000 lines of pure python code, of which -roughly two thirds is reasonably clean. The rest is, messy code which -needs a massive restructuring before any further major work is done. -Even the messy code is fairly well documented though, and most of the -problems in the (non-existent) class design are well pointed to by a -PyChecker run. So the rewriting work isn't that bad, it will just be -time-consuming. - - -Future ------- - -See the separate new_design document for details. Ultimately, I would -like to see IPython become part of the standard Python distribution as a -'big brother with batteries' to the standard Python interactive -interpreter. But that will never happen with the current state of the -code, so all contributions are welcome. \ No newline at end of file +IPython was started in 2001 by Fernando Pérez. IPython as we know it +today grew out of the following three projects: + +* ipython by Fernando Pérez. 
I was working on adding + Mathematica-type prompts and a flexible configuration system + (something better than $PYTHONSTARTUP) to the standard Python + interactive interpreter. +* IPP by Janko Hauser. Very well organized, great usability. Had + an old help system. IPP was used as the 'container' code into + which I added the functionality from ipython and LazyPython. +* LazyPython by Nathan Gray. Simple but very powerful. The quick + syntax (auto parens, auto quotes) and verbose/colored tracebacks + were all taken from here. + +Here is how Fernando describes it: + + When I found out about IPP and LazyPython I tried to join all three + into a unified system. I thought this could provide a very nice + working environment, both for regular programming and scientific + computing: shell-like features, IDL/Matlab numerics, Mathematica-type + prompt history and great object introspection and help facilities. I + think it worked reasonably well, though it was a lot more work than I + had initially planned. + +Today and how we got here +========================= + +This needs to be filled in. + diff --git a/docs/source/install/advanced.txt b/docs/source/install/advanced.txt deleted file mode 100644 index 3d37428..0000000 --- a/docs/source/install/advanced.txt +++ /dev/null @@ -1,138 +0,0 @@ -========================================= -Advanced installation options for IPython -========================================= - -.. contents:: - -Introduction -============ - -IPython enables parallel applications to be developed in Python. This document -describes the steps required to install IPython. For an overview of IPython's -architecture as it relates to parallel computing, see our :ref:`introduction to -parallel computing with IPython `. - -Please let us know if you have problems installing IPython or any of its -dependencies. We have tested IPython extensively with Python 2.4 and 2.5. - -.. warning:: - - IPython will not work with Python 2.3 or below. - -IPython has three required dependencies: - - 1. `IPython`__ - 2. `Zope Interface`__ - 3. `Twisted`__ - 4. `Foolscap`__ - -.. __: http://ipython.scipy.org -.. __: http://pypi.python.org/pypi/zope.interface -.. __: http://twistedmatrix.com -.. __: http://foolscap.lothar.com/trac - -It also has the following optional dependencies: - - 1. pexpect (used for certain tests) - 2. nose (used to run our test suite) - 3. sqlalchemy (used for database support) - 4. mpi4py (for MPI support) - 5. Sphinx and pygments (for building documentation) - 6. pyOpenSSL (for security) - -Getting IPython -================ - -IPython development has been moved to `Launchpad`_. The development branch of IPython can be checkout out using `Bazaar`_:: - - $ bzr branch lp:///~ipython/ipython/ipython1-dev - -.. _Launchpad: http://www.launchpad.net/ipython -.. _Bazaar: http://bazaar-vcs.org/ - -Installation using setuptools -============================= - -The easiest way of installing IPython and its dependencies is using -`setuptools`_. If you have setuptools installed you can simple use the ``easy_install`` -script that comes with setuptools (this should be on your path if you have setuptools):: - - $ easy_install ipython1 - -This will download and install the latest version of IPython as well as all of its dependencies. For this to work, you will need to be connected to the internet when you run this command. This will install everything info the ``site-packages`` directory of your Python distribution. If this is the system wide Python, you will likely need admin privileges. 
For information about installing Python packages to other locations (that don't require admin privileges) see the `setuptools`_ documentation. - -.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools - -If you don't want `setuptools`_ to automatically install the dependencies, you can also get the dependencies yourself, using ``easy_install``:: - - $ easy_install IPython - $ easy_install zope.interface - $ easy_install Twisted - $ easy_install foolscap - -or by simply downloading and installing the dependencies manually. - -If you want to have secure (highly recommended) network connections, you will also -need to get `pyOpenSSL`__, version 0.6, or just do: - - $ easy_install ipython1[security] - -.. hint:: If you want to do development on IPython and want to always - run off your development branch, you can run - :command:`python setupegg.py develop` in the IPython source tree. - -.. __: http://pyopenssl.sourceforge.net/ - -Installation using plain distutils -================================== - -If you don't have `setuptools`_ installed or don't want to use it, you can also install IPython and its dependencies using ``distutils``. In this approach, you will need to get the most recent stable releases of IPython's dependencies and install each of them by doing:: - - $ python setup.py install - -The dependencies need to be installed before installing IPython. After installing the dependencies, install IPython by running:: - - $ cd ipython1-dev - $ python setup.py install - -.. note:: Here we are using setup.py rather than setupegg.py. - -.. _install_testing: - -Testing -======= - -Once you have completed the installation of the IPython kernel you can run our test suite -with the command:: - - trial ipython1 - -Or if you have `nose`__ installed:: - - nosetests -v ipython1 - -The ``trial`` command is part of Twisted and allows asynchronous network based -applications to be tested using Python's unittest framework. Please let us know -if the tests do not pass. The best way to get in touch with us is on the `IPython -developer mailing list`_. - -.. __: http://somethingaboutorange.com/mrl/projects/nose/ -.. _IPython developer mailing list: http://projects.scipy.org/mailman/listinfo/ipython-dev - -MPI Support -=========== - -IPython includes optional support for the Message Passing Interface (`MPI`_), -which enables the IPython Engines to pass data between each other using `MPI`_. To use MPI with IPython, the minimal requirements are: - - * An MPI implementation (we recommend `Open MPI`_) - * A way to call MPI (we recommend `mpi4py`_) - -But, IPython should work with any MPI implementation and with any code -(Python/C/C++/Fortran) that uses MPI. Please contact us for more information about -this. - -.. _MPI: http://www-unix.mcs.anl.gov/mpi/ -.. _mpi4py: http://mpi4py.scipy.org/ -.. _Open MPI: http://www.open-mpi.org/ - diff --git a/docs/source/install/basic.txt b/docs/source/install/basic.txt deleted file mode 100644 index bf3298e..0000000 --- a/docs/source/install/basic.txt +++ /dev/null @@ -1,286 +0,0 @@ -============================= -Basic installation of IPython -============================= - -Installation -============ - -Instant instructions --------------------- - -If you are of the impatient kind, under Linux/Unix simply untar/unzip -the download, then install with 'python setup.py install'. Under -Windows, double-click on the provided .exe binary installer. 
- -Then, take a look at Customization_ section for configuring things -optimally and `Quick tips`_ for quick tips on efficient use of -IPython. You can later refer to the rest of the manual for all the -gory details. - -See the notes in upgrading_ section for upgrading IPython versions. - - - Detailed Unix instructions (Linux, Mac OS X, etc.) - -For RPM based systems, simply install the supplied package in the usual -manner. If you download the tar archive, the process is: - - 1. Unzip/untar the ipython-XXX.tar.gz file wherever you want (XXX is - the version number). It will make a directory called ipython-XXX. - Change into that directory where you will find the files README - and setup.py. Once you've completed the installation, you can - safely remove this directory. - 2. If you are installing over a previous installation of version - 0.2.0 or earlier, first remove your $HOME/.ipython directory, - since the configuration file format has changed somewhat (the '=' - were removed from all option specifications). Or you can call - ipython with the -upgrade option and it will do this automatically - for you. - 3. IPython uses distutils, so you can install it by simply typing at - the system prompt (don't type the $):: - - $ python setup.py install - - Note that this assumes you have root access to your machine. If - you don't have root access or don't want IPython to go in the - default python directories, you'll need to use the ``--home`` option - (or ``--prefix``). For example:: - - $ python setup.py install --home $HOME/local - - will install IPython into $HOME/local and its subdirectories - (creating them if necessary). - You can type:: - - $ python setup.py --help - - for more details. - - Note that if you change the default location for ``--home`` at - installation, IPython may end up installed at a location which is - not part of your $PYTHONPATH environment variable. In this case, - you'll need to configure this variable to include the actual - directory where the IPython/ directory ended (typically the value - you give to ``--home`` plus /lib/python). - - -Mac OSX information -------------------- - -Under OSX, there is a choice you need to make. Apple ships its own build -of Python, which lives in the core OSX filesystem hierarchy (/System/Library/Frameworks/Python.framework). You can -also manually install a separate Python, either purely by hand -(typically in /usr/local), by using Fink or DarwinPorts, which put everything under /sw or /opt respectively, or using the python.org "Framework" python, which installs a framework similar to the system python in /Library/Frameworks. The Enthought Python Distribution (http://www.enthought.com/products/epd.php), uses the python.org "Framework" python and thus also installs into /Library/Frameworks. Which route to follow is a matter of personal preference, as I've seen users who favor each of the approaches. - -For users of OS X 10.5 (Leopard), the system python installation contains support for `DTrace`_, a kernel-level profiling system integrated into OS X 10.5. This facility provides significant advantage to developers and users interested in high-performance computing by using the system python. - -.. _DTrace: http://www.apple.com/macosx/technology/unix.html - -IPython is known to work with all the above installation options. As such, we do not endorse one choice over the others. Here we will simply list the known installation issues under OSX, along with their solutions. 
- -This page: http://geosci.uchicago.edu/~tobis/pylab.html contains -information on this topic, with additional details on how to make -IPython and matplotlib play nicely under OSX. - - -GUI problems (older versions of OS X) -------------------------------------- - -The following instructions apply to an install of IPython under OSX before OS X 10.5 (users of OS X 10.5 see [#]_ ) by unpacking the .tar.gz distribution and installing it for the default Python interpreter shipped by Apple. If you are using a fink or DarwinPorts install, they will take care of these details for you, by installing IPython against their Python. - -IPython offers various forms of support for interacting with graphical -applications from the command line, from simple Tk apps (which are in -principle always supported by Python) to interactive control of WX, Qt -and GTK apps. Under OSX, however, this requires that ipython is -installed by calling the special pythonw script at installation time, -which takes care of coordinating things with Apple's graphical environment. - -So when installing under OSX, it is best to use the following command:: - - $ sudo pythonw setup.py install --install-scripts=/usr/local/bin - -or:: - - $ sudo pythonw setup.py install --install-scripts=/usr/bin - -depending on where you like to keep hand-installed executables. - -The resulting script will have an appropriate shebang line (the first -line in the script whic begins with #!...) such that the ipython -interpreter can interact with the OS X GUI. If the installed version -does not work and has a shebang line that points to, for example, just -/usr/bin/python, then you might have a stale, cached version in your -build/scripts- directory. Delete that directory and -rerun the setup.py. - -It is also a good idea to use the special flag ``--install-scripts`` as -indicated above, to ensure that the ipython scripts end up in a location -which is part of your $PATH. Otherwise Apple's Python will put the -scripts in an internal directory not available by default at the command -line (if you use /usr/local/bin, you need to make sure this is in your -$PATH, which may not be true by default). - -.. [#] Users of OS X 10.5 who choose to use the system-installed python should install IPython using ``sudo python setupegg.py install`` from the IPython source directory or from `PyPI`_ using ``sudo easy_install ipython``. - -.. _PyPI: http://pypi.python.org/pypi - -Readline problems ------------------ - -By default, the Python version shipped by Apple before OS X 10.5 does not include the readline library, so central to IPython's behavior. If you install -IPython against Apple's Python, you will not have arrow keys, tab -completion, etc. For Mac OSX 10.3 (Panther), you can find a prebuilt -readline library here: -http://pythonmac.org/packages/readline-5.0-py2.3-macosx10.3.zip - -If you are using OSX 10.4 (Tiger), after installing this package you -need to either: - - 1. move readline.so from /Library/Python/2.3 to - /Library/Python/2.3/site-packages, or - 2. install http://pythonmac.org/packages/TigerPython23Compat.pkg.zip - - -Beginning with OS X 10.5, Apple's python installation uses libedit, a BSD-licensed not-quite-compatible readline replacement. As of IPython 0.9, many of the issues related to the differences between readline and libedit have been resolved. If you find that you are experiencing readline-related issues (e.g. 
problems with tab-completion, history movement, or line editing), you can install Ludwig Schwartz's readline package which effectively replaces libedit with readline for packages installed via setuptools. If you installed IPython from the source directory using:: - - sudo python setupegg.py - -or from PyPI with:: - - sudo easy_install ipython - -then you can install the readline egg via [#]_:: - - sudo easy_install readline - -If needed, the readline egg can be build and installed from source (see the -wiki page at http://ipython.scipy.org/moin/InstallationOSXLeopard). - -Users installing against Fink or DarwinPorts's Python or a properly hand-built python installation should not have this problem. - -.. [#] If you have installed SVN 1.5, you will also to install a patch to setuptools before installing the readline egg. Use ``sudo easy_install http://www.jaraco.com/ASP/eggs/setuptools-0.6c8_svn15fix.egg``. - -DarwinPorts ------------ - -I report here a message from an OSX user, who suggests an alternative -means of using IPython under this operating system with good results. -Please let me know of any updates that may be useful for this section. -His message is reproduced verbatim below: - - From: Markus Banfi - - As a MacOS X (10.4.2) user I prefer to install software using - DawinPorts instead of Fink. I had no problems installing ipython - with DarwinPorts. It's just: - - sudo port install py-ipython - - It automatically resolved all dependencies (python24, readline, - py-readline). So far I did not encounter any problems with the - DarwinPorts port of ipython. - - - -Windows instructions --------------------- - -Some of IPython's very useful features are: - - * Integrated readline support (Tab-based file, object and attribute - completion, input history across sessions, editable command line, - etc.) - * Coloring of prompts, code and tracebacks. - -.. _pyreadline: - -These, by default, are only available under Unix-like operating systems. -However, thanks to Gary Bishop's work, Windows XP/2k users can also -benefit from them. His readline library originally implemented both GNU -readline functionality and color support, so that IPython under Windows -XP/2k can be as friendly and powerful as under Unix-like environments. - -This library, now named PyReadline, has been absorbed by the IPython -team (Jörgen Stenarson, in particular), and it continues to be developed -with new features, as well as being distributed directly from the -IPython site. - -The PyReadline extension requires CTypes and the windows IPython -installer needs PyWin32, so in all you need: - - 1. PyWin32 from http://sourceforge.net/projects/pywin32. - 2. PyReadline for Windows from - http://ipython.scipy.org/moin/PyReadline/Intro. That page contains - further details on using and configuring the system to your liking. - 3. Finally, only if you are using Python 2.3 or 2.4, you need CTypes - from http://starship.python.net/crew/theller/ctypes(you must use - version 0.9.1 or newer). This package is included in Python 2.5, - so you don't need to manually get it if your Python version is 2.5 - or newer. - -Warning about a broken readline-like library: several users have -reported problems stemming from using the pseudo-readline library at -http://newcenturycomputers.net/projects/readline.html. This is a broken -library which, while called readline, only implements an incomplete -subset of the readline API. Since it is still called readline, it fools -IPython's detection mechanisms and causes unpredictable crashes later. 
-If you wish to use IPython under Windows, you must NOT use this library, -which for all purposes is (at least as of version 1.6) terminally broken. - - -Installation procedure ----------------------- - -Once you have the above installed, from the IPython download directory -grab the ipython-XXX.win32.exe file, where XXX represents the version -number. This is a regular windows executable installer, which you can -simply double-click to install. It will add an entry for IPython to your -Start Menu, as well as registering IPython in the Windows list of -applications, so you can later uninstall it from the Control Panel. - -IPython tries to install the configuration information in a directory -named .ipython (_ipython under Windows) located in your 'home' -directory. IPython sets this directory by looking for a HOME environment -variable; if such a variable does not exist, it uses HOMEDRIVE\HOMEPATH -(these are always defined by Windows). This typically gives something -like C:\Documents and Settings\YourUserName, but your local details may -vary. In this directory you will find all the files that configure -IPython's defaults, and you can put there your profiles and extensions. -This directory is automatically added by IPython to sys.path, so -anything you place there can be found by import statements. - - -Upgrading ---------- - -For an IPython upgrade, you should first uninstall the previous version. -This will ensure that all files and directories (such as the -documentation) which carry embedded version strings in their names are -properly removed. - - -Manual installation under Win32 -------------------------------- - -In case the automatic installer does not work for some reason, you can -download the ipython-XXX.tar.gz file, which contains the full IPython -source distribution (the popular WinZip can read .tar.gz files). After -uncompressing the archive, you can install it at a command terminal just -like any other Python module, by using 'python setup.py install'. - -After the installation, run the supplied win32_manual_post_install.py -script, which creates the necessary Start Menu shortcuts for you. - - -.. upgrading: - -Upgrading from a previous version ---------------------------------- - -If you are upgrading from a previous version of IPython, you may want -to upgrade the contents of your ~/.ipython directory. Just run -%upgrade, look at the diffs and delete the suggested files manually, -if you think you can lose the old versions. %upgrade will never -overwrite or delete anything. - - diff --git a/docs/source/install/index.txt b/docs/source/install/index.txt index 63dfae4..6ddc5b0 100644 --- a/docs/source/install/index.txt +++ b/docs/source/install/index.txt @@ -7,5 +7,4 @@ Installation .. toctree:: :maxdepth: 2 - basic.txt - advanced.txt + install.txt diff --git a/docs/source/license_and_copyright.txt b/docs/source/license_and_copyright.txt index eec41bb..1c9840e 100644 --- a/docs/source/license_and_copyright.txt +++ b/docs/source/license_and_copyright.txt @@ -1,56 +1,82 @@ .. _license: -============================= -License and Copyright -============================= +===================== +License and Copyright +===================== -This files needs to be updated to reflect what the new COPYING.txt files says about our license and copyright! +License +======= -IPython is released under the terms of the BSD license, whose general -form can be found at: http://www.opensource.org/licenses/bsd-license.php. 
The full text of the
-IPython license is reproduced below::
+IPython is licensed under the terms of the new or revised BSD license, as follows::
 
-    IPython is released under a BSD-type license.
+    Copyright (c) 2008, IPython Development Team
 
-    Copyright (c) 2001, 2002, 2003, 2004 Fernando Perez
-    .
+    All rights reserved.
 
-    Copyright (c) 2001 Janko Hauser and
-    Nathaniel Gray .
+    Redistribution and use in source and binary forms, with or without modification,
+    are permitted provided that the following conditions are met:
 
-    All rights reserved.
+    Redistributions of source code must retain the above copyright notice, this list of
+    conditions and the following disclaimer.
+
+    Redistributions in binary form must reproduce the above copyright notice, this list
+    of conditions and the following disclaimer in the documentation and/or other
+    materials provided with the distribution.
+
+    Neither the name of the IPython Development Team nor the names of its contributors
+    may be used to endorse or promote products derived from this software without
+    specific prior written permission.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+    EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+    WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+    IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+    INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+    NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+    WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+    ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+    POSSIBILITY OF SUCH DAMAGE.
+
+About the IPython Development Team
+==================================
+
+Fernando Perez began IPython in 2001 based on code from Janko Hauser
+and Nathaniel Gray. Fernando is still the project lead.
+
+The IPython Development Team is the set of all contributors to the IPython project.
+This includes all of the IPython subprojects. Here is a list of the currently active contributors:
+
+ * Matthieu Brucher
+ * Ondrej Certik
+ * Laurent Dufrechou
+ * Robert Kern
+ * Brian E. Granger
+ * Fernando Perez (project leader)
+ * Benjamin Ragan-Kelley
+ * Ville M. Vainio
+ * Gael Varoquaux
+ * Stefan van der Walt
+ * Tech-X Corporation
+ * Barry Wark
+
+If your name is missing, please add it.
+
+Our Copyright Policy
+====================
+
+IPython uses a shared copyright model. Each contributor maintains copyright over
+their contributions to IPython. But it is important to note that these
+contributions are typically only changes to the repositories. Thus, the IPython
+source code, in its entirety, is not the copyright of any single person or
+institution. Instead, it is the collective copyright of the entire IPython
+Development Team. If individual contributors want to maintain a record of what
+changes/contributions they have specific copyright on, they should indicate their
+copyright in the commit message of the change, when they commit the change to
+one of the IPython repositories.
 
-    Redistribution and use in source and binary forms, with or without
-    modification, are permitted provided that the following conditions
-    are met:
-
-    a. Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
-
-    b. Redistributions in binary form must reproduce the above copyright
-    notice, this list of conditions and the following disclaimer in the
-    documentation and/or other materials provided with the distribution.
-
-    c. Neither the name of the copyright holders nor the names of any
-    contributors to this software may be used to endorse or promote
-    products derived from this software without specific prior written
-    permission.
-
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-    FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-    REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-    INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-    BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-    LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-    LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-    ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-    POSSIBILITY OF SUCH DAMAGE.
-
-Individual authors are the holders of the copyright for their code and
-are listed in each file.
+Miscellaneous
+=============
 
 Some files (DPyGetOpt.py, for example) may be licensed under different
 conditions. Ultimately each file indicates clearly the conditions under
diff --git a/docs/source/overview.txt b/docs/source/overview.txt
index 2b3320d..6ac308d 100644
--- a/docs/source/overview.txt
+++ b/docs/source/overview.txt
@@ -17,133 +17,161 @@ The goal of IPython is to create a comprehensive environment for interactive
 and exploratory computing. To support this goal, IPython has two main
 components:
 
-  * An enhanced interactive Python shell.
-  * An architecture for interactive parallel computing.
+* An enhanced interactive Python shell.
+* An architecture for interactive parallel computing.
 
 All of IPython is open source (released under the revised BSD license).
 
 Enhanced interactive Python shell
 =================================
 
-IPython's interactive shell (`ipython`), has the following goals:
-
-  1. Provide an interactive shell superior to Python's default. IPython
-     has many features for object introspection, system shell access,
-     and its own special command system for adding functionality when
-     working interactively. It tries to be a very efficient environment
-     both for Python code development and for exploration of problems
-     using Python objects (in situations like data analysis).
-  2. Serve as an embeddable, ready to use interpreter for your own
-     programs. IPython can be started with a single call from inside
-     another program, providing access to the current namespace. This
-     can be very useful both for debugging purposes and for situations
-     where a blend of batch-processing and interactive exploration are
-     needed.
-  3. Offer a flexible framework which can be used as the base
-     environment for other systems with Python as the underlying
-     language. Specifically scientific environments like Mathematica,
-     IDL and Matlab inspired its design, but similar ideas can be
-     useful in many fields.
-  4. Allow interactive testing of threaded graphical toolkits. IPython
-     has support for interactive, non-blocking control of GTK, Qt and
-     WX applications via special threading flags. The normal Python
-     shell can only do this for Tkinter applications.
+IPython's interactive shell (:command:`ipython`) has the following goals,
+amongst others:
+
+1. Provide an interactive shell superior to Python's default. IPython
+   has many features for object introspection, system shell access,
+   and its own special command system for adding functionality when
+   working interactively. It tries to be a very efficient environment
+   both for Python code development and for exploration of problems
+   using Python objects (in situations like data analysis).
+
+2. Serve as an embeddable, ready to use interpreter for your own
+   programs. IPython can be started with a single call from inside
+   another program, providing access to the current namespace. This
+   can be very useful both for debugging purposes and for situations
+   where a blend of batch-processing and interactive exploration is
+   needed (a minimal sketch of embedding is shown after this list).
+   New in the 0.9 version of IPython is a reusable wxPython based
+   IPython widget.
+
+3. Offer a flexible framework which can be used as the base
+   environment for other systems with Python as the underlying
+   language. Specifically scientific environments like Mathematica,
+   IDL and Matlab inspired its design, but similar ideas can be
+   useful in many fields.
+
+4. Allow interactive testing of threaded graphical toolkits. IPython
+   has support for interactive, non-blocking control of GTK, Qt and
+   WX applications via special threading flags. The normal Python
+   shell can only do this for Tkinter applications.
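+
+As an illustration of goal 2, here is a minimal sketch of embedding, using
+the :class:`IPShellEmbed` class from :mod:`IPython.Shell` (see the embedding
+section of the manual for the full details)::
+
+    from IPython.Shell import IPShellEmbed
+
+    ipshell = IPShellEmbed()  # a reusable embedded shell instance
+
+    def process(data):
+        result = [x**2 for x in data]
+        # Opens an IPython shell right here, with data and result
+        # visible in the local namespace; exit the shell to resume.
+        ipshell('Inspecting intermediate results...')
+        return result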
 
 Main features of the interactive shell
 --------------------------------------
 
-  * Dynamic object introspection. One can access docstrings, function
-    definition prototypes, source code, source files and other details
-    of any object accessible to the interpreter with a single
-    keystroke (:samp:`?`, and using :samp:`??` provides additional detail).
-  * Searching through modules and namespaces with :samp:`*` wildcards, both
-    when using the :samp:`?` system and via the :samp:`%psearch` command.
-  * Completion in the local namespace, by typing :kbd:`TAB` at the prompt.
-    This works for keywords, modules, methods, variables and files in the
-    current directory. This is supported via the readline library, and
-    full access to configuring readline's behavior is provided.
-    Custom completers can be implemented easily for different purposes
-    (system commands, magic arguments etc.)
-  * Numbered input/output prompts with command history (persistent
-    across sessions and tied to each profile), full searching in this
-    history and caching of all input and output.
-  * User-extensible 'magic' commands. A set of commands prefixed with
-    :samp:`%` is available for controlling IPython itself and provides
-    directory control, namespace information and many aliases to
-    common system shell commands.
-  * Alias facility for defining your own system aliases.
-  * Complete system shell access. Lines starting with :samp:`!` are passed
-    directly to the system shell, and using :samp:`!!` or :samp:`var = !cmd`
-    captures shell output into python variables for further use.
-  * Background execution of Python commands in a separate thread.
-    IPython has an internal job manager called jobs, and a
-    conveninence backgrounding magic function called :samp:`%bg`.
-  * The ability to expand python variables when calling the system
-    shell. In a shell command, any python variable prefixed with :samp:`$` is
-    expanded. A double :samp:`$$` allows passing a literal :samp:`$` to the shell (for
-    access to shell and environment variables like :envvar:`PATH`).
-  * Filesystem navigation, via a magic :samp:`%cd` command, along with a
-    persistent bookmark system (using :samp:`%bookmark`) for fast access to
-    frequently visited directories.
-  * A lightweight persistence framework via the :samp:`%store` command, which
-    allows you to save arbitrary Python variables. These get restored
-    automatically when your session restarts.
-  * Automatic indentation (optional) of code as you type (through the
-    readline library).
-  * Macro system for quickly re-executing multiple lines of previous
-    input with a single name. Macros can be stored persistently via
-    :samp:`%store` and edited via :samp:`%edit`.
-  * Session logging (you can then later use these logs as code in your
-    programs). Logs can optionally timestamp all input, and also store
-    session output (marked as comments, so the log remains valid
-    Python source code).
-  * Session restoring: logs can be replayed to restore a previous
-    session to the state where you left it.
-  * Verbose and colored exception traceback printouts. Easier to parse
-    visually, and in verbose mode they produce a lot of useful
-    debugging information (basically a terminal version of the cgitb
-    module).
-  * Auto-parentheses: callable objects can be executed without
-    parentheses: :samp:`sin 3` is automatically converted to :samp:`sin(3)`.
-  * Auto-quoting: using :samp:`,`, or :samp:`;` as the first character forces
-    auto-quoting of the rest of the line: :samp:`,my_function a b` becomes
-    automatically :samp:`my_function("a","b")`, while :samp:`;my_function a b`
-    becomes :samp:`my_function("a b")`.
-  * Extensible input syntax. You can define filters that pre-process
-    user input to simplify input in special situations. This allows
-    for example pasting multi-line code fragments which start with
-    :samp:`>>>` or :samp:`...` such as those from other python sessions or the
-    standard Python documentation.
-  * Flexible configuration system. It uses a configuration file which
-    allows permanent setting of all command-line options, module
-    loading, code and file execution. The system allows recursive file
-    inclusion, so you can have a base file with defaults and layers
-    which load other customizations for particular projects.
-  * Embeddable. You can call IPython as a python shell inside your own
-    python programs. This can be used both for debugging code or for
-    providing interactive abilities to your programs with knowledge
-    about the local namespaces (very useful in debugging and data
-    analysis situations).
-  * Easy debugger access. You can set IPython to call up an enhanced
-    version of the Python debugger (pdb) every time there is an
-    uncaught exception. This drops you inside the code which triggered
-    the exception with all the data live and it is possible to
-    navigate the stack to rapidly isolate the source of a bug. The
-    :samp:`%run` magic command (with the :samp:`-d` option) can run any script under
-    pdb's control, automatically setting initial breakpoints for you.
-    This version of pdb has IPython-specific improvements, including
-    tab-completion and traceback coloring support. For even easier
-    debugger access, try :samp:`%debug` after seeing an exception. winpdb is
-    also supported, see ipy_winpdb extension.
-  * Profiler support.
You can run single statements (similar to - :samp:`profile.run()`) or complete programs under the profiler's control. - While this is possible with standard cProfile or profile modules, - IPython wraps this functionality with magic commands (see :samp:`%prun` - and :samp:`%run -p`) convenient for rapid interactive work. - * Doctest support. The special :samp:`%doctest_mode` command toggles a mode - that allows you to paste existing doctests (with leading :samp:`>>>` - prompts and whitespace) and uses doctest-compatible prompts and - output, so you can use IPython sessions as doctest code. +* Dynamic object introspection. One can access docstrings, function + definition prototypes, source code, source files and other details + of any object accessible to the interpreter with a single + keystroke (:samp:`?`, and using :samp:`??` provides additional detail). + +* Searching through modules and namespaces with :samp:`*` wildcards, both + when using the :samp:`?` system and via the :samp:`%psearch` command. + +* Completion in the local namespace, by typing :kbd:`TAB` at the prompt. + This works for keywords, modules, methods, variables and files in the + current directory. This is supported via the readline library, and + full access to configuring readline's behavior is provided. + Custom completers can be implemented easily for different purposes + (system commands, magic arguments etc.) + +* Numbered input/output prompts with command history (persistent + across sessions and tied to each profile), full searching in this + history and caching of all input and output. + +* User-extensible 'magic' commands. A set of commands prefixed with + :samp:`%` is available for controlling IPython itself and provides + directory control, namespace information and many aliases to + common system shell commands. + +* Alias facility for defining your own system aliases. + +* Complete system shell access. Lines starting with :samp:`!` are passed + directly to the system shell, and using :samp:`!!` or :samp:`var = !cmd` + captures shell output into python variables for further use. + +* Background execution of Python commands in a separate thread. + IPython has an internal job manager called jobs, and a + convenience backgrounding magic function called :samp:`%bg`. + +* The ability to expand python variables when calling the system + shell. In a shell command, any python variable prefixed with :samp:`$` is + expanded. A double :samp:`$$` allows passing a literal :samp:`$` to the shell (for + access to shell and environment variables like :envvar:`PATH`). + +* Filesystem navigation, via a magic :samp:`%cd` command, along with a + persistent bookmark system (using :samp:`%bookmark`) for fast access to + frequently visited directories. + +* A lightweight persistence framework via the :samp:`%store` command, which + allows you to save arbitrary Python variables. These get restored + automatically when your session restarts. + +* Automatic indentation (optional) of code as you type (through the + readline library). + +* Macro system for quickly re-executing multiple lines of previous + input with a single name. Macros can be stored persistently via + :samp:`%store` and edited via :samp:`%edit`. + +* Session logging (you can then later use these logs as code in your + programs). Logs can optionally timestamp all input, and also store + session output (marked as comments, so the log remains valid + Python source code). 
+
+* Session restoring: logs can be replayed to restore a previous
+  session to the state where you left it.
+
+* Verbose and colored exception traceback printouts. Easier to parse
+  visually, and in verbose mode they produce a lot of useful
+  debugging information (basically a terminal version of the cgitb
+  module).
+
+* Auto-parentheses: callable objects can be executed without
+  parentheses: :samp:`sin 3` is automatically converted to :samp:`sin(3)`.
+
+* Auto-quoting: using :samp:`,` or :samp:`;` as the first character forces
+  auto-quoting of the rest of the line: :samp:`,my_function a b` becomes
+  automatically :samp:`my_function("a","b")`, while :samp:`;my_function a b`
+  becomes :samp:`my_function("a b")`.
+
+* Extensible input syntax. You can define filters that pre-process
+  user input to simplify input in special situations. This allows
+  for example pasting multi-line code fragments which start with
+  :samp:`>>>` or :samp:`...` such as those from other python sessions or the
+  standard Python documentation.
+
+* Flexible configuration system. It uses a configuration file which
+  allows permanent setting of all command-line options, module
+  loading, code and file execution. The system allows recursive file
+  inclusion, so you can have a base file with defaults and layers
+  which load other customizations for particular projects.
+
+* Embeddable. You can call IPython as a python shell inside your own
+  python programs. This can be used both for debugging code and for
+  providing interactive abilities to your programs with knowledge
+  about the local namespaces (very useful in debugging and data
+  analysis situations).
+
+* Easy debugger access. You can set IPython to call up an enhanced
+  version of the Python debugger (pdb) every time there is an
+  uncaught exception. This drops you inside the code which triggered
+  the exception with all the data live and it is possible to
+  navigate the stack to rapidly isolate the source of a bug. The
+  :samp:`%run` magic command (with the :samp:`-d` option) can run any script under
+  pdb's control, automatically setting initial breakpoints for you.
+  This version of pdb has IPython-specific improvements, including
+  tab-completion and traceback coloring support. For even easier
+  debugger access, try :samp:`%debug` after seeing an exception. winpdb is
+  also supported; see the ipy_winpdb extension.
+
+* Profiler support. You can run single statements (similar to
+  :samp:`profile.run()`) or complete programs under the profiler's control.
+  While this is possible with standard cProfile or profile modules,
+  IPython wraps this functionality with magic commands (see :samp:`%prun`
+  and :samp:`%run -p`) convenient for rapid interactive work.
+
+* Doctest support. The special :samp:`%doctest_mode` command toggles a mode
+  that allows you to paste existing doctests (with leading :samp:`>>>`
+  prompts and whitespace) and uses doctest-compatible prompts and
+  output, so you can use IPython sessions as doctest code.
 
 Interactive parallel computing
 ==============================
 
@@ -153,6 +181,37 @@ architecture within IPython that allows such hardware to be used quickly and eas
 from Python. Moreover, this architecture is designed to support interactive and
 collaborative parallel computing.
 
+The main features of this system are:
+
+* Quickly parallelize Python code from an interactive Python/IPython session.
+
+* A flexible and dynamic process model that can be deployed on anything from
+  multicore workstations to supercomputers.
+ +* An architecture that supports many different styles of parallelism, from + message passing to task farming. And all of these styles can be handled + interactively. + +* Both blocking and fully asynchronous interfaces. + +* High level APIs that enable many things to be parallelized in a few lines + of code. + +* Write parallel code that will run unchanged on everything from multicore + workstations to supercomputers. + +* Full integration with Message Passing libraries (MPI). + +* Capabilities based security model with full encryption of network connections. + +* Share live parallel jobs with other users securely. We call this collaborative + parallel computing. + +* Dynamically load balanced task farming system. + +* Robust error handling. Python exceptions raised in parallel execution are + gathered and presented to the top-level code. + For more information, see our :ref:`overview ` of using IPython for parallel computing. diff --git a/docs/source/parallel/index.txt b/docs/source/parallel/index.txt index cc31f75..15c8436 100644 --- a/docs/source/parallel/index.txt +++ b/docs/source/parallel/index.txt @@ -1,12 +1,9 @@ .. _parallel_index: ==================================== -Using IPython for Parallel computing +Using IPython for parallel computing ==================================== -User Documentation -================== - .. toctree:: :maxdepth: 2 diff --git a/docs/source/parallel/parallel_intro.txt b/docs/source/parallel/parallel_intro.txt index 20eee76..23900cd 100644 --- a/docs/source/parallel/parallel_intro.txt +++ b/docs/source/parallel/parallel_intro.txt @@ -1,57 +1,68 @@ .. _ip1par: -====================================== -Using IPython for parallel computing -====================================== +============================ +Overview and getting started +============================ .. contents:: Introduction ============ -This file gives an overview of IPython. IPython has a sophisticated and +This file gives an overview of IPython's sophisticated and powerful architecture for parallel and distributed computing. This architecture abstracts out parallelism in a very general way, which enables IPython to support many different styles of parallelism including: - * Single program, multiple data (SPMD) parallelism. - * Multiple program, multiple data (MPMD) parallelism. - * Message passing using ``MPI``. - * Task farming. - * Data parallel. - * Combinations of these approaches. - * Custom user defined approaches. +* Single program, multiple data (SPMD) parallelism. +* Multiple program, multiple data (MPMD) parallelism. +* Message passing using ``MPI``. +* Task farming. +* Data parallel. +* Combinations of these approaches. +* Custom user defined approaches. Most importantly, IPython enables all types of parallel applications to be developed, executed, debugged and monitored *interactively*. Hence, the ``I`` in IPython. The following are some example usage cases for IPython: - * Quickly parallelize algorithms that are embarrassingly parallel - using a number of simple approaches. Many simple things can be - parallelized interactively in one or two lines of code. - * Steer traditional MPI applications on a supercomputer from an - IPython session on your laptop. - * Analyze and visualize large datasets (that could be remote and/or - distributed) interactively using IPython and tools like - matplotlib/TVTK. - * Develop, test and debug new parallel algorithms - (that may use MPI) interactively. 
-  * Tie together multiple MPI jobs running on different systems into
-    one giant distributed and parallel system.
-  * Start a parallel job on your cluster and then have a remote
-    collaborator connect to it and pull back data into their
-    local IPython session for plotting and analysis.
-  * Run a set of tasks on a set of CPUs using dynamic load balancing.
+* Quickly parallelize algorithms that are embarrassingly parallel
+  using a number of simple approaches. Many simple things can be
+  parallelized interactively in one or two lines of code.
+
+* Steer traditional MPI applications on a supercomputer from an
+  IPython session on your laptop.
+
+* Analyze and visualize large datasets (that could be remote and/or
+  distributed) interactively using IPython and tools like
+  matplotlib/TVTK.
+
+* Develop, test and debug new parallel algorithms
+  (that may use MPI) interactively.
+
+* Tie together multiple MPI jobs running on different systems into
+  one giant distributed and parallel system.
+
+* Start a parallel job on your cluster and then have a remote
+  collaborator connect to it and pull back data into their
+  local IPython session for plotting and analysis.
+
+* Run a set of tasks on a set of CPUs using dynamic load balancing.
 
 Architecture overview
 =====================
 
 The IPython architecture consists of three components:
 
-  * The IPython engine.
-  * The IPython controller.
-  * Various controller Clients.
+* The IPython engine.
+* The IPython controller.
+* Various controller clients.
+
+These components live in the :mod:`IPython.kernel` package and are
+installed with IPython. They do, however, have additional dependencies
+that must be installed. For more information, see our
+:ref:`installation documentation `.
 
 IPython engine
 ---------------
 
@@ -75,16 +86,21 @@ IPython engines can connect. For each connected engine, the controller
 manages a queue. All actions that can be performed on the engine go
 through this queue. While the engines themselves block when user code is
 run, the controller hides that from the user to provide a fully
-asynchronous interface to a set of engines. Because the controller
-listens on a network port for engines to connect to it, it must be
-started before any engines are started.
+asynchronous interface to a set of engines.
+
+.. note::
+
+    Because the controller listens on a network port for engines to
+    connect to it, it must be started *before* any engines are started.
 
 The controller also provides a single point of contact for users who
 wish to utilize the engines connected to the controller. There are
 different ways of working with a controller. In IPython these ways
 correspond to different interfaces that the controller is adapted to.
 Currently we have two default interfaces to the controller:
 
-  * The MultiEngine interface.
-  * The Task interface.
+* The MultiEngine interface, which provides the simplest possible way of working
+  with engines interactively.
+* The Task interface, which presents the engines as a load balanced
+  task farming system.
 
 Advanced users can easily add new custom interfaces to enable other
 styles of parallelism.
 
@@ -100,18 +116,37 @@ Controller clients
 
 For each controller interface, there is a corresponding client. These
 clients allow users to interact with a set of engines through the
-interface.
+interface. Here are the two default clients:
+
+* The :class:`MultiEngineClient` class.
+* The :class:`TaskClient` class.
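+
+For illustration, creating the two default clients typically looks like
+this (where the required ``.furl`` files must live for this to work is
+covered in the sections below)::
+
+    from IPython.kernel import client
+
+    mec = client.MultiEngineClient()  # uses ipcontroller-mec.furl
+    tc = client.TaskClient()          # uses ipcontroller-tc.furl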
 Security
 --------
 
-By default (as long as `pyOpenSSL` is installed) all network connections between the controller and engines and the controller and clients are secure. What does this mean? First of all, all of the connections will be encrypted using SSL. Second, the connections are authenticated. We handle authentication in a `capabilities`__ based security model. In this model, a "capability (known in some systems as a key) is a communicable, unforgeable token of authority". Put simply, a capability is like a key to your house. If you have the key to your house, you can get in, if not you can't.
+By default (as long as `pyOpenSSL` is installed) all network connections between the controller and engines and the controller and clients are secure. What does this mean? First of all, all of the connections will be encrypted using SSL. Second, the connections are authenticated. We handle authentication in a `capabilities`__ based security model. In this model, a "capability (known in some systems as a key) is a communicable, unforgeable token of authority". Put simply, a capability is like a key to your house. If you have the key to your house, you can get in. If not, you can't.
 
 .. __: http://en.wikipedia.org/wiki/Capability-based_security
 
-In our architecture, the controller is the only process that listens on network ports, and is thus responsible to creating these keys. In IPython, these keys are known as Foolscap URLs, or FURLs, because of the underlying network protocol we are using. As a user, you don't need to know anything about the details of these FURLs, other than that when the controller starts, it saves a set of FURLs to files named something.furl. The default location of these files is your ~./ipython directory.
+In our architecture, the controller is the only process that listens on network ports, and is thus responsible for creating these keys. In IPython, these keys are known as Foolscap URLs, or FURLs, because of the underlying network protocol we are using. As a user, you don't need to know anything about the details of these FURLs, other than that when the controller starts, it saves a set of FURLs to files named :file:`something.furl`. The default location of these files is the :file:`~/.ipython/security` directory.
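+
+For illustration, after :command:`ipcontroller` has been run, listing this
+directory might show something like the following (a sketch; the individual
+files are described in detail below)::
+
+    $ ls ~/.ipython/security
+    ipcontroller-engine.furl  ipcontroller-mec.furl  ipcontroller-tc.furl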
-To connect and authenticate to the controller an engine or client simply needs to present an appropriate furl (that was originally created by the controller) to the controller. Thus, the .furl files need to be copied to a location where the clients and engines can find them. Typically, this is the ~./ipython directory on the host where the client/engine is running (which could be a different host than the controller). Once the .furl files are copied over, everything should work fine.
+To connect and authenticate to the controller an engine or client simply needs to present an appropriate furl (that was originally created by the controller) to the controller. Thus, the .furl files need to be copied to a location where the clients and engines can find them. Typically, this is the :file:`~/.ipython/security` directory on the host where the client/engine is running (which could be a different host than the controller). Once the .furl files are copied over, everything should work fine.
+
+Currently, there are three .furl files that the controller creates:
+
+ipcontroller-engine.furl
+    This ``.furl`` file is the key that gives an engine the ability to connect
+    to a controller.
+
+ipcontroller-tc.furl
+    This ``.furl`` file is the key that a :class:`TaskClient` must use to
+    connect to the task interface of a controller.
+
+ipcontroller-mec.furl
+    This ``.furl`` file is the key that a :class:`MultiEngineClient` must use to
+    connect to the multiengine interface of a controller.
+
+More details of how these ``.furl`` files are used are given below.
 
 Getting Started
 ===============
 
@@ -127,28 +162,40 @@ Starting the controller and engine on your local machine
 ---------------------------------------------------------
 
 This is the simplest configuration that can be used and is useful for
 testing the system and on machines that have multiple cores and/or
-multple CPUs. The easiest way of doing this is using the ``ipcluster``
+multiple CPUs. The easiest way of getting started is to use the :command:`ipcluster`
 command::
 
     $ ipcluster -n 4
-    
+
 This will start an IPython controller and then 4 engines that connect to
 the controller. Lastly, the script will print out the Python commands
 that you can use to connect to the controller. It is that easy.
 
-Underneath the hood, the ``ipcluster`` script uses two other top-level
+.. warning::
+
+    The :command:`ipcluster` command does not currently work on Windows. We are
+    working on it though.
+
+Underneath the hood, the controller creates ``.furl`` files in the
+:file:`~/.ipython/security` directory. Because the engines are on the
+same host, they automatically find the needed :file:`ipcontroller-engine.furl`
+there and use it to connect to the controller.
+
+The :command:`ipcluster` script uses two other top-level
 scripts that you can also use yourself. These scripts are
-``ipcontroller``, which starts the controller and ``ipengine`` which
+:command:`ipcontroller`, which starts the controller, and :command:`ipengine`, which
 starts one engine. To use these scripts to start things on your local
 machine, do the following. First start the controller::
 
-    $ ipcontroller &
+    $ ipcontroller
 
 Next, start however many instances of the engine you want using
 (repeatedly) the command::
 
-    $ ipengine &
+    $ ipengine
+
+The engines should start and automatically connect to the controller using the ``.furl`` files in :file:`~/.ipython/security`. You are now ready to use the controller and engines from IPython.
 
 .. warning::
 
@@ -156,47 +203,71 @@ Next, start however many instances of the engine you want using (repeatedly)
    the start the controller before the engines, since the engines connect
    to the controller as they get started.
 
-On some platforms you may need to give these commands in the form
-``(ipcontroller &)`` and ``(ipengine &)`` for them to work properly. The
-engines should start and automatically connect to the controller on the
-default ports, which are chosen for this type of setup. You are now ready
-to use the controller and engines from IPython.
+.. note::
 
-Starting the controller and engines on different machines
-----------------------------------------------------------
+    On some platforms (OS X), to put the controller and engine into the background
+    you may need to give these commands in the form ``(ipcontroller &)``
+    and ``(ipengine &)`` (with the parentheses) for them to work properly.
 
-This section needs to be updated to reflect the new Foolscap capabilities based
-model.
 
-Using ``ipcluster`` with ``ssh``
---------------------------------
+Starting the controller and engines on different hosts
+------------------------------------------------------
 
-The ``ipcluster`` command can also start a controller and engines using
-``ssh``. We need more documentation on this, but for now here is any
-example startup script::
+When the controller and engines are running on different hosts, things are
+slightly more complicated, but the underlying ideas are the same:
 
-    controller = dict(host='myhost',
-                      engine_port=None, # default is 10105
-                      control_port=None,
-                      )
+1. Start the controller on a host using :command:`ipcontroller`.
+2. Copy :file:`ipcontroller-engine.furl` from :file:`~/.ipython/security` on the controller's host to the host where the engines will run.
+3. Use :command:`ipengine` on the engines' hosts to start the engines.
 
-    # keys are hostnames, values are the number of engine on that host
-    engines = dict(node1=2,
-                   node2=2,
-                   node3=2,
-                   node3=2,
-                   )
+The only thing you have to be careful of is to tell :command:`ipengine` where the :file:`ipcontroller-engine.furl` file is located. There are two ways you can do this:
+
+* Put :file:`ipcontroller-engine.furl` in the :file:`~/.ipython/security` directory
+  on the engine's host, where it will be found automatically.
+* Call :command:`ipengine` with the ``--furl-file=full_path_to_the_file`` flag.
+
+The ``--furl-file`` flag works like this::
+
+    $ ipengine --furl-file=/path/to/my/ipcontroller-engine.furl
+
+.. note::
+
+    If the controller's and engine's hosts all have a shared file system
+    (:file:`~/.ipython/security` is the same on all of them), then things
+    will just work!
+
+Make .furl files persistent
+---------------------------
+
+At first glance it may seem that managing the ``.furl`` files is a bit annoying. Going back to the house and key analogy, copying the ``.furl`` files around each time you start the controller is like having to make a new key every time you want to unlock the door and enter your house. As with your house, you want to be able to create the key (or ``.furl`` file) once, and then simply use it at any point in the future.
+
+This is possible. The only thing you have to do is decide what ports the controller will listen on for the engines and clients. This is done as follows::
+
+    $ ipcontroller --client-port=10101 --engine-port=10102
+
+Then, just copy the furl files over the first time and you are set. You can start and stop the controller and engines as many times as you want in the future, just make sure to tell the controller to use the *same* ports.
+
+.. note::
+
+    You may ask the question: what ports does the controller listen on if you
+    don't tell it to use specific ones? The default is to use high random port
+    numbers. We do this for two reasons: i) to increase security through obscurity
+    and ii) to allow multiple controllers on a given host to start and automatically
+    use different ports.
 
 Starting engines using ``mpirun``
 ---------------------------------
 
 The IPython engines can be started using ``mpirun``/``mpiexec``, even if
-the engines don't call MPI_Init() or use the MPI API in any way. This is
+the engines don't call ``MPI_Init()`` or use the MPI API in any way. This is
 supported on modern MPI implementations like `Open MPI`_. This provides
 a really nice way of starting a bunch of engines. On a system with MPI
 installed you can do::
 
-    mpirun -n 4 ipengine --controller-port=10000 --controller-ip=host0
+    mpirun -n 4 ipengine
+
+to start 4 engines on a cluster. This works even if you don't have any
+Python-MPI bindings installed.
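+
+As an illustration, once engines are started this way they can use MPI from
+Python, provided an MPI binding such as mpi4py is installed on the engines
+(a sketch; mpi4py is an optional dependency, not something IPython installs
+for you)::
+
+    In [1]: from IPython.kernel import client
+
+    In [2]: mec = client.MultiEngineClient()
+
+    In [3]: mec.execute('from mpi4py import MPI')
+
+    In [4]: mec.execute('rank = MPI.COMM_WORLD.Get_rank()')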
.. _Open MPI: http://www.open-mpi.org/
@@ -214,12 +285,12 @@ Next Steps
 ==========
 
 Once you have started the IPython controller and one or more engines, you
-are ready to use the engines to do somnething useful. To make sure
+are ready to use the engines to do something useful. To make sure
 everything is working correctly, try the following commands::
 
     In [1]: from IPython.kernel import client
 
-    In [2]: mec = client.MultiEngineClient() # This looks for .furl files in ~./ipython
+    In [2]: mec = client.MultiEngineClient()
 
     In [4]: mec.get_ids()
     Out[4]: [0, 1, 2, 3]
 
@@ -239,4 +310,18 @@ everything is working correctly, try the following commands::
     [3] In [1]: print "Hello World"
    [3] Out[1]: Hello World
 
-If this works, you are ready to learn more about the :ref:`MultiEngine ` and :ref:`Task ` interfaces to the controller.
+Remember, a client also needs to present a ``.furl`` file to the controller. How does this happen? When a multiengine client is created with no arguments, the client tries to find the corresponding ``.furl`` file in the local :file:`~/.ipython/security` directory. If it finds it, you are set. If you have put the ``.furl`` file in a different location or it has a different name, create the client like this::
+
+    mec = client.MultiEngineClient('/path/to/my/ipcontroller-mec.furl')
+
+The same thing holds true when creating a task client::
+
+    tc = client.TaskClient('/path/to/my/ipcontroller-tc.furl')
+
+You are now ready to learn more about the :ref:`MultiEngine ` and :ref:`Task ` interfaces to the controller.
+
+.. note::
+
+    Don't forget that the engine, multiengine client and task client all have
+    *different* furl files. You must move *each* of these around to an appropriate
+    location so that the engines and clients can use them to connect to the controller.
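+
+For example, if the engines run on a different host, the engine's key might
+be copied over before starting the engines (here with ``scp``; any file
+transfer mechanism works, and ``user`` and ``enginehost`` are placeholders)::
+
+    $ scp ~/.ipython/security/ipcontroller-engine.furl \
+          user@enginehost:.ipython/security/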
diff --git a/docs/source/parallel/parallel_multiengine.txt b/docs/source/parallel/parallel_multiengine.txt
index d86e541..becc6f8 100644
--- a/docs/source/parallel/parallel_multiengine.txt
+++ b/docs/source/parallel/parallel_multiengine.txt
@@ -1,57 +1,115 @@
 .. _parallelmultiengine:
 
-=================================
-IPython's MultiEngine interface
-=================================
+===============================
+IPython's multiengine interface
+===============================
 
 .. contents::
 
-The MultiEngine interface represents one possible way of working with a
-set of IPython engines. The basic idea behind the MultiEngine interface is
-that the capabilities of each engine are explicitly exposed to the user.
-Thus, in the MultiEngine interface, each engine is given an id that is
-used to identify the engine and give it work to do. This interface is very
-intuitive and is designed with interactive usage in mind, and is thus the
-best place for new users of IPython to begin.
+The multiengine interface represents one possible way of working with a set of
+IPython engines. The basic idea behind the multiengine interface is that the
+capabilities of each engine are directly and explicitly exposed to the user.
+Thus, in the multiengine interface, each engine is given an id that is used to
+identify the engine and give it work to do. This interface is very intuitive
+and is designed with interactive usage in mind, and is thus the best place for
+new users of IPython to begin.
 
 Starting the IPython controller and engines
 ===========================================
 
 To follow along with this tutorial, you will need to start the IPython
-controller and four IPython engines. The simplest way of doing this is to
-use the ``ipcluster`` command::
+controller and four IPython engines. The simplest way of doing this is to use
+the :command:`ipcluster` command::
 
     $ ipcluster -n 4
 
-For more detailed information about starting the controller and engines, see our :ref:`introduction ` to using IPython for parallel computing.
+For more detailed information about starting the controller and engines, see
+our :ref:`introduction ` to using IPython for parallel computing.
 
 Creating a ``MultiEngineClient`` instance
 =========================================
 
-The first step is to import the IPython ``client`` module and then create a ``MultiEngineClient`` instance::
+The first step is to import the :mod:`IPython.kernel.client` module
+and then create a :class:`MultiEngineClient` instance::
 
     In [1]: from IPython.kernel import client
 
     In [2]: mec = client.MultiEngineClient()
 
-To make sure there are engines connected to the controller, use can get a list of engine ids::
+This form assumes that the :file:`ipcontroller-mec.furl` file is in the
+:file:`~/.ipython/security` directory on the client's host. If not, the
+location of the ``.furl`` file must be given as an argument to the
+constructor::
+
+    In [2]: mec = client.MultiEngineClient('/path/to/my/ipcontroller-mec.furl')
+
+To make sure there are engines connected to the controller, you can get a list
+of engine ids::
 
     In [3]: mec.get_ids()
     Out[3]: [0, 1, 2, 3]
 
 Here we see that there are four engines ready to do work for us.
 
+Quick and easy parallelism
+==========================
+
+In many cases, you simply want to apply a Python function to a sequence of objects, but *in parallel*. The multiengine interface provides two simple ways of accomplishing this: a parallel version of :func:`map` and the ``@parallel`` function decorator.
+
+Parallel map
+------------
+
+Python's builtin :func:`map` function allows a function to be applied to a
+sequence element-by-element. This type of code is typically trivial to
+parallelize. In fact, the multiengine interface in IPython already has a
+parallel version of :meth:`map` that works just like its serial counterpart::
+
+    In [63]: serial_result = map(lambda x:x**10, range(32))
+
+    In [64]: parallel_result = mec.map(lambda x:x**10, range(32))
+
+    In [65]: serial_result==parallel_result
+    Out[65]: True
+
+.. note::
+
+    The multiengine interface version of :meth:`map` does not do any load
+    balancing. For a load balanced version, see the task interface.
+
+.. seealso::
+
+    The :meth:`map` method has a number of options that can be controlled by
+    the :meth:`mapper` method. See its docstring for more information.
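+
+For illustration, here is a minimal sketch of the :meth:`mapper` factory in
+use (no options are passed here; consult its docstring for the keyword
+arguments that are actually available)::
+
+    In [66]: m = mec.mapper()  # create an IMapper for these engines
+
+    In [67]: m.map(lambda x: x**2, range(16))  # same call syntax as map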
+Parallel function decorator
+---------------------------
+
+Parallel functions are just like normal functions, but they can be called on
+sequences and *in parallel*. The multiengine interface provides a decorator
+that turns any Python function into a parallel function::
+
+    In [10]: @mec.parallel()
+       ....: def f(x):
+       ....:     return 10.0*x**4
+       ....:
+
+    In [11]: f(range(32))    # this is done in parallel
+    Out[11]:
+    [0.0,10.0,160.0,...]
+
+See the docstring for the :meth:`parallel` decorator for options.
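+Calling ``f`` on a sequence is intended to do the same work as mapping the
+underlying function over it, so the decorator and :meth:`map` can be seen as
+two spellings of the same idea (a sketch, under that assumption)::
+
+    In [12]: f(range(32)) == mec.map(lambda x: 10.0*x**4, range(32))
+    Out[12]: True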
Running Python commands
=======================

-The most basic type of operation that can be performed on the engines is to execute Python code. Executing Python code can be done in blocking or non-blocking mode (blocking is default) using the ``execute`` method.
+The most basic type of operation that can be performed on the engines is to
+execute Python code. Executing Python code can be done in blocking or
+non-blocking mode (blocking is default) using the :meth:`execute` method.

Blocking execution
------------------

-In blocking mode, the ``MultiEngineClient`` object (called ``mec`` in
+In blocking mode, the :class:`MultiEngineClient` object (called ``mec`` in
these examples) submits the command to the controller, which places the
-command in the engines' queues for execution. The ``execute`` call then
+command in the engines' queues for execution. The :meth:`execute` call then
blocks until the engines are done executing the command::

    # The default is to run on all engines

@@ -71,7 +129,8 @@ blocks until the engines are done executing the command::

    [2] In [2]: b=10
    [3] In [2]: b=10

-Python commands can be executed on specific engines by calling execute using the ``targets`` keyword argument::
+Python commands can be executed on specific engines by calling execute using
+the ``targets`` keyword argument::

    In [6]: mec.execute('c=a+b',targets=[0,2])
    Out[6]:

@@ -102,7 +161,9 @@ Python commands can be executed on specific engines by calling execute using the

    [3] In [4]: print c
    [3] Out[4]: -5

-This example also shows one of the most important things about the IPython engines: they have a persistent user namespaces. The ``execute`` method returns a Python ``dict`` that contains useful information::
+This example also shows one of the most important things about the IPython
+engines: they have a persistent user namespace. The :meth:`execute` method
+returns a Python ``dict`` that contains useful information::

    In [9]: result_dict = mec.execute('d=10; print d')

@@ -118,10 +179,12 @@ This example also shows one of the most important things about the IPython engin

Non-blocking execution
----------------------

-In non-blocking mode, ``execute`` submits the command to be executed and then returns a
-``PendingResult`` object immediately. The ``PendingResult`` object gives you a way of getting a
-result at a later time through its ``get_result`` method or ``r`` attribute. This allows you to
-quickly submit long running commands without blocking your local Python/IPython session::
+In non-blocking mode, :meth:`execute` submits the command to be executed and
+then returns a :class:`PendingResult` object immediately. The
+:class:`PendingResult` object gives you a way of getting a result at a later
+time through its :meth:`get_result` method or :attr:`r` attribute. This allows
+you to quickly submit long running commands without blocking your local
+Python/IPython session::

    # In blocking mode
    In [6]: mec.execute('import time')

@@ -159,7 +222,10 @@ quickly submit long running commands without blocking your local Python/IPython

    [2] In [3]: time.sleep(10)
    [3] In [3]: time.sleep(10)

-Often, it is desirable to wait until a set of ``PendingResult`` objects are done. For this, there is a the method ``barrier``. This method takes a tuple of ``PendingResult`` objects and blocks until all of the associated results are ready::
+Often, it is desirable to wait until a set of :class:`PendingResult` objects
+are done. For this, there is the :meth:`barrier` method. This method takes a
+tuple of :class:`PendingResult` objects and blocks until all of the associated
+results are ready::

    In [72]: mec.block=False

@@ -182,14 +248,16 @@ Often, it is desirable to wait until a set of ``PendingResult`` objects are done

The ``block`` and ``targets`` keyword arguments and attributes
--------------------------------------------------------------

-Most commands in the multiengine interface (like ``execute``) accept ``block`` and ``targets``
-as keyword arguments. As we have seen above, these keyword arguments control the blocking mode
-and which engines the command is applied to. The ``MultiEngineClient`` class also has ``block``
-and ``targets`` attributes that control the default behavior when the keyword arguments are not
-provided. Thus the following logic is used for ``block`` and ``targets``:
+Most methods in the multiengine interface (like :meth:`execute`) accept
+``block`` and ``targets`` as keyword arguments. As we have seen above, these
+keyword arguments control the blocking mode and which engines the command is
+applied to. The :class:`MultiEngineClient` class also has :attr:`block` and
+:attr:`targets` attributes that control the default behavior when the keyword
+arguments are not provided. Thus the following logic is used for :attr:`block`
+and :attr:`targets`:

- * If no keyword argument is provided, the instance attributes are used.
- * Keyword argument, if provided override the instance attributes.
+* If no keyword argument is provided, the instance attributes are used.
+* Keyword arguments, if provided, override the instance attributes.

The following examples demonstrate how to use the instance attributes::

@@ -225,14 +293,19 @@ The following examples demonstrate how to use the instance attributes::

    [3] In [6]: b=10; print b
    [3] Out[6]: 10

-The ``block`` and ``targets`` instance attributes also determine the behavior of the parallel
-magic commands...
+The :attr:`block` and :attr:`targets` instance attributes also determine the
+behavior of the parallel magic commands.
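+In other words, each call resolves its effective settings roughly like this
+(a sketch of the rules above only, not IPython's actual implementation)::
+
+    def effective_settings(mec, block=None, targets=None):
+        # Keyword arguments win; otherwise fall back to instance attributes.
+        if block is None:
+            block = mec.block
+        if targets is None:
+            targets = mec.targets
+        return block, targets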
Parallel magic commands
-----------------------

-We provide a few IPython magic commands (``%px``, ``%autopx`` and ``%result``) that make it more pleasant to execute Python commands on the engines interactively. These are simply shortcuts to ``execute`` and ``get_result``. The ``%px`` magic executes a single Python command on the engines specified by the `magicTargets``targets` attribute of the ``MultiEngineClient`` instance (by default this is 'all')::
+We provide a few IPython magic commands (``%px``, ``%autopx`` and ``%result``)
+that make it more pleasant to execute Python commands on the engines
+interactively. These are simply shortcuts to :meth:`execute` and
+:meth:`get_result`. The ``%px`` magic executes a single Python command on the
+engines specified by the :attr:`targets` attribute of the
+:class:`MultiEngineClient` instance (by default this is ``'all'``)::

    # Make this MultiEngineClient active for parallel magic commands
    In [23]: mec.activate()

@@ -277,7 +350,9 @@ We provide a few IPython magic commands (``%px``, ``%autopx`` and ``%result``) t

    [3] In [9]: print numpy.linalg.eigvals(a)
    [3] Out[9]: [ 0.83664764 -0.25602658]

-The ``%result`` magic gets and prints the stdin/stdout/stderr of the last command executed on each engine. It is simply a shortcut to the ``get_result`` method::
+The ``%result`` magic gets and prints the stdin/stdout/stderr of the last
+command executed on each engine. It is simply a shortcut to the
+:meth:`get_result` method::

    In [29]: %result
    Out[29]:

@@ -294,7 +369,8 @@ The ``%result`` magic gets and prints the stdin/stdout/stderr of the last comman

    [3] In [9]: print numpy.linalg.eigvals(a)
    [3] Out[9]: [ 0.83664764 -0.25602658]

-The ``%autopx`` magic switches to a mode where everything you type is executed on the engines given by the ``targets`` attribute::
+The ``%autopx`` magic switches to a mode where everything you type is executed
+on the engines given by the :attr:`targets` attribute::

    In [30]: mec.block=False

@@ -335,51 +411,19 @@ The ``%autopx`` magic switches to a mode where everything you type is executed o

    [3] In [12]: print "Average max eigenvalue is: ", sum(max_evals)/len(max_evals)
    [3] Out[12]: Average max eigenvalue is: 10.1158837784

-Using the ``with`` statement of Python 2.5
-------------------------------------------

-Python 2.5 introduced the ``with`` statement. The ``MultiEngineClient`` can be used with the ``with`` statement to execute a block of code on the engines indicated by the ``targets`` attribute::
+Moving Python objects around
+============================

-    In [3]: with mec:
-    ...:     client.remote()  # Required so the following code is not run locally
-    ...:     a = 10
-    ...:     b = 30
-    ...:     c = a+b
-    ...:
-    ...:
-
-    In [4]: mec.get_result()
-    Out[4]:
-
-    [0] In [1]: a = 10
-        b = 30
-        c = a+b
-
-    [1] In [1]: a = 10
-        b = 30
-        c = a+b
-
-    [2] In [1]: a = 10
-        b = 30
-        c = a+b
-
-    [3] In [1]: a = 10
-        b = 30
-        c = a+b
-
-This is basically another way of calling execute, but one with allows you to avoid writing code in strings. When used in this way, the attributes ``targets`` and ``block`` are used to control how the code is executed. For now, if you run code in non-blocking mode you won't have access to the ``PendingResult``.
-
-Moving Python object around
-===========================
-
-In addition to executing code on engines, you can transfer Python objects to and from your
-IPython session and the engines. In IPython, these operations are called ``push`` (sending an
-object to the engines) and ``pull`` (getting an object from the engines).
+In addition to executing code on engines, you can transfer Python objects to
+and from your IPython session and the engines. In IPython, these operations
+are called :meth:`push` (sending an object to the engines) and :meth:`pull`
+(getting an object from the engines).

Basic push and pull
-------------------

-Here are some examples of how you use ``push`` and ``pull``::
+Here are some examples of how you use :meth:`push` and :meth:`pull`::

    In [38]: mec.push(dict(a=1.03234,b=3453))
    Out[38]: [None, None, None, None]

@@ -415,7 +459,8 @@ Here are some examples of how you use ``push`` and ``pull``::

    [3] In [13]: print c
    [3] Out[13]: speed

-In non-blocking mode ``push`` and ``pull`` also return ``PendingResult`` objects::
+In non-blocking mode :meth:`push` and :meth:`pull` also return
+:class:`PendingResult` objects::

    In [47]: mec.block=False

@@ -428,7 +473,11 @@ In non-blocking mode ``push`` and ``pull`` also return ``PendingResult`` objects

Push and pull for functions
---------------------------

-Functions can also be pushed and pulled using ``push_function`` and ``pull_function``::
+Functions can also be pushed and pulled using :meth:`push_function` and
+:meth:`pull_function`::
+
+    In [52]: mec.block=True

    In [53]: def f(x):
       ....:     return 2.0*x**4

@@ -466,7 +515,10 @@ Functions can also be pushed and pulled using ``push_function`` and ``pull_funct

Dictionary interface
--------------------

-As a shorthand to ``push`` and ``pull``, the ``MultiEngineClient`` class implements some of the Python dictionary interface. This make the remote namespaces of the engines appear as a local dictionary. Underneath, this uses ``push`` and ``pull``::
+As a shorthand to :meth:`push` and :meth:`pull`, the
+:class:`MultiEngineClient` class implements some of the Python dictionary
+interface. This makes the remote namespaces of the engines appear as a local
+dictionary. Underneath, this uses :meth:`push` and :meth:`pull`::

    In [50]: mec.block=True
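+    # A sketch of the dictionary shorthand (hypothetical session; the exact
+    # formatting of the pulled value is illustrative only):
+    In [51]: mec['a'] = 'asdf'     # shorthand for push
+
+    In [52]: mec['a']              # shorthand for pull
+    Out[52]: ['asdf', 'asdf', 'asdf', 'asdf']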
@@ -478,11 +530,13 @@ As a shorthand to ``push`` and ``pull``, the ``MultiEngineClient`` class impleme

Scatter and gather
------------------

-Sometimes it is useful to partition a sequence and push the partitions to different engines. In
-MPI language, this is know as scatter/gather and we follow that terminology. However, it is
-important to remember that in IPython ``scatter`` is from the interactive IPython session to
-the engines and ``gather`` is from the engines back to the interactive IPython session. For
-scatter/gather operations between engines, MPI should be used::
+Sometimes it is useful to partition a sequence and push the partitions to
+different engines. In MPI language, this is known as scatter/gather and we
+follow that terminology. However, it is important to remember that in
+IPython's :class:`MultiEngineClient` class, :meth:`scatter` is from the
+interactive IPython session to the engines and :meth:`gather` is from the
+engines back to the interactive IPython session. For scatter/gather operations
+between engines, MPI should be used::

    In [58]: mec.scatter('a',range(16))
    Out[58]: [None, None, None, None]
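+    # Sketch of a scatter/compute/combine round trip (assuming :meth:`pull`
+    # returns one value per engine, as in the push and pull examples above):
+    In [59]: mec.execute('s = sum(a)')    # each engine sums its partition
+
+    In [60]: sum(mec.pull('s'))           # combine the partial sums locally
+    Out[60]: 120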
@@ -510,24 +564,12 @@ scatter/gather operations between engines, MPI should be used::

Other things to look at
=======================

-Parallel map
-------------
-
-Python's builtin ``map`` functions allows a function to be applied to a sequence element-by-element. This type of code is typically trivial to parallelize. In fact, the MultiEngine interface in IPython already has a parallel version of ``map`` that works just like its serial counterpart::
-
-    In [63]: serial_result = map(lambda x:x**10, range(32))
-
-    In [64]: parallel_result = mec.map(lambda x:x**10, range(32))
-
-    In [65]: serial_result==parallel_result
-    Out[65]: True
-
-As you would expect, the parallel version of ``map`` is also influenced by the ``block`` and ``targets`` keyword arguments and attributes.
-
How to do parallel list comprehensions
--------------------------------------

-In many cases list comprehensions are nicer than using the map function. While we don't have fully parallel list comprehensions, it is simple to get the basic effect using ``scatter`` and ``gather``::
+In many cases list comprehensions are nicer than using the map function. While
+we don't have fully parallel list comprehensions, it is simple to get the
+basic effect using :meth:`scatter` and :meth:`gather`::

    In [66]: mec.scatter('x',range(64))
    Out[66]: [None, None, None, None]

@@ -547,10 +589,16 @@ In many cases list comprehensions are nicer than using the map function. While

    In [69]: print y
    [0, 1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824,...]
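+The same pattern can be wrapped in a tiny helper. The ``plist_comp`` function
+below is hypothetical (not part of IPython); it simply bundles the
+scatter/execute/gather steps shown above::
+
+    def plist_comp(mec, expr, seq):
+        # Evaluate [expr for item in chunk] on each engine's chunk of seq.
+        mec.scatter('_chunk', seq)
+        mec.execute('_result = [%s for item in _chunk]' % expr)
+        return mec.gather('_result')
+
+    y = plist_comp(mec, 'item**10', range(64))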
-Parallel Exceptions
+Parallel exceptions
-------------------

-In the MultiEngine interface, parallel commands can raise Python exceptions, just like serial commands. But, it is a little subtle, because a single parallel command can actually raise multiple exceptions (one for each engine the command was run on). To express this idea, the MultiEngine interface has a ``CompositeError`` exception class that will be raised in most cases. The ``CompositeError`` class is a special type of exception that wraps one or more other types of exceptions. Here is how it works::
+In the multiengine interface, parallel commands can raise Python exceptions,
+just like serial commands. But, it is a little subtle, because a single
+parallel command can actually raise multiple exceptions (one for each engine
+the command was run on). To express this idea, the multiengine interface has a
+:exc:`CompositeError` exception class that will be raised in most cases. The
+:exc:`CompositeError` class is a special type of exception that wraps one or
+more other types of exceptions. Here is how it works::

    In [76]: mec.block=True

@@ -580,7 +628,7 @@ In the MultiEngine interface, parallel commands can raise Python exceptions, jus

    [2:execute]: ZeroDivisionError: integer division or modulo by zero
    [3:execute]: ZeroDivisionError: integer division or modulo by zero

-Notice how the error message printed when ``CompositeError`` is raised has information about the individual exceptions that were raised on each engine. If you want, you can even raise one of these original exceptions::
+Notice how the error message printed when :exc:`CompositeError` is raised has
+information about the individual exceptions that were raised on each engine.
+If you want, you can even raise one of these original exceptions::

    In [80]: try:
       ....:     mec.execute('1/0')
       ....: except client.CompositeError, e:
       ....:     e.raise_exception()
       ....:
       ....:

@@ -602,7 +650,9 @@ Notice how the error message printed when ``CompositeError`` is raised has infor

    ZeroDivisionError: integer division or modulo by zero

-If you are working in IPython, you can simple type ``%debug`` after one of these ``CompositeError`` is raised, and inspect the exception instance::
+If you are working in IPython, you can simply type ``%debug`` after one of
+these :exc:`CompositeError` exceptions is raised, and inspect the exception
+instance::

    In [81]: mec.execute('1/0')
    ---------------------------------------------------------------------------

@@ -679,6 +729,11 @@ If you are working in IPython, you can simple type ``%debug`` after one of these

    ZeroDivisionError: integer division or modulo by zero

+.. note::
+
+    The above example appears to be broken right now because of a change in
+    how we are using Twisted.
+
All of this same error handling magic even works in non-blocking mode::

    In [83]: mec.block=False
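+    # Sketch: in non-blocking mode the failure surfaces when the result is
+    # retrieved, not when the command is submitted (pr is a hypothetical name).
+    In [84]: pr = mec.execute('1/0')   # returns a PendingResult; no error yet
+
+    In [85]: pr.r                      # the CompositeError is raised here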
diff --git a/docs/source/parallel/parallel_task.txt b/docs/source/parallel/parallel_task.txt
index 94670c8..a307439 100644
--- a/docs/source/parallel/parallel_task.txt
+++ b/docs/source/parallel/parallel_task.txt
@@ -1,240 +1,93 @@
.. _paralleltask:

-=================================
-The IPython Task interface
-=================================
+==========================
+The IPython task interface
+==========================

.. contents::

-The ``Task`` interface to the controller presents the engines as a fault tolerant, dynamic load-balanced system or workers. Unlike the ``MultiEngine`` interface, in the ``Task`` interface, the user have no direct access to individual engines. In some ways, this interface is simpler, but in other ways it is more powerful. Best of all the user can use both of these interfaces at the same time to take advantage or both of their strengths. When the user can break up the user's work into segments that do not depend on previous execution, the ``Task`` interface is ideal. But it also has more power and flexibility, allowing the user to guide the distribution of jobs, without having to assign Tasks to engines explicitly.
+The task interface to the controller presents the engines as a fault
+tolerant, dynamically load-balanced system of workers. Unlike the multiengine
+interface, in the task interface the user has no direct access to individual
+engines. In some ways, this interface is simpler, but in other ways it is
+more powerful.
+
+Best of all, you can use both of these interfaces running at the same time to
+take advantage of both of their strengths. When you can break up your work
+into segments that do not depend on previous execution, the task interface is
+ideal. But it also has more power and flexibility, allowing the user to guide
+the distribution of jobs, without having to assign tasks to engines
+explicitly.

Starting the IPython controller and engines
===========================================

-To follow along with this tutorial, the user will need to start the IPython
-controller and four IPython engines. The simplest way of doing this is to
-use the ``ipcluster`` command::
+To follow along with this tutorial, you will need to start the IPython
+controller and four IPython engines. The simplest way of doing this is to use
+the :command:`ipcluster` command::

    $ ipcluster -n 4

-For more detailed information about starting the controller and engines, see our :ref:`introduction ` to using IPython for parallel computing.
+For more detailed information about starting the controller and engines, see
+our :ref:`introduction ` to using IPython for parallel computing.

+Creating a ``TaskClient`` instance
+==================================
+
+The first step is to import the IPython :mod:`IPython.kernel.client` module
+and then create a :class:`TaskClient` instance::
+
+    In [1]: from IPython.kernel import client
+
+    In [2]: tc = client.TaskClient()
+
+This form assumes that the :file:`ipcontroller-tc.furl` file is in the
+:file:`~/.ipython/security` directory on the client's host. If not, the
+location of the ``.furl`` file must be given as an argument to the
+constructor::
+
+    In [2]: tc = client.TaskClient('/path/to/my/ipcontroller-tc.furl')
+
+Quick and easy parallelism
+==========================
+
+In many cases, you simply want to apply a Python function to a sequence of
+objects, but *in parallel*. Like the multiengine interface, the task interface
+provides two simple ways of accomplishing this: a parallel version of
+:func:`map` and the ``@parallel`` function decorator. However, the versions in
+the task interface have one important difference: they are dynamically load
+balanced. Thus, if the execution time per item varies significantly, you
+should use the versions in the task interface.
+
+Parallel map
+------------
+
+The parallel :meth:`map` in the task interface is similar to that in the
+multiengine interface::
+
+    In [63]: serial_result = map(lambda x:x**10, range(32))
+
+    In [64]: parallel_result = tc.map(lambda x:x**10, range(32))
+
+    In [65]: serial_result==parallel_result
+    Out[65]: True
+
+Parallel function decorator
+---------------------------
+
+Parallel functions are just like normal functions, but they can be called on
+sequences and *in parallel*. The task interface provides a decorator that
+turns any Python function into a parallel function::

-The magic here is that this single controller and set of engines is running both the MultiEngine and ``Task`` interfaces simultaneously.

+    In [10]: @tc.parallel()
+       ....: def f(x):
+       ....:     return 10.0*x**4
+       ....:

-QuickStart Task Farming
-=======================

+    In [11]: f(range(32))    # this is done in parallel
+    Out[11]:
+    [0.0,10.0,160.0,...]

-First, a quick example of how to start running the most basic Tasks.

+More details
+============

-The first step is to import the IPython ``client`` module and then create a ``TaskClient`` instance::
-
-    In [1]: from IPython.kernel import client
-
-    In [2]: tc = client.TaskClient()

+The :class:`TaskClient` has many more powerful features that allow quite a
+bit of flexibility in how tasks are defined and run. The next places to look
+are in the following classes:

-Then the user wrap the commands the user want to run in Tasks::
-
-    In [3]: tasklist = []
-    In [4]: for n in range(1000):
-       ...     tasklist.append(client.Task("a = %i"%n, pull="a"))

+* :class:`IPython.kernel.client.TaskClient`
+* :class:`IPython.kernel.client.StringTask`
+* :class:`IPython.kernel.client.MapTask`

-The first argument of the ``Task`` constructor is a string, the command to be executed. The most important optional keyword argument is ``pull``, which can be a string or list of strings, and it specifies the variable names to be saved as results of the ``Task``.

+The following is an overview of how to use these classes together:

-Next, the user need to submit the Tasks to the ``TaskController`` with the ``TaskClient``::

+1. Create a :class:`TaskClient`.
+2. Create one or more instances of :class:`StringTask` or :class:`MapTask`
+   to define your tasks.

-    In [5]: taskids = [ tc.run(t) for t in tasklist ]

+3. Submit your tasks using the :meth:`run` method of your
+   :class:`TaskClient` instance.
+4. Use :meth:`TaskClient.get_task_result` to get the results of the
+   tasks.

-This will give the user a list of the TaskIDs used by the controller to keep track of the Tasks and their results. Now at some point the user are going to want to get those results back. The ``barrier`` method allows the user to wait for the Tasks to finish running::

+We are in the process of developing more detailed information about the task
+interface. For now, the docstrings of the :class:`TaskClient`,
+:class:`StringTask` and :class:`MapTask` classes should be consulted.
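+As a quick sketch of these four steps end to end (assuming
+:class:`StringTask` keeps the constructor signature of the old ``Task``
+class, i.e. a command string plus a ``pull`` argument naming the variables to
+bring back)::
+
+    In [3]: tasks = [client.StringTask("a = 10*%i" % n, pull="a")
+       ....:         for n in range(4)]
+
+    In [4]: taskids = [tc.run(t) for t in tasks]
+
+    In [5]: tc.barrier(taskids)    # wait for all of the tasks to finish
+
+    In [6]: [tc.get_task_result(tid).ns.a for tid in taskids]
+    Out[6]: [0, 10, 20, 30]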
-    In [6]: tc.barrier(taskids)

-This command will block until all the Tasks in ``taskids`` have finished. Now, the user probably want to look at the user's results::

-    In [7]: task_results = [ tc.get_task_result(taskid) for taskid in taskids ]

-Now the user have a list of ``TaskResult`` objects, which have the actual result as a dictionary, but also keep track of some useful metadata about the ``Task``::

-    In [8]: tr = ``Task``_results[73]
-
-    In [9]: tr
-    Out[9]: ``TaskResult``[ID:73]:{'a':73}
-
-    In [10]: tr.engineid
-    Out[10]: 1
-
-    In [11]: tr.submitted, tr.completed, tr.duration
-    Out[11]: ("2008/03/08 03:41:42", "2008/03/08 03:41:44", 2.12345)

-The actual results are stored in a dictionary, ``tr.results``, and a namespace object ``tr.ns`` which accesses the result keys by attribute::

-    In [12]: tr.results['a']
-    Out[12]: 73
-
-    In [13]: tr.ns.a
-    Out[13]: 73

-That should cover the basics of running simple Tasks. There are several more powerful things the user can do with Tasks covered later. The most useful probably being using a ``MutiEngineClient`` interface to initialize all the engines with the import dependencies necessary to run the user's Tasks.

-There are many options for running and managing Tasks. The best way to learn further about the ``Task`` interface is to study the examples in ``docs/examples``. If the user do so and learn a lots about this interface, we encourage the user to expand this documentation about the ``Task`` system.

-Overview of the Task System
-===========================

-The user's view of the ``Task`` system has three basic objects: The ``TaskClient``, the ``Task``, and the ``TaskResult``. The names of these three objects well indicate their role.

-The ``TaskClient`` is the user's ``Task`` farming connection to the IPython cluster. Unlike the ``MultiEngineClient``, the ``TaskControler`` handles all the scheduling and distribution of work, so the ``TaskClient`` has no notion of engines, it just submits Tasks and requests their results. The Tasks are described as ``Task`` objects, and their results are wrapped in ``TaskResult`` objects. Thus, there are very few necessary methods for the user to manage.

-Inside the task system is a Scheduler object, which assigns tasks to workers. The default scheduler is a simple FIFO queue. Subclassing the Scheduler should be easy, just implementing your own priority system.
- -The TaskClient -============== - -The ``TaskClient`` is the object the user use to connect to the ``Controller`` that is managing the user's Tasks. It is the analog of the ``MultiEngineClient`` for the standard IPython multiplexing interface. As with all client interfaces, the first step is to import the IPython Client Module:: - - In [1]: from IPython.kernel import client - -Just as with the ``MultiEngineClient``, the user create the ``TaskClient`` with a tuple, containing the ip-address and port of the ``Controller``. the ``client`` module conveniently has the default address of the ``Task`` interface of the controller. Creating a default ``TaskClient`` object would be done with this:: - - In [2]: tc = client.TaskClient(client.default_task_address) - -or, if the user want to specify a non default location of the ``Controller``, the user can specify explicitly:: - - In [3]: tc = client.TaskClient(("192.168.1.1", 10113)) - -As discussed earlier, the ``TaskClient`` only has a few basic methods. - - * ``tc.run(task)`` - ``run`` is the method by which the user submits Tasks. It takes exactly one argument, a ``Task`` object. All the advanced control of ``Task`` behavior is handled by properties of the ``Task`` object, rather than the submission command, so they will be discussed later in the `Task`_ section. ``run`` returns an integer, the ``Task``ID by which the ``Task`` and its results can be tracked and retrieved:: - - In [4]: ``Task``ID = tc.run(``Task``) - - * ``tc.get_task_result(taskid, block=``False``)`` - ``get_task_result`` is the method by which results are retrieved. It takes a single integer argument, the ``Task``ID`` of the result the user wish to retrieve. ``get_task_result`` also takes a keyword argument ``block``. ``block`` specifies whether the user actually want to wait for the result. If ``block`` is false, as it is by default, ``get_task_result`` will return immediately. If the ``Task`` has completed, it will return the ``TaskResult`` object for that ``Task``. But if the ``Task`` has not completed, it will return ``None``. If the user specify ``block=``True``, then ``get_task_result`` will wait for the ``Task`` to complete, and always return the ``TaskResult`` for the requested ``Task``. - * ``tc.barrier(taskid(s))`` - ``barrier`` is a synchronization method. It takes exactly one argument, a ``Task``ID or list of taskIDs. ``barrier`` will block until all the specified Tasks have completed. In practice, a barrier is often called between the ``Task`` submission section of the code and the result gathering section:: - - In [5]: taskIDs = [ tc.run(``Task``) for ``Task`` in myTasks ] - - In [6]: tc.get_task_result(taskIDs[-1]) is None - Out[6]: ``True`` - - In [7]: tc.barrier(``Task``ID) - - In [8]: results = [ tc.get_task_result(tid) for tid in taskIDs ] - - * ``tc.queue_status(verbose=``False``)`` - ``queue_status`` is a method for querying the state of the ``TaskControler``. ``queue_status`` returns a dict of the form:: - - {'scheduled': Tasks that have been submitted but yet run - 'pending' : Tasks that are currently running - 'succeeded': Tasks that have completed successfully - 'failed' : Tasks that have finished with a failure - } - - if @verbose is not specified (or is ``False``), then the values of the dict are integers - the number of Tasks in each state. 
if @verbose is ``True``, then each element in the dict is a list of the taskIDs in that state:: - - In [8]: tc.queue_status() - Out[8]: {'scheduled': 4, - 'pending' : 2, - 'succeeded': 5, - 'failed' : 1 - } - - In [9]: tc.queue_status(verbose=True) - Out[9]: {'scheduled': [8,9,10,11], - 'pending' : [6,7], - 'succeeded': [0,1,2,4,5], - 'failed' : [3] - } - - * ``tc.abort(taskid)`` - ``abort`` allows the user to abort Tasks that have already been submitted. ``abort`` will always return immediately. If the ``Task`` has completed, ``abort`` will raise an ``IndexError ``Task`` Already Completed``. An obvious case for ``abort`` would be where the user submits a long-running ``Task`` with a number of retries (see ``Task``_ section for how to specify retries) in an interactive session, but realizes there has been a typo. The user can then abort the ``Task``, preventing certain failures from cluttering up the queue. It can also be used for parallel search-type problems, where only one ``Task`` will give the solution, so once the user find the solution, the user would want to abort all remaining Tasks to prevent wasted work. - * ``tc.spin()`` - ``spin`` simply triggers the scheduler in the ``TaskControler``. Under most normal circumstances, this will do nothing. The primary known usage case involves the ``Task`` dependency (see `Dependencies`_). The dependency is a function of an Engine's ``properties``, but changing the ``properties`` via the ``MutliEngineClient`` does not trigger a reschedule event. The main example case for this requires the following event sequence: - * ``engine`` is available, ``Task`` is submitted, but ``engine`` does not have ``Task``'s dependencies. - * ``engine`` gets necessary dependencies while no new Tasks are submitted or completed. - * now ``engine`` can run ``Task``, but a ``Task`` event is required for the ``TaskControler`` to try scheduling ``Task`` again. - - ``spin`` is just an empty ping method to ensure that the Controller has scheduled all available Tasks, and should not be needed under most normal circumstances. - -That covers the ``TaskClient``, a simple interface to the cluster. With this, the user can submit jobs (and abort if necessary), request their results, synchronize on arbitrary subsets of jobs. - -.. _task: The Task Object - -The Task Object -=============== - -The ``Task`` is the basic object for describing a job. It can be used in a very simple manner, where the user just specifies a command string to be executed as the ``Task``. The usage of this first argument is exactly the same as the ``execute`` method of the ``MultiEngine`` (in fact, ``execute`` is called to run the code):: - - In [1]: t = client.Task("a = str(id)") - -This ``Task`` would run, and store the string representation of the ``id`` element in ``a`` in each worker's namespace, but it is fairly useless because the user does not know anything about the state of the ``worker`` on which it ran at the time of retrieving results. It is important that each ``Task`` not expect the state of the ``worker`` to persist after the ``Task`` is completed. -There are many different situations for using ``Task`` Farming, and the ``Task`` object has many attributes for use in customizing the ``Task`` behavior. All of a ``Task``'s attributes may be specified in the constructor, through keyword arguments, or after ``Task`` construction through attribute assignment. - -Data Attributes -*************** -It is likely that the user may want to move data around before or after executing the ``Task``. 
We provide methods of sending data to initialize the worker's namespace, and specifying what data to bring back as the ``Task``'s results. - - * pull = [] - The obvious case is as above, where ``t`` would execute and store the result of ``myfunc`` in ``a``, it is likely that the user would want to bring ``a`` back to their namespace. This is done through the ``pull`` attribute. ``pull`` can be a string or list of strings, and it specifies the names of variables to be retrieved. The ``TaskResult`` object retrieved by ``get_task_result`` will have a dictionary of keys and values, and the ``Task``'s ``pull`` attribute determines what goes into it:: - - In [2]: t = client.Task("a = str(id)", pull = "a") - - In [3]: t = client.Task("a = str(id)", pull = ["a", "id"]) - - * push = {} - A user might also want to initialize some data into the namespace before the code part of the ``Task`` is run. Enter ``push``. ``push`` is a dictionary of key/value pairs to be loaded from the user's namespace into the worker's immediately before execution:: - - In [4]: t = client.Task("a = f(submitted)", push=dict(submitted=time.time()), pull="a") - -push and pull result directly in calling an ``engine``'s ``push`` and ``pull`` methods before and after ``Task`` execution respectively, and thus their api is the same. - -Namespace Cleaning -****************** -When a user is running a large number of Tasks, it is likely that the namespace of the worker's could become cluttered. Some Tasks might be sensitive to clutter, while others might be known to cause namespace pollution. For these reasons, Tasks have two boolean attributes for cleaning up the namespace. - - * ``clear_after`` - if clear_after is specified ``True``, the worker on which the ``Task`` was run will be reset (via ``engine.reset``) upon completion of the ``Task``. This can be useful for both Tasks that produce clutter or Tasks whose intermediate data one might wish to be kept private:: - - In [5]: t = client.Task("a = range(1e10)", pull = "a",clear_after=True) - - - * ``clear_before`` - as one might guess, clear_before is identical to ``clear_after``, but it takes place before the ``Task`` is run. This ensures that the ``Task`` runs on a fresh worker:: - - In [6]: t = client.Task("a = globals()", pull = "a",clear_before=True) - -Of course, a user can both at the same time, ensuring that all workers are clear except when they are currently running a job. Both of these default to ``False``. - -Fault Tolerance -*************** -It is possible that Tasks might fail, and there are a variety of reasons this could happen. One might be that the worker it was running on disconnected, and there was nothing wrong with the ``Task`` itself. With the fault tolerance attributes of the ``Task``, the user can specify how many times to resubmit the ``Task``, and what to do if it never succeeds. - - * ``retries`` - ``retries`` is an integer, specifying the number of times a ``Task`` is to be retried. It defaults to zero. It is often a good idea for this number to be 1 or 2, to protect the ``Task`` from disconnecting engines, but not a large number. If a ``Task`` is failing 100 times, there is probably something wrong with the ``Task``. The canonical bad example: - - In [7]: t = client.Task("os.kill(os.getpid(), 9)", retries=99) - - This would actually take down 100 workers. - - * ``recovery_task`` - ``recovery_task`` is another ``Task`` object, to be run in the event of the original ``Task`` still failing after running out of retries. 
Since ``recovery_task`` is another ``Task`` object, it can have its own ``recovery_task``. The chain of Tasks is limitless, except loops are not allowed (that would be bad!). - -Dependencies -************ -Dependencies are the most powerful part of the ``Task`` farming system, because it allows the user to do some classification of the workers, and guide the ``Task`` distribution without meddling with the controller directly. It makes use of two objects - the ``Task``'s ``depend`` attribute, and the engine's ``properties``. See the `MultiEngine`_ reference for how to use engine properties. The engine properties api exists for extending IPython, allowing conditional execution and new controllers that make decisions based on properties of its engines. Currently the ``Task`` dependency is the only internal use of the properties api. - -.. _MultiEngine: ./parallel_multiengine - -The ``depend`` attribute of a ``Task`` must be a function of exactly one argument, the worker's properties dictionary, and it should return ``True`` if the ``Task`` should be allowed to run on the worker and ``False`` if not. The usage in the controller is fault tolerant, so exceptions raised by ``Task.depend`` will be ignored and functionally equivalent to always returning ``False``. Tasks`` with invalid ``depend`` functions will never be assigned to a worker:: - - In [8]: def dep(properties): - ... return properties["RAM"] > 2**32 # have at least 4GB - In [9]: t = client.Task("a = bigfunc()", depend=dep) - -It is important to note that assignment of values to the properties dict is done entirely by the user, either locally (in the engine) using the EngineAPI, or remotely, through the ``MultiEngineClient``'s get/set_properties methods. - - - - - - diff --git a/docs/source/parallel/parallel_task_old.txt b/docs/source/parallel/parallel_task_old.txt new file mode 100644 index 0000000..bd7ca1d --- /dev/null +++ b/docs/source/parallel/parallel_task_old.txt @@ -0,0 +1,240 @@ +.. _paralleltask: + +========================== +The IPython task interface +========================== + +.. contents:: + +The ``Task`` interface to the controller presents the engines as a fault tolerant, dynamic load-balanced system or workers. Unlike the ``MultiEngine`` interface, in the ``Task`` interface, the user have no direct access to individual engines. In some ways, this interface is simpler, but in other ways it is more powerful. Best of all the user can use both of these interfaces at the same time to take advantage or both of their strengths. When the user can break up the user's work into segments that do not depend on previous execution, the ``Task`` interface is ideal. But it also has more power and flexibility, allowing the user to guide the distribution of jobs, without having to assign Tasks to engines explicitly. + +Starting the IPython controller and engines +=========================================== + +To follow along with this tutorial, the user will need to start the IPython +controller and four IPython engines. The simplest way of doing this is to +use the ``ipcluster`` command:: + + $ ipcluster -n 4 + +For more detailed information about starting the controller and engines, see our :ref:`introduction ` to using IPython for parallel computing. + +The magic here is that this single controller and set of engines is running both the MultiEngine and ``Task`` interfaces simultaneously. + +QuickStart Task Farming +======================= + +First, a quick example of how to start running the most basic Tasks. 
+The first step is to import the IPython ``client`` module and then create a ``TaskClient`` instance:: + + In [1]: from IPython.kernel import client + + In [2]: tc = client.TaskClient() + +Then the user wrap the commands the user want to run in Tasks:: + + In [3]: tasklist = [] + In [4]: for n in range(1000): + ... tasklist.append(client.Task("a = %i"%n, pull="a")) + +The first argument of the ``Task`` constructor is a string, the command to be executed. The most important optional keyword argument is ``pull``, which can be a string or list of strings, and it specifies the variable names to be saved as results of the ``Task``. + +Next, the user need to submit the Tasks to the ``TaskController`` with the ``TaskClient``:: + + In [5]: taskids = [ tc.run(t) for t in tasklist ] + +This will give the user a list of the TaskIDs used by the controller to keep track of the Tasks and their results. Now at some point the user are going to want to get those results back. The ``barrier`` method allows the user to wait for the Tasks to finish running:: + + In [6]: tc.barrier(taskids) + +This command will block until all the Tasks in ``taskids`` have finished. Now, the user probably want to look at the user's results:: + + In [7]: task_results = [ tc.get_task_result(taskid) for taskid in taskids ] + +Now the user have a list of ``TaskResult`` objects, which have the actual result as a dictionary, but also keep track of some useful metadata about the ``Task``:: + + In [8]: tr = ``Task``_results[73] + + In [9]: tr + Out[9]: ``TaskResult``[ID:73]:{'a':73} + + In [10]: tr.engineid + Out[10]: 1 + + In [11]: tr.submitted, tr.completed, tr.duration + Out[11]: ("2008/03/08 03:41:42", "2008/03/08 03:41:44", 2.12345) + +The actual results are stored in a dictionary, ``tr.results``, and a namespace object ``tr.ns`` which accesses the result keys by attribute:: + + In [12]: tr.results['a'] + Out[12]: 73 + + In [13]: tr.ns.a + Out[13]: 73 + +That should cover the basics of running simple Tasks. There are several more powerful things the user can do with Tasks covered later. The most useful probably being using a ``MutiEngineClient`` interface to initialize all the engines with the import dependencies necessary to run the user's Tasks. + +There are many options for running and managing Tasks. The best way to learn further about the ``Task`` interface is to study the examples in ``docs/examples``. If the user do so and learn a lots about this interface, we encourage the user to expand this documentation about the ``Task`` system. + +Overview of the Task System +=========================== + +The user's view of the ``Task`` system has three basic objects: The ``TaskClient``, the ``Task``, and the ``TaskResult``. The names of these three objects well indicate their role. + +The ``TaskClient`` is the user's ``Task`` farming connection to the IPython cluster. Unlike the ``MultiEngineClient``, the ``TaskControler`` handles all the scheduling and distribution of work, so the ``TaskClient`` has no notion of engines, it just submits Tasks and requests their results. The Tasks are described as ``Task`` objects, and their results are wrapped in ``TaskResult`` objects. Thus, there are very few necessary methods for the user to manage. + +Inside the task system is a Scheduler object, which assigns tasks to workers. The default scheduler is a simple FIFO queue. Subclassing the Scheduler should be easy, just implementing your own priority system. 
+ +The TaskClient +============== + +The ``TaskClient`` is the object the user use to connect to the ``Controller`` that is managing the user's Tasks. It is the analog of the ``MultiEngineClient`` for the standard IPython multiplexing interface. As with all client interfaces, the first step is to import the IPython Client Module:: + + In [1]: from IPython.kernel import client + +Just as with the ``MultiEngineClient``, the user create the ``TaskClient`` with a tuple, containing the ip-address and port of the ``Controller``. the ``client`` module conveniently has the default address of the ``Task`` interface of the controller. Creating a default ``TaskClient`` object would be done with this:: + + In [2]: tc = client.TaskClient(client.default_task_address) + +or, if the user want to specify a non default location of the ``Controller``, the user can specify explicitly:: + + In [3]: tc = client.TaskClient(("192.168.1.1", 10113)) + +As discussed earlier, the ``TaskClient`` only has a few basic methods. + + * ``tc.run(task)`` + ``run`` is the method by which the user submits Tasks. It takes exactly one argument, a ``Task`` object. All the advanced control of ``Task`` behavior is handled by properties of the ``Task`` object, rather than the submission command, so they will be discussed later in the `Task`_ section. ``run`` returns an integer, the ``Task``ID by which the ``Task`` and its results can be tracked and retrieved:: + + In [4]: ``Task``ID = tc.run(``Task``) + + * ``tc.get_task_result(taskid, block=``False``)`` + ``get_task_result`` is the method by which results are retrieved. It takes a single integer argument, the ``Task``ID`` of the result the user wish to retrieve. ``get_task_result`` also takes a keyword argument ``block``. ``block`` specifies whether the user actually want to wait for the result. If ``block`` is false, as it is by default, ``get_task_result`` will return immediately. If the ``Task`` has completed, it will return the ``TaskResult`` object for that ``Task``. But if the ``Task`` has not completed, it will return ``None``. If the user specify ``block=``True``, then ``get_task_result`` will wait for the ``Task`` to complete, and always return the ``TaskResult`` for the requested ``Task``. + * ``tc.barrier(taskid(s))`` + ``barrier`` is a synchronization method. It takes exactly one argument, a ``Task``ID or list of taskIDs. ``barrier`` will block until all the specified Tasks have completed. In practice, a barrier is often called between the ``Task`` submission section of the code and the result gathering section:: + + In [5]: taskIDs = [ tc.run(``Task``) for ``Task`` in myTasks ] + + In [6]: tc.get_task_result(taskIDs[-1]) is None + Out[6]: ``True`` + + In [7]: tc.barrier(``Task``ID) + + In [8]: results = [ tc.get_task_result(tid) for tid in taskIDs ] + + * ``tc.queue_status(verbose=``False``)`` + ``queue_status`` is a method for querying the state of the ``TaskControler``. ``queue_status`` returns a dict of the form:: + + {'scheduled': Tasks that have been submitted but yet run + 'pending' : Tasks that are currently running + 'succeeded': Tasks that have completed successfully + 'failed' : Tasks that have finished with a failure + } + + if @verbose is not specified (or is ``False``), then the values of the dict are integers - the number of Tasks in each state. 
if @verbose is ``True``, then each element in the dict is a list of the taskIDs in that state:: + + In [8]: tc.queue_status() + Out[8]: {'scheduled': 4, + 'pending' : 2, + 'succeeded': 5, + 'failed' : 1 + } + + In [9]: tc.queue_status(verbose=True) + Out[9]: {'scheduled': [8,9,10,11], + 'pending' : [6,7], + 'succeeded': [0,1,2,4,5], + 'failed' : [3] + } + + * ``tc.abort(taskid)`` + ``abort`` allows the user to abort Tasks that have already been submitted. ``abort`` will always return immediately. If the ``Task`` has completed, ``abort`` will raise an ``IndexError ``Task`` Already Completed``. An obvious case for ``abort`` would be where the user submits a long-running ``Task`` with a number of retries (see ``Task``_ section for how to specify retries) in an interactive session, but realizes there has been a typo. The user can then abort the ``Task``, preventing certain failures from cluttering up the queue. It can also be used for parallel search-type problems, where only one ``Task`` will give the solution, so once the user find the solution, the user would want to abort all remaining Tasks to prevent wasted work. + * ``tc.spin()`` + ``spin`` simply triggers the scheduler in the ``TaskControler``. Under most normal circumstances, this will do nothing. The primary known usage case involves the ``Task`` dependency (see `Dependencies`_). The dependency is a function of an Engine's ``properties``, but changing the ``properties`` via the ``MutliEngineClient`` does not trigger a reschedule event. The main example case for this requires the following event sequence: + * ``engine`` is available, ``Task`` is submitted, but ``engine`` does not have ``Task``'s dependencies. + * ``engine`` gets necessary dependencies while no new Tasks are submitted or completed. + * now ``engine`` can run ``Task``, but a ``Task`` event is required for the ``TaskControler`` to try scheduling ``Task`` again. + + ``spin`` is just an empty ping method to ensure that the Controller has scheduled all available Tasks, and should not be needed under most normal circumstances. + +That covers the ``TaskClient``, a simple interface to the cluster. With this, the user can submit jobs (and abort if necessary), request their results, synchronize on arbitrary subsets of jobs. + +.. _task: The Task Object + +The Task Object +=============== + +The ``Task`` is the basic object for describing a job. It can be used in a very simple manner, where the user just specifies a command string to be executed as the ``Task``. The usage of this first argument is exactly the same as the ``execute`` method of the ``MultiEngine`` (in fact, ``execute`` is called to run the code):: + + In [1]: t = client.Task("a = str(id)") + +This ``Task`` would run, and store the string representation of the ``id`` element in ``a`` in each worker's namespace, but it is fairly useless because the user does not know anything about the state of the ``worker`` on which it ran at the time of retrieving results. It is important that each ``Task`` not expect the state of the ``worker`` to persist after the ``Task`` is completed. +There are many different situations for using ``Task`` Farming, and the ``Task`` object has many attributes for use in customizing the ``Task`` behavior. All of a ``Task``'s attributes may be specified in the constructor, through keyword arguments, or after ``Task`` construction through attribute assignment. + +Data Attributes +*************** +It is likely that the user may want to move data around before or after executing the ``Task``. 
We provide methods of sending data to initialize the worker's namespace, and specifying what data to bring back as the ``Task``'s results. + + * pull = [] + The obvious case is as above, where ``t`` would execute and store the result of ``myfunc`` in ``a``, it is likely that the user would want to bring ``a`` back to their namespace. This is done through the ``pull`` attribute. ``pull`` can be a string or list of strings, and it specifies the names of variables to be retrieved. The ``TaskResult`` object retrieved by ``get_task_result`` will have a dictionary of keys and values, and the ``Task``'s ``pull`` attribute determines what goes into it:: + + In [2]: t = client.Task("a = str(id)", pull = "a") + + In [3]: t = client.Task("a = str(id)", pull = ["a", "id"]) + + * push = {} + A user might also want to initialize some data into the namespace before the code part of the ``Task`` is run. Enter ``push``. ``push`` is a dictionary of key/value pairs to be loaded from the user's namespace into the worker's immediately before execution:: + + In [4]: t = client.Task("a = f(submitted)", push=dict(submitted=time.time()), pull="a") + +push and pull result directly in calling an ``engine``'s ``push`` and ``pull`` methods before and after ``Task`` execution respectively, and thus their api is the same. + +Namespace Cleaning +****************** +When a user is running a large number of Tasks, it is likely that the namespace of the worker's could become cluttered. Some Tasks might be sensitive to clutter, while others might be known to cause namespace pollution. For these reasons, Tasks have two boolean attributes for cleaning up the namespace. + + * ``clear_after`` + if clear_after is specified ``True``, the worker on which the ``Task`` was run will be reset (via ``engine.reset``) upon completion of the ``Task``. This can be useful for both Tasks that produce clutter or Tasks whose intermediate data one might wish to be kept private:: + + In [5]: t = client.Task("a = range(1e10)", pull = "a",clear_after=True) + + + * ``clear_before`` + as one might guess, clear_before is identical to ``clear_after``, but it takes place before the ``Task`` is run. This ensures that the ``Task`` runs on a fresh worker:: + + In [6]: t = client.Task("a = globals()", pull = "a",clear_before=True) + +Of course, a user can both at the same time, ensuring that all workers are clear except when they are currently running a job. Both of these default to ``False``. + +Fault Tolerance +*************** +It is possible that Tasks might fail, and there are a variety of reasons this could happen. One might be that the worker it was running on disconnected, and there was nothing wrong with the ``Task`` itself. With the fault tolerance attributes of the ``Task``, the user can specify how many times to resubmit the ``Task``, and what to do if it never succeeds. + + * ``retries`` + ``retries`` is an integer, specifying the number of times a ``Task`` is to be retried. It defaults to zero. It is often a good idea for this number to be 1 or 2, to protect the ``Task`` from disconnecting engines, but not a large number. If a ``Task`` is failing 100 times, there is probably something wrong with the ``Task``. The canonical bad example: + + In [7]: t = client.Task("os.kill(os.getpid(), 9)", retries=99) + + This would actually take down 100 workers. + + * ``recovery_task`` + ``recovery_task`` is another ``Task`` object, to be run in the event of the original ``Task`` still failing after running out of retries. 
Since ``recovery_task`` is another ``Task`` object, it can have its own ``recovery_task``. The chain of Tasks is limitless, except loops are not allowed (that would be bad!). + +Dependencies +************ +Dependencies are the most powerful part of the ``Task`` farming system, because it allows the user to do some classification of the workers, and guide the ``Task`` distribution without meddling with the controller directly. It makes use of two objects - the ``Task``'s ``depend`` attribute, and the engine's ``properties``. See the `MultiEngine`_ reference for how to use engine properties. The engine properties api exists for extending IPython, allowing conditional execution and new controllers that make decisions based on properties of its engines. Currently the ``Task`` dependency is the only internal use of the properties api. + +.. _MultiEngine: ./parallel_multiengine + +The ``depend`` attribute of a ``Task`` must be a function of exactly one argument, the worker's properties dictionary, and it should return ``True`` if the ``Task`` should be allowed to run on the worker and ``False`` if not. The usage in the controller is fault tolerant, so exceptions raised by ``Task.depend`` will be ignored and functionally equivalent to always returning ``False``. Tasks`` with invalid ``depend`` functions will never be assigned to a worker:: + + In [8]: def dep(properties): + ... return properties["RAM"] > 2**32 # have at least 4GB + In [9]: t = client.Task("a = bigfunc()", depend=dep) + +It is important to note that assignment of values to the properties dict is done entirely by the user, either locally (in the engine) using the EngineAPI, or remotely, through the ``MultiEngineClient``'s get/set_properties methods. + + + + + + diff --git a/win32_manual_post_install.py b/win32_manual_post_install.py deleted file mode 100755 index ea7ad13..0000000 --- a/win32_manual_post_install.py +++ /dev/null @@ -1,141 +0,0 @@ -#!python -"""Windows-specific part of the installation""" - -import os, sys - -try: - import shutil,pythoncom - from win32com.shell import shell - import _winreg as wreg -except ImportError: - print """ -You seem to be missing the PythonWin extensions necessary for automatic -installation. You can get them (free) from -http://starship.python.net/crew/mhammond/ - -Please see the manual for details if you want to finish the installation by -hand, or get PythonWin and repeat the procedure. - -Press to exit this installer.""" - raw_input() - sys.exit() - - -def make_shortcut(fname,target,args='',start_in='',comment='',icon=None): - """Make a Windows shortcut (.lnk) file. - - make_shortcut(fname,target,args='',start_in='',comment='',icon=None) - - Arguments: - fname - name of the final shortcut file (include the .lnk) - target - what the shortcut will point to - args - additional arguments to pass to the target program - start_in - directory where the target command will be called - comment - for the popup tooltips - icon - optional icon file. This must be a tuple of the type - (icon_file,index), where index is the index of the icon you want - in the file. For single .ico files, index=0, but for icon libraries - contained in a single file it can be >0. 
- """ - - shortcut = pythoncom.CoCreateInstance( - shell.CLSID_ShellLink, None, - pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink - ) - shortcut.SetPath(target) - shortcut.SetArguments(args) - shortcut.SetWorkingDirectory(start_in) - shortcut.SetDescription(comment) - if icon: - shortcut.SetIconLocation(*icon) - shortcut.QueryInterface(pythoncom.IID_IPersistFile).Save(fname,0) - - -def run(wait=0): - # Find where the Start Menu and My Documents are on the filesystem - key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, - r'Software\Microsoft\Windows\CurrentVersion' - r'\Explorer\Shell Folders') - - programs_dir = wreg.QueryValueEx(key,'Programs')[0] - my_documents_dir = wreg.QueryValueEx(key,'Personal')[0] - key.Close() - - # Find where the 'program files' directory is - key = wreg.OpenKey(wreg.HKEY_LOCAL_MACHINE, - r'SOFTWARE\Microsoft\Windows\CurrentVersion') - - program_files_dir = wreg.QueryValueEx(key,'ProgramFilesDir')[0] - key.Close() - - - # File and directory names - ip_dir = program_files_dir + r'\IPython' - ip_prog_dir = programs_dir + r'\IPython' - doc_dir = ip_dir+r'\docs' - ip_filename = ip_dir+r'\IPython_shell.py' - pycon_icon = doc_dir+r'\pycon.ico' - - if not os.path.isdir(ip_dir): - os.mkdir(ip_dir) - - # Copy startup script and documentation - shutil.copy(sys.prefix+r'\Scripts\ipython',ip_filename) - if os.path.isdir(doc_dir): - shutil.rmtree(doc_dir) - shutil.copytree('docs',doc_dir) - - # make shortcuts for IPython, html and pdf docs. - print 'Making entries for IPython in Start Menu...', - - # Create .bat file in \Scripts - fic = open(sys.prefix + r'\Scripts\ipython.bat','w') - fic.write('"' + sys.prefix + r'\python.exe' + '" -i ' + '"' + - sys.prefix + r'\Scripts\ipython" %*') - fic.close() - - # Create .bat file in \\Scripts - fic = open(sys.prefix + '\\Scripts\\ipython.bat','w') - fic.write('"' + sys.prefix + '\\python.exe' + '" -i ' + '"' + sys.prefix + '\\Scripts\ipython" %*') - fic.close() - - # Create shortcuts in Programs\IPython: - if not os.path.isdir(ip_prog_dir): - os.mkdir(ip_prog_dir) - os.chdir(ip_prog_dir) - - man_pdf = doc_dir + r'\dist\ipython.pdf' - man_htm = doc_dir + r'\dist\index.html' - - make_shortcut('IPython.lnk',sys.executable, '"%s"' % ip_filename, - my_documents_dir, - 'IPython - Enhanced python command line interpreter', - (pycon_icon,0)) - make_shortcut('pysh.lnk',sys.executable, '"%s" -p pysh' % ip_filename, - my_documents_dir, - 'pysh - a system shell with Python syntax (IPython based)', - (pycon_icon,0)) - make_shortcut('Manual in HTML format.lnk',man_htm,'','', - 'IPython Manual - HTML format') - make_shortcut('Manual in PDF format.lnk',man_pdf,'','', - 'IPython Manual - PDF format') - - print """Done. - -I created the directory %s. There you will find the -IPython startup script and manuals. - -An IPython menu was also created in your Start Menu, with entries for -IPython itself and the manual in HTML and PDF formats. - -For reading PDF documents you need the freely available Adobe Acrobat -Reader. If you don't have it, you can download it from: -http://www.adobe.com/products/acrobat/readstep2.html -""" % ip_dir - - if wait: - print "Finished with IPython installation. Press Enter to exit this installer.", - raw_input() - -if __name__ == '__main__': - run()