diff --git a/IPython/config/default/ipcluster_config.py b/IPython/config/default/ipcluster_config.py
index 36aa453..26dbd1b 100644
--- a/IPython/config/default/ipcluster_config.py
+++ b/IPython/config/default/ipcluster_config.py
@@ -11,8 +11,8 @@ c = get_config()
# - Start as a regular process on localhost.
# - Start using mpiexec.
# - Start using the Windows HPC Server 2008 scheduler
-# - Start using PBS
-# - Start using SSH (currently broken)
+# - Start using PBS/SGE
+# - Start using SSH
# The selected launchers can be configured below.
@@ -21,15 +21,18 @@ c = get_config()
# - LocalControllerLauncher
# - MPIExecControllerLauncher
# - PBSControllerLauncher
+# - SGEControllerLauncher
# - WindowsHPCControllerLauncher
-# c.Global.controller_launcher = 'IPython.kernel.launcher.LocalControllerLauncher'
+# c.Global.controller_launcher = 'IPython.parallel.launcher.LocalControllerLauncher'
+# c.Global.controller_launcher = 'IPython.parallel.launcher.PBSControllerLauncher'
# Options are:
# - LocalEngineSetLauncher
# - MPIExecEngineSetLauncher
# - PBSEngineSetLauncher
+# - SGEEngineSetLauncher
# - WindowsHPCEngineSetLauncher
-# c.Global.engine_launcher = 'IPython.kernel.launcher.LocalEngineSetLauncher'
+# c.Global.engine_launcher = 'IPython.parallel.launcher.LocalEngineSetLauncher'
#-----------------------------------------------------------------------------
# Global configuration
@@ -68,23 +71,23 @@ c = get_config()
# MPIExec launchers
#-----------------------------------------------------------------------------
-# The mpiexec/mpirun command to use in started the controller.
-# c.MPIExecControllerLauncher.mpi_cmd = ['mpiexec']
+# The mpiexec/mpirun command to use in both the controller and engines.
+# c.MPIExecLauncher.mpi_cmd = ['mpiexec']
# Additional arguments to pass to the actual mpiexec command.
+# c.MPIExecLauncher.mpi_args = []
+
+# The mpiexec/mpirun command and args can be overridden if they should be different
+# for controller and engines.
+# c.MPIExecControllerLauncher.mpi_cmd = ['mpiexec']
# c.MPIExecControllerLauncher.mpi_args = []
+# c.MPIExecEngineSetLauncher.mpi_cmd = ['mpiexec']
+# c.MPIExecEngineSetLauncher.mpi_args = []
# The command line argument to call the controller with.
# c.MPIExecControllerLauncher.controller_args = \
# ['--log-to-file','--log-level', '40']
-
-# The mpiexec/mpirun command to use in started the controller.
-# c.MPIExecEngineSetLauncher.mpi_cmd = ['mpiexec']
-
-# Additional arguments to pass to the actual mpiexec command.
-# c.MPIExecEngineSetLauncher.mpi_args = []
-
# Command line argument passed to the engines.
# c.MPIExecEngineSetLauncher.engine_args = ['--log-to-file','--log-level', '40']
@@ -95,51 +98,105 @@ c = get_config()
# SSH launchers
#-----------------------------------------------------------------------------
-# Todo
+# ipclusterz can be used to launch controller and engines remotely via ssh.
+# Note that currently ipclusterz does not do any file distribution, so if
+# machines are not on a shared filesystem, config and json files must be
+# distributed. For this reason, the reuse_files defaults to True on an
+# ssh-launched Controller. This flag can be overridden by the program_args
+# attribute of c.SSHControllerLauncher.
+
+# set the ssh cmd for launching remote commands. The default is ['ssh']
+# c.SSHLauncher.ssh_cmd = ['ssh']
+
+# set the ssh args passed when launching remote commands, e.g. ['tt']
+# c.SSHLauncher.ssh_args = ['tt']
+
+# Set the user and hostname for the controller
+# c.SSHControllerLauncher.hostname = 'controller.example.com'
+# c.SSHControllerLauncher.user = os.environ.get('USER','username')
+
+# Set the arguments to be passed to ipcontrollerz
+# note that remotely launched ipcontrollerz will not get the contents of
+# the local ipcontrollerz_config.py unless it resides on the *remote host*
+# in the location specified by the --cluster_dir argument.
+# c.SSHControllerLauncher.program_args = ['-r', '-ip', '0.0.0.0', '--cluster_dir', '/path/to/cd']
+
+# Set the default args passed to ipenginez for SSH launched engines
+# c.SSHEngineSetLauncher.engine_args = ['--mpi', 'mpi4py']
+# SSH engines are launched as a dict of locations/n-engines.
+# if a value is a tuple instead of an int, it is assumed to be of the form
+# (n, [args]), setting the arguments passed to ipenginez on `host`.
+# otherwise, c.SSHEngineSetLauncher.engine_args will be used as the default.
+
+# In this case, there will be 3 engines at my.example.com, and
+# 2 at you@ipython.scipy.org with a special json connector location.
+# c.SSHEngineSetLauncher.engines = {'my.example.com' : 3,
+# 'you@ipython.scipy.org' : (2, ['-f', '/path/to/ipcontroller-engine.json']),
+# }
#-----------------------------------------------------------------------------
# Unix batch (PBS) schedulers launchers
#-----------------------------------------------------------------------------
+# SGE and PBS are very similar. All configurables in this section called 'PBS*'
+# also exist as 'SGE*'.
+
# The command line program to use to submit a PBS job.
-# c.PBSControllerLauncher.submit_command = 'qsub'
+# c.PBSLauncher.submit_command = ['qsub']
# The command line program to use to delete a PBS job.
-# c.PBSControllerLauncher.delete_command = 'qdel'
+# c.PBSLauncher.delete_command = ['qdel']
+
+# The PBS queue in which the job should run
+# c.PBSLauncher.queue = 'myqueue'
# A regular expression that takes the output of qsub and find the job id.
-# c.PBSControllerLauncher.job_id_regexp = r'\d+'
+# c.PBSLauncher.job_id_regexp = r'\d+'
+
+# If for some reason the Controller and Engines have different options above, they
+# can be set as c.PBSControllerLauncher. etc.
+
+# PBS and SGE have default templates, but you can specify your own, either as strings
+# or from files, as described here:
# The batch submission script used to start the controller. This is where
-# environment variables would be setup, etc. This string is interpolated using
+# environment variables would be set up, etc. This string is interpolated using
# the Itpl module in IPython.external. Basically, you can use ${n} for the
# number of engine and ${cluster_dir} for the cluster_dir.
-# c.PBSControllerLauncher.batch_template = """"""
+# c.PBSControllerLauncher.batch_template = """
+# #PBS -N ipcontroller
+# #PBS -q $queue
+#
+# ipcontrollerz --cluster-dir $cluster_dir
+# """
+
+# You can also load this template from a file
+# c.PBSControllerLauncher.batch_template_file = u"/path/to/my/template.sh"
# The name of the instantiated batch script that will actually be used to
# submit the job. This will be written to the cluster directory.
-# c.PBSControllerLauncher.batch_file_name = u'pbs_batch_script_controller'
-
-
-# The command line program to use to submit a PBS job.
-# c.PBSEngineSetLauncher.submit_command = 'qsub'
-
-# The command line program to use to delete a PBS job.
-# c.PBSEngineSetLauncher.delete_command = 'qdel'
-
-# A regular expression that takes the output of qsub and find the job id.
-# c.PBSEngineSetLauncher.job_id_regexp = r'\d+'
+# c.PBSControllerLauncher.batch_file_name = u'pbs_controller'
# The batch submission script used to start the engines. This is where
-# environment variables would be setup, etc. This string is interpolated using
+# environment variables would be set up, etc. This string is interpolated using
# the Itpl module in IPython.external. Basically, you can use ${n} for the
# number of engine and ${cluster_dir} for the cluster_dir.
-# c.PBSEngineSetLauncher.batch_template = """"""
+# c.PBSEngineSetLauncher.batch_template = """
+# #PBS -N ipengine
+# #PBS -l nprocs=$n
+#
+# ipenginez --cluster-dir $cluster_dir
+# """
+
+# You can also load this template from a file
+# c.PBSEngineSetLauncher.batch_template_file = u"/path/to/my/template.sh"
# The name of the instantiated batch script that will actually be used to
# submit the job. This will be written to the cluster directory.
-# c.PBSEngineSetLauncher.batch_file_name = u'pbs_batch_script_engines'
+# c.PBSEngineSetLauncher.batch_file_name = u'pbs_engines'
+
+
#-----------------------------------------------------------------------------
# Windows HPC Server 2008 launcher configuration
diff --git a/IPython/config/default/ipclusterz_config.py b/IPython/config/default/ipclusterz_config.py
deleted file mode 100644
index 26dbd1b..0000000
--- a/IPython/config/default/ipclusterz_config.py
+++ /dev/null
@@ -1,241 +0,0 @@
-import os
-
-c = get_config()
-
-#-----------------------------------------------------------------------------
-# Select which launchers to use
-#-----------------------------------------------------------------------------
-
-# This allows you to control what method is used to start the controller
-# and engines. The following methods are currently supported:
-# - Start as a regular process on localhost.
-# - Start using mpiexec.
-# - Start using the Windows HPC Server 2008 scheduler
-# - Start using PBS/SGE
-# - Start using SSH
-
-
-# The selected launchers can be configured below.
-
-# Options are:
-# - LocalControllerLauncher
-# - MPIExecControllerLauncher
-# - PBSControllerLauncher
-# - SGEControllerLauncher
-# - WindowsHPCControllerLauncher
-# c.Global.controller_launcher = 'IPython.parallel.launcher.LocalControllerLauncher'
-# c.Global.controller_launcher = 'IPython.parallel.launcher.PBSControllerLauncher'
-
-# Options are:
-# - LocalEngineSetLauncher
-# - MPIExecEngineSetLauncher
-# - PBSEngineSetLauncher
-# - SGEEngineSetLauncher
-# - WindowsHPCEngineSetLauncher
-# c.Global.engine_launcher = 'IPython.parallel.launcher.LocalEngineSetLauncher'
-
-#-----------------------------------------------------------------------------
-# Global configuration
-#-----------------------------------------------------------------------------
-
-# The default number of engines that will be started. This is overridden by
-# the -n command line option: "ipcluster start -n 4"
-# c.Global.n = 2
-
-# Log to a file in cluster_dir/log, otherwise just log to sys.stdout.
-# c.Global.log_to_file = False
-
-# Remove old logs from cluster_dir/log before starting.
-# c.Global.clean_logs = True
-
-# The working directory for the process. The application will use os.chdir
-# to change to this directory before starting.
-# c.Global.work_dir = os.getcwd()
-
-
-#-----------------------------------------------------------------------------
-# Local process launchers
-#-----------------------------------------------------------------------------
-
-# The command line arguments to call the controller with.
-# c.LocalControllerLauncher.controller_args = \
-# ['--log-to-file','--log-level', '40']
-
-# The working directory for the controller
-# c.LocalEngineSetLauncher.work_dir = u''
-
-# Command line argument passed to the engines.
-# c.LocalEngineSetLauncher.engine_args = ['--log-to-file','--log-level', '40']
-
-#-----------------------------------------------------------------------------
-# MPIExec launchers
-#-----------------------------------------------------------------------------
-
-# The mpiexec/mpirun command to use in both the controller and engines.
-# c.MPIExecLauncher.mpi_cmd = ['mpiexec']
-
-# Additional arguments to pass to the actual mpiexec command.
-# c.MPIExecLauncher.mpi_args = []
-
-# The mpiexec/mpirun command and args can be overridden if they should be different
-# for controller and engines.
-# c.MPIExecControllerLauncher.mpi_cmd = ['mpiexec']
-# c.MPIExecControllerLauncher.mpi_args = []
-# c.MPIExecEngineSetLauncher.mpi_cmd = ['mpiexec']
-# c.MPIExecEngineSetLauncher.mpi_args = []
-
-# The command line argument to call the controller with.
-# c.MPIExecControllerLauncher.controller_args = \
-# ['--log-to-file','--log-level', '40']
-
-# Command line argument passed to the engines.
-# c.MPIExecEngineSetLauncher.engine_args = ['--log-to-file','--log-level', '40']
-
-# The default number of engines to start if not given elsewhere.
-# c.MPIExecEngineSetLauncher.n = 1
-
-#-----------------------------------------------------------------------------
-# SSH launchers
-#-----------------------------------------------------------------------------
-
-# ipclusterz can be used to launch controller and engines remotely via ssh.
-# Note that currently ipclusterz does not do any file distribution, so if
-# machines are not on a shared filesystem, config and json files must be
-# distributed. For this reason, the reuse_files defaults to True on an
-# ssh-launched Controller. This flag can be overridded by the program_args
-# attribute of c.SSHControllerLauncher.
-
-# set the ssh cmd for launching remote commands. The default is ['ssh']
-# c.SSHLauncher.ssh_cmd = ['ssh']
-
-# set the ssh cmd for launching remote commands. The default is ['ssh']
-# c.SSHLauncher.ssh_args = ['tt']
-
-# Set the user and hostname for the controller
-# c.SSHControllerLauncher.hostname = 'controller.example.com'
-# c.SSHControllerLauncher.user = os.environ.get('USER','username')
-
-# Set the arguments to be passed to ipcontrollerz
-# note that remotely launched ipcontrollerz will not get the contents of
-# the local ipcontrollerz_config.py unless it resides on the *remote host*
-# in the location specified by the --cluster_dir argument.
-# c.SSHControllerLauncher.program_args = ['-r', '-ip', '0.0.0.0', '--cluster_dir', '/path/to/cd']
-
-# Set the default args passed to ipenginez for SSH launched engines
-# c.SSHEngineSetLauncher.engine_args = ['--mpi', 'mpi4py']
-
-# SSH engines are launched as a dict of locations/n-engines.
-# if a value is a tuple instead of an int, it is assumed to be of the form
-# (n, [args]), setting the arguments to passed to ipenginez on `host`.
-# otherwise, c.SSHEngineSetLauncher.engine_args will be used as the default.
-
-# In this case, there will be 3 engines at my.example.com, and
-# 2 at you@ipython.scipy.org with a special json connector location.
-# c.SSHEngineSetLauncher.engines = {'my.example.com' : 3,
-# 'you@ipython.scipy.org' : (2, ['-f', '/path/to/ipcontroller-engine.json']}
-# }
-
-#-----------------------------------------------------------------------------
-# Unix batch (PBS) schedulers launchers
-#-----------------------------------------------------------------------------
-
-# SGE and PBS are very similar. All configurables in this section called 'PBS*'
-# also exist as 'SGE*'.
-
-# The command line program to use to submit a PBS job.
-# c.PBSLauncher.submit_command = ['qsub']
-
-# The command line program to use to delete a PBS job.
-# c.PBSLauncher.delete_command = ['qdel']
-
-# The PBS queue in which the job should run
-# c.PBSLauncher.queue = 'myqueue'
-
-# A regular expression that takes the output of qsub and find the job id.
-# c.PBSLauncher.job_id_regexp = r'\d+'
-
-# If for some reason the Controller and Engines have different options above, they
-# can be set as c.PBSControllerLauncher. etc.
-
-# PBS and SGE have default templates, but you can specify your own, either as strings
-# or from files, as described here:
-
-# The batch submission script used to start the controller. This is where
-# environment variables would be setup, etc. This string is interpreted using
-# the Itpl module in IPython.external. Basically, you can use ${n} for the
-# number of engine and ${cluster_dir} for the cluster_dir.
-# c.PBSControllerLauncher.batch_template = """
-# #PBS -N ipcontroller
-# #PBS -q $queue
-#
-# ipcontrollerz --cluster-dir $cluster_dir
-# """
-
-# You can also load this template from a file
-# c.PBSControllerLauncher.batch_template_file = u"/path/to/my/template.sh"
-
-# The name of the instantiated batch script that will actually be used to
-# submit the job. This will be written to the cluster directory.
-# c.PBSControllerLauncher.batch_file_name = u'pbs_controller'
-
-# The batch submission script used to start the engines. This is where
-# environment variables would be setup, etc. This string is interpreted using
-# the Itpl module in IPython.external. Basically, you can use ${n} for the
-# number of engine and ${cluster_dir} for the cluster_dir.
-# c.PBSEngineSetLauncher.batch_template = """
-# #PBS -N ipcontroller
-# #PBS -l nprocs=$n
-#
-# ipenginez --cluster-dir $cluster_dir$s
-# """
-
-# You can also load this template from a file
-# c.PBSControllerLauncher.batch_template_file = u"/path/to/my/template.sh"
-
-# The name of the instantiated batch script that will actually be used to
-# submit the job. This will be written to the cluster directory.
-# c.PBSEngineSetLauncher.batch_file_name = u'pbs_engines'
-
-
-
-#-----------------------------------------------------------------------------
-# Windows HPC Server 2008 launcher configuration
-#-----------------------------------------------------------------------------
-
-# c.IPControllerJob.job_name = 'IPController'
-# c.IPControllerJob.is_exclusive = False
-# c.IPControllerJob.username = r'USERDOMAIN\USERNAME'
-# c.IPControllerJob.priority = 'Highest'
-# c.IPControllerJob.requested_nodes = ''
-# c.IPControllerJob.project = 'MyProject'
-
-# c.IPControllerTask.task_name = 'IPController'
-# c.IPControllerTask.controller_cmd = [u'ipcontroller.exe']
-# c.IPControllerTask.controller_args = ['--log-to-file', '--log-level', '40']
-# c.IPControllerTask.environment_variables = {}
-
-# c.WindowsHPCControllerLauncher.scheduler = 'HEADNODE'
-# c.WindowsHPCControllerLauncher.job_file_name = u'ipcontroller_job.xml'
-
-
-# c.IPEngineSetJob.job_name = 'IPEngineSet'
-# c.IPEngineSetJob.is_exclusive = False
-# c.IPEngineSetJob.username = r'USERDOMAIN\USERNAME'
-# c.IPEngineSetJob.priority = 'Highest'
-# c.IPEngineSetJob.requested_nodes = ''
-# c.IPEngineSetJob.project = 'MyProject'
-
-# c.IPEngineTask.task_name = 'IPEngine'
-# c.IPEngineTask.engine_cmd = [u'ipengine.exe']
-# c.IPEngineTask.engine_args = ['--log-to-file', '--log-level', '40']
-# c.IPEngineTask.environment_variables = {}
-
-# c.WindowsHPCEngineSetLauncher.scheduler = 'HEADNODE'
-# c.WindowsHPCEngineSetLauncher.job_file_name = u'ipengineset_job.xml'
-
-
-
-
-
-
-
diff --git a/IPython/config/default/ipcontroller_config.py b/IPython/config/default/ipcontroller_config.py
index c1d0bce..adf2878 100644
--- a/IPython/config/default/ipcontroller_config.py
+++ b/IPython/config/default/ipcontroller_config.py
@@ -25,112 +25,156 @@ c = get_config()
# be imported in the controller for pickling to work.
# c.Global.import_statements = ['import math']
-# Reuse the controller's FURL files. If False, FURL files are regenerated
+# Reuse the controller's JSON files. If False, JSON files are regenerated
# each time the controller is run. If True, they will be reused, *but*, you
# also must set the network ports by hand. If set, this will override the
# values set for the client and engine connections below.
-# c.Global.reuse_furls = True
+# c.Global.reuse_files = True
-# Enable SSL encryption on all connections to the controller. If set, this
-# will override the values set for the client and engine connections below.
+# Enable exec_key authentication on all messages. Default is True
# c.Global.secure = True
# The working directory for the process. The application will use os.chdir
# to change to this directory before starting.
# c.Global.work_dir = os.getcwd()
+# The log url for logging to an `iploggerz` application. This will override
+# log-to-file.
+# c.Global.log_url = 'tcp://127.0.0.1:20202'
+
+# The specific external IP that is used to disambiguate multi-interface URLs.
+# The default behavior is to guess from external IPs gleaned from `socket`.
+# c.Global.location = '192.168.1.123'
+
+# The ssh server remote clients should use to connect to this controller.
+# It must be a machine that can see the interface specified in client_ip.
+# The default for client_ip is localhost, in which case the sshserver must
+# be an external IP of the controller machine.
+# c.Global.sshserver = 'controller.example.com'
+
+# the url to use for registration. If set, this overrides engine-ip,
+# engine-transport, client-ip, client-transport, and regport.
+# c.RegistrationFactory.url = 'tcp://*:12345'
+
+# the port to use for registration. Clients and Engines both use this
+# port for registration.
+# c.RegistrationFactory.regport = 10101
+
#-----------------------------------------------------------------------------
-# Configure the client services
+# Configure the Task Scheduler
#-----------------------------------------------------------------------------
-# Basic client service config attributes
+# The routing scheme. 'pure' will use the pure-ZMQ scheduler. Any other
+# value will use a Python scheduler with various routing schemes.
+# python schemes are: lru, weighted, random, twobin. Default is 'weighted'.
+# Note that the pure ZMQ scheduler does not support many features, such as
+# dying engines, dependencies, or engine-subset load-balancing.
+# c.ControllerFactory.scheme = 'pure'
-# The network interface the controller will listen on for client connections.
-# This should be an IP address or hostname of the controller's host. The empty
-# string means listen on all interfaces.
-# c.FCClientServiceFactory.ip = ''
+# The pure ZMQ scheduler can limit the number of outstanding tasks per engine
+# by using the ZMQ HWM option. This allows engines with long-running tasks
+# to not steal too many tasks from other engines. The default is 0, which
+# means aggressively distribute messages, never waiting for them to finish.
+# c.ControllerFactory.hwm = 1
-# The TCP/IP port the controller will listen on for client connections. If 0
-# a random port will be used. If the controller's host has a firewall running
-# it must allow incoming traffic on this port.
-# c.FCClientServiceFactory.port = 0
+# Whether to use Threads or Processes to start the Schedulers. Threads will
+# use less resources, but potentially reduce throughput. Default is to
+# use processes. Note that a Python scheduler will always be in a Process.
+# c.ControllerFactory.usethreads
-# The client learns how to connect to the controller by looking at the
-# location field embedded in the FURL. If this field is empty, all network
-# interfaces that the controller is listening on will be listed. To have the
-# client connect on a particular interface, list it here.
-# c.FCClientServiceFactory.location = ''
+#-----------------------------------------------------------------------------
+# Configure the Hub
+#-----------------------------------------------------------------------------
+
+# Which class to use for the db backend. Currently supported are DictDB (the
+# default), and MongoDB. Uncomment this line to enable MongoDB, which will
+# slow-down the Hub's responsiveness, but also reduce its memory footprint.
+# c.HubFactory.db_class = 'IPython.parallel.mongodb.MongoDB'
-# Use SSL encryption for the client connection.
-# c.FCClientServiceFactory.secure = True
+# The heartbeat ping frequency. This is the frequency (in ms) at which the
+# Hub pings engines for heartbeats. This determines how quickly the Hub
+# will react to engines coming and going. A lower number means faster response
+# time, but more network activity. The default is 100ms
+# c.HubFactory.ping = 100
-# Reuse the client FURL each time the controller is started. If set, you must
-# also pick a specific network port above (FCClientServiceFactory.port).
-# c.FCClientServiceFactory.reuse_furls = False
+# HubFactory queue port pairs, to set by name: mux, iopub, control, task. Set
+# each as a tuple of length 2 of ints. The default is to find random
+# available ports
+# c.HubFactory.mux = (10102,10112)
#-----------------------------------------------------------------------------
-# Configure the engine services
+# Configure the client connections
#-----------------------------------------------------------------------------
-# Basic config attributes for the engine services.
+# Basic client connection config attributes
-# The network interface the controller will listen on for engine connections.
-# This should be an IP address or hostname of the controller's host. The empty
-# string means listen on all interfaces.
-# c.FCEngineServiceFactory.ip = ''
+# The network interface the controller will listen on for client connections.
+# This should be an IP address or interface on the controller. An asterisk
+# means listen on all interfaces. The transport can be any transport
+# supported by zeromq (tcp,epgm,pgm,ib,ipc):
+# c.HubFactory.client_ip = '*'
+# c.HubFactory.client_transport = 'tcp'
-# The TCP/IP port the controller will listen on for engine connections. If 0
-# a random port will be used. If the controller's host has a firewall running
-# it must allow incoming traffic on this port.
-# c.FCEngineServiceFactory.port = 0
+# individual client ports to configure by name: query_port, notifier_port
+# c.HubFactory.query_port = 12345
-# The engine learns how to connect to the controller by looking at the
-# location field embedded in the FURL. If this field is empty, all network
-# interfaces that the controller is listening on will be listed. To have the
-# client connect on a particular interface, list it here.
-# c.FCEngineServiceFactory.location = ''
+#-----------------------------------------------------------------------------
+# Configure the engine connections
+#-----------------------------------------------------------------------------
-# Use SSL encryption for the engine connection.
-# c.FCEngineServiceFactory.secure = True
+# Basic config attributes for the engine connections.
-# Reuse the client FURL each time the controller is started. If set, you must
-# also pick a specific network port above (FCClientServiceFactory.port).
-# c.FCEngineServiceFactory.reuse_furls = False
+# The network interface the controller will listen on for engine connections.
+# This should be an IP address or interface on the controller. An asterisk
+# means listen on all interfaces. The transport can be any transport
+# supported by zeromq (tcp,epgm,pgm,ib,ipc):
+# c.HubFactory.engine_ip = '*'
+# c.HubFactory.engine_transport = 'tcp'
+
+# set the engine heartbeat ports to use:
+# c.HubFactory.hb = (10303,10313)
#-----------------------------------------------------------------------------
-# Developer level configuration attributes
+# Configure the TaskRecord database backend
#-----------------------------------------------------------------------------
-# You shouldn't have to modify anything in this section. These attributes
-# are more for developers who want to change the behavior of the controller
-# at a fundamental level.
-
-# c.FCClientServiceFactory.cert_file = u'ipcontroller-client.pem'
-
-# default_client_interfaces = Config()
-# default_client_interfaces.Task.interface_chain = [
-# 'IPython.kernel.task.ITaskController',
-# 'IPython.kernel.taskfc.IFCTaskController'
-# ]
-#
-# default_client_interfaces.Task.furl_file = u'ipcontroller-tc.furl'
-#
-# default_client_interfaces.MultiEngine.interface_chain = [
-# 'IPython.kernel.multiengine.IMultiEngine',
-# 'IPython.kernel.multienginefc.IFCSynchronousMultiEngine'
-# ]
-#
-# default_client_interfaces.MultiEngine.furl_file = u'ipcontroller-mec.furl'
-#
-# c.FCEngineServiceFactory.interfaces = default_client_interfaces
-
-# c.FCEngineServiceFactory.cert_file = u'ipcontroller-engine.pem'
-
-# default_engine_interfaces = Config()
-# default_engine_interfaces.Default.interface_chain = [
-# 'IPython.kernel.enginefc.IFCControllerBase'
-# ]
-#
-# default_engine_interfaces.Default.furl_file = u'ipcontroller-engine.furl'
-#
-# c.FCEngineServiceFactory.interfaces = default_engine_interfaces
+# For memory/persistence reasons, tasks can be stored out-of-memory in a database.
+# Currently, only sqlite and mongodb are supported as backends, but the interface
+# is fairly simple, so advanced developers could write their own backend.
+
+# ----- in-memory configuration --------
+# this line restores the default behavior: in-memory storage of all results.
+# c.HubFactory.db_class = 'IPython.parallel.dictdb.DictDB'
+
+# ----- sqlite configuration --------
+# use this line to activate sqlite:
+# c.HubFactory.db_class = 'IPython.parallel.sqlitedb.SQLiteDB'
+
+# You can specify the name of the db-file. By default, this will be located
+# in the active cluster_dir, e.g. ~/.ipython/clusterz_default/tasks.db
+# c.SQLiteDB.filename = 'tasks.db'
+
+# You can also specify the location of the db-file, if you want it to be somewhere
+# other than the cluster_dir.
+# c.SQLiteDB.location = '/scratch/'
+
+# This will specify the name of the table for the controller to use. The default
+# behavior is to use the session ID of the SessionFactory object (a uuid). Overriding
+# this will result in results persisting for multiple sessions.
+# c.SQLiteDB.table = 'results'
+
+# ----- mongodb configuration --------
+# use this line to activate mongodb:
+# c.HubFactory.db_class = 'IPython.parallel.mongodb.MongoDB'
+
+# You can specify the args and kwargs pymongo will use when creating the Connection.
+# For more information on what these options might be, see pymongo documentation.
+# c.MongoDB.connection_kwargs = {}
+# c.MongoDB.connection_args = []
+
+# This will specify the name of the mongo database for the controller to use. The default
+# behavior is to use the session ID of the SessionFactory object (a uuid). Overriding
+# this will result in task results persisting through multiple sessions.
+# c.MongoDB.database = 'ipythondb'
+
+
diff --git a/IPython/config/default/ipcontrollerz_config.py b/IPython/config/default/ipcontrollerz_config.py
deleted file mode 100644
index adf2878..0000000
--- a/IPython/config/default/ipcontrollerz_config.py
+++ /dev/null
@@ -1,180 +0,0 @@
-from IPython.config.loader import Config
-
-c = get_config()
-
-#-----------------------------------------------------------------------------
-# Global configuration
-#-----------------------------------------------------------------------------
-
-# Basic Global config attributes
-
-# Start up messages are logged to stdout using the logging module.
-# These all happen before the twisted reactor is started and are
-# useful for debugging purposes. Can be (10=DEBUG,20=INFO,30=WARN,40=CRITICAL)
-# and smaller is more verbose.
-# c.Global.log_level = 20
-
-# Log to a file in cluster_dir/log, otherwise just log to sys.stdout.
-# c.Global.log_to_file = False
-
-# Remove old logs from cluster_dir/log before starting.
-# c.Global.clean_logs = True
-
-# A list of Python statements that will be run before starting the
-# controller. This is provided because occasionally certain things need to
-# be imported in the controller for pickling to work.
-# c.Global.import_statements = ['import math']
-
-# Reuse the controller's JSON files. If False, JSON files are regenerated
-# each time the controller is run. If True, they will be reused, *but*, you
-# also must set the network ports by hand. If set, this will override the
-# values set for the client and engine connections below.
-# c.Global.reuse_files = True
-
-# Enable exec_key authentication on all messages. Default is True
-# c.Global.secure = True
-
-# The working directory for the process. The application will use os.chdir
-# to change to this directory before starting.
-# c.Global.work_dir = os.getcwd()
-
-# The log url for logging to an `iploggerz` application. This will override
-# log-to-file.
-# c.Global.log_url = 'tcp://127.0.0.1:20202'
-
-# The specific external IP that is used to disambiguate multi-interface URLs.
-# The default behavior is to guess from external IPs gleaned from `socket`.
-# c.Global.location = '192.168.1.123'
-
-# The ssh server remote clients should use to connect to this controller.
-# It must be a machine that can see the interface specified in client_ip.
-# The default for client_ip is localhost, in which case the sshserver must
-# be an external IP of the controller machine.
-# c.Global.sshserver = 'controller.example.com'
-
-# the url to use for registration. If set, this overrides engine-ip,
-# engine-transport client-ip,client-transport, and regport.
-# c.RegistrationFactory.url = 'tcp://*:12345'
-
-# the port to use for registration. Clients and Engines both use this
-# port for registration.
-# c.RegistrationFactory.regport = 10101
-
-#-----------------------------------------------------------------------------
-# Configure the Task Scheduler
-#-----------------------------------------------------------------------------
-
-# The routing scheme. 'pure' will use the pure-ZMQ scheduler. Any other
-# value will use a Python scheduler with various routing schemes.
-# python schemes are: lru, weighted, random, twobin. Default is 'weighted'.
-# Note that the pure ZMQ scheduler does not support many features, such as
-# dying engines, dependencies, or engine-subset load-balancing.
-# c.ControllerFactory.scheme = 'pure'
-
-# The pure ZMQ scheduler can limit the number of outstanding tasks per engine
-# by using the ZMQ HWM option. This allows engines with long-running tasks
-# to not steal too many tasks from other engines. The default is 0, which
-# means agressively distribute messages, never waiting for them to finish.
-# c.ControllerFactory.hwm = 1
-
-# Whether to use Threads or Processes to start the Schedulers. Threads will
-# use less resources, but potentially reduce throughput. Default is to
-# use processes. Note that the a Python scheduler will always be in a Process.
-# c.ControllerFactory.usethreads
-
-#-----------------------------------------------------------------------------
-# Configure the Hub
-#-----------------------------------------------------------------------------
-
-# Which class to use for the db backend. Currently supported are DictDB (the
-# default), and MongoDB. Uncomment this line to enable MongoDB, which will
-# slow-down the Hub's responsiveness, but also reduce its memory footprint.
-# c.HubFactory.db_class = 'IPython.parallel.mongodb.MongoDB'
-
-# The heartbeat ping frequency. This is the frequency (in ms) at which the
-# Hub pings engines for heartbeats. This determines how quickly the Hub
-# will react to engines coming and going. A lower number means faster response
-# time, but more network activity. The default is 100ms
-# c.HubFactory.ping = 100
-
-# HubFactory queue port pairs, to set by name: mux, iopub, control, task. Set
-# each as a tuple of length 2 of ints. The default is to find random
-# available ports
-# c.HubFactory.mux = (10102,10112)
-
-#-----------------------------------------------------------------------------
-# Configure the client connections
-#-----------------------------------------------------------------------------
-
-# Basic client connection config attributes
-
-# The network interface the controller will listen on for client connections.
-# This should be an IP address or interface on the controller. An asterisk
-# means listen on all interfaces. The transport can be any transport
-# supported by zeromq (tcp,epgm,pgm,ib,ipc):
-# c.HubFactory.client_ip = '*'
-# c.HubFactory.client_transport = 'tcp'
-
-# individual client ports to configure by name: query_port, notifier_port
-# c.HubFactory.query_port = 12345
-
-#-----------------------------------------------------------------------------
-# Configure the engine connections
-#-----------------------------------------------------------------------------
-
-# Basic config attributes for the engine connections.
-
-# The network interface the controller will listen on for engine connections.
-# This should be an IP address or interface on the controller. An asterisk
-# means listen on all interfaces. The transport can be any transport
-# supported by zeromq (tcp,epgm,pgm,ib,ipc):
-# c.HubFactory.engine_ip = '*'
-# c.HubFactory.engine_transport = 'tcp'
-
-# set the engine heartbeat ports to use:
-# c.HubFactory.hb = (10303,10313)
-
-#-----------------------------------------------------------------------------
-# Configure the TaskRecord database backend
-#-----------------------------------------------------------------------------
-
-# For memory/persistance reasons, tasks can be stored out-of-memory in a database.
-# Currently, only sqlite and mongodb are supported as backends, but the interface
-# is fairly simple, so advanced developers could write their own backend.
-
-# ----- in-memory configuration --------
-# this line restores the default behavior: in-memory storage of all results.
-# c.HubFactory.db_class = 'IPython.parallel.dictdb.DictDB'
-
-# ----- sqlite configuration --------
-# use this line to activate sqlite:
-# c.HubFactory.db_class = 'IPython.parallel.sqlitedb.SQLiteDB'
-
-# You can specify the name of the db-file. By default, this will be located
-# in the active cluster_dir, e.g. ~/.ipython/clusterz_default/tasks.db
-# c.SQLiteDB.filename = 'tasks.db'
-
-# You can also specify the location of the db-file, if you want it to be somewhere
-# other than the cluster_dir.
-# c.SQLiteDB.location = '/scratch/'
-
-# This will specify the name of the table for the controller to use. The default
-# behavior is to use the session ID of the SessionFactory object (a uuid). Overriding
-# this will result in results persisting for multiple sessions.
-# c.SQLiteDB.table = 'results'
-
-# ----- mongodb configuration --------
-# use this line to activate mongodb:
-# c.HubFactory.db_class = 'IPython.parallel.mongodb.MongoDB'
-
-# You can specify the args and kwargs pymongo will use when creating the Connection.
-# For more information on what these options might be, see pymongo documentation.
-# c.MongoDB.connection_kwargs = {}
-# c.MongoDB.connection_args = []
-
-# This will specify the name of the mongo database for the controller to use. The default
-# behavior is to use the session ID of the SessionFactory object (a uuid). Overriding
-# this will result in task results persisting through multiple sessions.
-# c.MongoDB.database = 'ipythondb'
-
-
diff --git a/IPython/config/default/ipengine_config.py b/IPython/config/default/ipengine_config.py
index 42483ed..402f7fd 100644
--- a/IPython/config/default/ipengine_config.py
+++ b/IPython/config/default/ipengine_config.py
@@ -29,10 +29,10 @@ c = get_config()
# c.Global.connect_delay = 0.1
# c.Global.connect_max_tries = 15
-# By default, the engine will look for the controller's FURL file in its own
-# cluster directory. Sometimes, the FURL file will be elsewhere and this
-# attribute can be set to the full path of the FURL file.
-# c.Global.furl_file = u''
+# By default, the engine will look for the controller's JSON file in its own
+# cluster directory. Sometimes, the JSON file will be elsewhere and this
+# attribute can be set to the full path of the JSON file.
+# c.Global.url_file = u'/path/to/my/ipcontroller-engine.json'
# The working directory for the process. The application will use os.chdir
# to change to this directory before starting.
@@ -78,12 +78,7 @@ c = get_config()
# You should not have to change these attributes.
-# c.Global.shell_class = 'IPython.kernel.core.interpreter.Interpreter'
-
-# c.Global.furl_file_name = u'ipcontroller-engine.furl'
-
-
-
+# c.Global.url_file_name = u'ipcontroller-engine.furl'
diff --git a/IPython/config/default/ipenginez_config.py b/IPython/config/default/ipenginez_config.py
deleted file mode 100644
index 402f7fd..0000000
--- a/IPython/config/default/ipenginez_config.py
+++ /dev/null
@@ -1,85 +0,0 @@
-c = get_config()
-
-#-----------------------------------------------------------------------------
-# Global configuration
-#-----------------------------------------------------------------------------
-
-# Start up messages are logged to stdout using the logging module.
-# These all happen before the twisted reactor is started and are
-# useful for debugging purposes. Can be (10=DEBUG,20=INFO,30=WARN,40=CRITICAL)
-# and smaller is more verbose.
-# c.Global.log_level = 20
-
-# Log to a file in cluster_dir/log, otherwise just log to sys.stdout.
-# c.Global.log_to_file = False
-
-# Remove old logs from cluster_dir/log before starting.
-# c.Global.clean_logs = True
-
-# A list of strings that will be executed in the users namespace on the engine
-# before it connects to the controller.
-# c.Global.exec_lines = ['import numpy']
-
-# The engine will try to connect to the controller multiple times, to allow
-# the controller time to startup and write its FURL file. These parameters
-# control the number of retries (connect_max_tries) and the initial delay
-# (connect_delay) between attemps. The actual delay between attempts gets
-# longer each time by a factor of 1.5 (delay[i] = 1.5*delay[i-1])
-# those attemps.
-# c.Global.connect_delay = 0.1
-# c.Global.connect_max_tries = 15
-
-# By default, the engine will look for the controller's JSON file in its own
-# cluster directory. Sometimes, the JSON file will be elsewhere and this
-# attribute can be set to the full path of the JSON file.
-# c.Global.url_file = u'/path/to/my/ipcontroller-engine.json'
-
-# The working directory for the process. The application will use os.chdir
-# to change to this directory before starting.
-# c.Global.work_dir = os.getcwd()
-
-#-----------------------------------------------------------------------------
-# MPI configuration
-#-----------------------------------------------------------------------------
-
-# Upon starting the engine can be configured to call MPI_Init. This section
-# configures that.
-
-# Select which MPI section to execute to setup MPI. The value of this
-# attribute must match the name of another attribute in the MPI config
-# section (mpi4py, pytrilinos, etc.). This can also be set by the --mpi
-# command line option.
-# c.MPI.use = ''
-
-# Initialize MPI using mpi4py. To use this, set c.MPI.use = 'mpi4py' to use
-# --mpi=mpi4py at the command line.
-# c.MPI.mpi4py = """from mpi4py import MPI as mpi
-# mpi.size = mpi.COMM_WORLD.Get_size()
-# mpi.rank = mpi.COMM_WORLD.Get_rank()
-# """
-
-# Initialize MPI using pytrilinos. To use this, set c.MPI.use = 'pytrilinos'
-# to use --mpi=pytrilinos at the command line.
-# c.MPI.pytrilinos = """from PyTrilinos import Epetra
-# class SimpleStruct:
-# pass
-# mpi = SimpleStruct()
-# mpi.rank = 0
-# mpi.size = 0
-# """
-
-#-----------------------------------------------------------------------------
-# Developer level configuration attributes
-#-----------------------------------------------------------------------------
-
-# You shouldn't have to modify anything in this section. These attributes
-# are more for developers who want to change the behavior of the controller
-# at a fundamental level.
-
-# You should not have to change these attributes.
-
-# c.Global.url_file_name = u'ipcontroller-engine.furl'
-
-
-
-
diff --git a/IPython/parallel/clusterdir.py b/IPython/parallel/clusterdir.py
index b61aa1f..b3f958c 100755
--- a/IPython/parallel/clusterdir.py
+++ b/IPython/parallel/clusterdir.py
@@ -138,8 +138,8 @@ class ClusterDir(Configurable):
def copy_all_config_files(self, path=None, overwrite=False):
"""Copy all config files into the active cluster directory."""
- for f in [u'ipcontrollerz_config.py', u'ipenginez_config.py',
- u'ipclusterz_config.py']:
+ for f in [u'ipcontroller_config.py', u'ipengine_config.py',
+ u'ipcluster_config.py']:
self.copy_config_file(f, path=path, overwrite=overwrite)
@classmethod
@@ -164,11 +164,11 @@ class ClusterDir(Configurable):
The path (directory) to put the cluster directory in.
profile : str
The name of the profile. The name of the cluster directory will
- be "clusterz_".
+ be "cluster_".
"""
if not os.path.isdir(path):
raise ClusterDirError('Directory not found: %s' % path)
- cluster_dir = os.path.join(path, u'clusterz_' + profile)
+ cluster_dir = os.path.join(path, u'cluster_' + profile)
return ClusterDir(location=cluster_dir)
@classmethod
@@ -190,9 +190,9 @@ class ClusterDir(Configurable):
The IPython directory to use.
profile : unicode or str
The name of the profile. The name of the cluster directory
- will be "clusterz_".
+ will be "cluster_".
"""
- dirname = u'clusterz_' + profile
+ dirname = u'cluster_' + profile
cluster_dir_paths = os.environ.get('IPCLUSTER_DIR_PATH','')
if cluster_dir_paths:
cluster_dir_paths = cluster_dir_paths.split(':')
diff --git a/IPython/parallel/ipclusterapp.py b/IPython/parallel/ipclusterapp.py
index c57e788..7c94f6f 100755
--- a/IPython/parallel/ipclusterapp.py
+++ b/IPython/parallel/ipclusterapp.py
@@ -37,7 +37,7 @@ from IPython.parallel.clusterdir import (
#-----------------------------------------------------------------------------
-default_config_file_name = u'ipclusterz_config.py'
+default_config_file_name = u'ipcluster_config.py'
_description = """\
@@ -47,9 +47,9 @@ An IPython cluster consists of 1 controller and 1 or more engines.
This command automates the startup of these processes using a wide
range of startup methods (SSH, local processes, PBS, mpiexec,
Windows HPC Server 2008). To start a cluster with 4 engines on your
-local host simply do 'ipclusterz start -n 4'. For more complex usage
-you will typically do 'ipclusterz create -p mycluster', then edit
-configuration files, followed by 'ipclusterz start -p mycluster -n 4'.
+local host simply do 'ipcluster start -n 4'. For more complex usage
+you will typically do 'ipcluster create -p mycluster', then edit
+configuration files, followed by 'ipcluster start -p mycluster -n 4'.
"""
@@ -108,9 +108,9 @@ class IPClusterAppConfigLoader(ClusterDirConfigLoader):
title='ipcluster subcommands',
description=
"""ipcluster has a variety of subcommands. The general way of
- running ipcluster is 'ipclusterz [options]'. To get help
- on a particular subcommand do 'ipclusterz -h'."""
- # help="For more help, type 'ipclusterz -h'",
+ running ipcluster is 'ipcluster [options]'. To get help
+ on a particular subcommand do 'ipcluster -h'."""
+ # help="For more help, type 'ipcluster -h'",
)
# The "list" subcommand parser
@@ -123,7 +123,7 @@ class IPClusterAppConfigLoader(ClusterDirConfigLoader):
"""List all available clusters, by cluster directory, that can
be found in the current working directly or in the ipython
directory. Cluster directories are named using the convention
- 'clusterz_'."""
+ 'cluster_'."""
)
# The "create" subcommand parser
@@ -136,13 +136,13 @@ class IPClusterAppConfigLoader(ClusterDirConfigLoader):
"""Create an ipython cluster directory by its profile name or
cluster directory path. Cluster directories contain
configuration, log and security related files and are named
- using the convention 'clusterz_'. By default they are
+ using the convention 'cluster_'. By default they are
located in your ipython directory. Once created, you will
probably need to edit the configuration files in the cluster
directory to configure your cluster. Most users will create a
cluster directory by profile name,
- 'ipclusterz create -p mycluster', which will put the directory
- in '/clusterz_mycluster'.
+ 'ipcluster create -p mycluster', which will put the directory
+ in '/cluster_mycluster'.
"""
)
paa = parser_create.add_argument
@@ -162,10 +162,10 @@ class IPClusterAppConfigLoader(ClusterDirConfigLoader):
"""Start an ipython cluster by its profile name or cluster
directory. Cluster directories contain configuration, log and
security related files and are named using the convention
- 'clusterz_' and should be creating using the 'start'
+ 'cluster_' and should be created using the 'start'
subcommand of 'ipcluster'. If your cluster directory is in
the cwd or the ipython directory, you can simply refer to it
- using its profile name, 'ipclusterz start -n 4 -p `,
+ using its profile name, 'ipcluster start -n 4 -p `,
otherwise use the '--cluster-dir' option.
"""
)
@@ -200,9 +200,9 @@ class IPClusterAppConfigLoader(ClusterDirConfigLoader):
description=
"""Stop a running ipython cluster by its profile name or cluster
directory. Cluster directories are named using the convention
- 'clusterz_'. If your cluster directory is in
+ 'cluster_'. If your cluster directory is in
the cwd or the ipython directory, you can simply refer to it
- using its profile name, 'ipclusterz stop -p `, otherwise
+ using its profile name, 'ipcluster stop -p `, otherwise
use the '--cluster-dir' option.
"""
)
@@ -223,10 +223,10 @@ class IPClusterAppConfigLoader(ClusterDirConfigLoader):
by profile name or cluster directory.
Cluster directories contain configuration, log and
security related files and are named using the convention
- 'clusterz_' and should be creating using the 'start'
+ 'cluster_' and should be created using the 'start'
subcommand of 'ipcluster'. If your cluster directory is in
the cwd or the ipython directory, you can simply refer to it
- using its profile name, 'ipclusterz engines -n 4 -p `,
+ using its profile name, 'ipcluster engines -n 4 -p `,
otherwise use the '--cluster-dir' option.
"""
)
@@ -249,7 +249,7 @@ class IPClusterAppConfigLoader(ClusterDirConfigLoader):
class IPClusterApp(ApplicationWithClusterDir):
- name = u'ipclusterz'
+ name = u'ipcluster'
description = _description
usage = None
command_line_loader = IPClusterAppConfigLoader
@@ -286,8 +286,8 @@ class IPClusterApp(ApplicationWithClusterDir):
except ClusterDirError:
raise ClusterDirError(
"Could not find a cluster directory. A cluster dir must "
- "be created before running 'ipclusterz start'. Do "
- "'ipclusterz create -h' or 'ipclusterz list -h' for more "
+ "be created before running 'ipcluster start'. Do "
+ "'ipcluster create -h' or 'ipcluster list -h' for more "
"information about creating and listing cluster dirs."
)
elif subcommand=='engines':
@@ -297,8 +297,8 @@ class IPClusterApp(ApplicationWithClusterDir):
except ClusterDirError:
raise ClusterDirError(
"Could not find a cluster directory. A cluster dir must "
- "be created before running 'ipclusterz start'. Do "
- "'ipclusterz create -h' or 'ipclusterz list -h' for more "
+ "be created before running 'ipcluster start'. Do "
+ "'ipcluster create -h' or 'ipcluster list -h' for more "
"information about creating and listing cluster dirs."
)
@@ -322,9 +322,9 @@ class IPClusterApp(ApplicationWithClusterDir):
files = os.listdir(path)
for f in files:
full_path = os.path.join(path, f)
- if os.path.isdir(full_path) and f.startswith('clusterz_'):
+ if os.path.isdir(full_path) and f.startswith('cluster_'):
profile = full_path.split('_')[-1]
- start_cmd = 'ipclusterz start -p %s -n 4' % profile
+ start_cmd = 'ipcluster start -p %s -n 4' % profile
print start_cmd + " ==> " + full_path
def pre_construct(self):
@@ -498,7 +498,7 @@ class IPClusterApp(ApplicationWithClusterDir):
else:
self.log.critical(
'Cluster is already running with [pid=%s]. '
- 'use "ipclusterz stop" to stop the cluster.' % pid
+ 'use "ipcluster stop" to stop the cluster.' % pid
)
# Here I exit with a unusual exit status that other processes
# can watch for to learn how I existed.
@@ -506,7 +506,7 @@ class IPClusterApp(ApplicationWithClusterDir):
# Now log and daemonize
self.log.info(
- 'Starting ipclusterz with [daemon=%r]' % config.Global.daemonize
+ 'Starting ipcluster with [daemon=%r]' % config.Global.daemonize
)
# TODO: Get daemonize working on Windows or as a Windows Server.
if config.Global.daemonize:
diff --git a/IPython/parallel/ipcontrollerapp.py b/IPython/parallel/ipcontrollerapp.py
index 705396a..bd40247 100755
--- a/IPython/parallel/ipcontrollerapp.py
+++ b/IPython/parallel/ipcontrollerapp.py
@@ -48,7 +48,7 @@ from IPython.utils.traitlets import Instance, Unicode
#: The default config file name for this application
-default_config_file_name = u'ipcontrollerz_config.py'
+default_config_file_name = u'ipcontroller_config.py'
_description = """Start the IPython controller for parallel computing.
@@ -57,7 +57,7 @@ The IPython controller provides a gateway between the IPython engines and
clients. The controller needs to be started before the engines and can be
configured using command line options or using a cluster directory. Cluster
directories contain config, log and security files and are usually located in
-your ipython directory and named as "clusterz_". See the --profile
+your ipython directory and named as "cluster_". See the --profile
and --cluster-dir options for details.
"""
@@ -251,7 +251,7 @@ class IPControllerAppConfigLoader(ClusterDirConfigLoader):
class IPControllerApp(ApplicationWithClusterDir):
- name = u'ipcontrollerz'
+ name = u'ipcontroller'
description = _description
command_line_loader = IPControllerAppConfigLoader
default_config_file_name = default_config_file_name
diff --git a/IPython/parallel/ipengineapp.py b/IPython/parallel/ipengineapp.py
index 6cf0562..3aa1ded 100755
--- a/IPython/parallel/ipengineapp.py
+++ b/IPython/parallel/ipengineapp.py
@@ -40,7 +40,7 @@ from IPython.utils.importstring import import_item
#-----------------------------------------------------------------------------
#: The default config file name for this application
-default_config_file_name = u'ipenginez_config.py'
+default_config_file_name = u'ipengine_config.py'
mpi4py_init = """from mpi4py import MPI as mpi
@@ -64,7 +64,7 @@ IPython engines run in parallel and perform computations on behalf of a client
and controller. A controller needs to be started before the engines. The
engine can be configured using command line options or using a cluster
directory. Cluster directories contain config, log and security files and are
-usually located in your ipython directory and named as "clusterz_".
+usually located in your ipython directory and named as "cluster_".
See the --profile and --cluster-dir options for details.
"""
@@ -124,7 +124,7 @@ class IPEngineAppConfigLoader(ClusterDirConfigLoader):
class IPEngineApp(ApplicationWithClusterDir):
- name = u'ipenginez'
+ name = u'ipengine'
description = _description
command_line_loader = IPEngineAppConfigLoader
default_config_file_name = default_config_file_name
diff --git a/IPython/parallel/iploggerapp.py b/IPython/parallel/iploggerapp.py
index 816bf04..4661ff3 100755
--- a/IPython/parallel/iploggerapp.py
+++ b/IPython/parallel/iploggerapp.py
@@ -39,7 +39,7 @@ IPython controllers and engines (and your own processes) can broadcast log messa
by registering a `zmq.log.handlers.PUBHandler` with the `logging` module. The
logger can be configured using command line options or using a cluster
directory. Cluster directories contain config, log and security files and are
-usually located in your ipython directory and named as "clusterz_".
+usually located in your ipython directory and named as "cluster_".
See the --profile and --cluster-dir options for details.
"""
diff --git a/IPython/parallel/launcher.py b/IPython/parallel/launcher.py
index 4069e9b..73cff17 100644
--- a/IPython/parallel/launcher.py
+++ b/IPython/parallel/launcher.py
@@ -63,15 +63,15 @@ except ImportError:
#-----------------------------------------------------------------------------
-ipclusterz_cmd_argv = pycmd2argv(get_ipython_module_path(
+ipcluster_cmd_argv = pycmd2argv(get_ipython_module_path(
'IPython.parallel.ipclusterapp'
))
-ipenginez_cmd_argv = pycmd2argv(get_ipython_module_path(
+ipengine_cmd_argv = pycmd2argv(get_ipython_module_path(
'IPython.parallel.ipengineapp'
))
-ipcontrollerz_cmd_argv = pycmd2argv(get_ipython_module_path(
+ipcontroller_cmd_argv = pycmd2argv(get_ipython_module_path(
'IPython.parallel.ipcontrollerapp'
))
@@ -304,7 +304,7 @@ class LocalProcessLauncher(BaseLauncher):
class LocalControllerLauncher(LocalProcessLauncher):
"""Launch a controller as a regular external process."""
- controller_cmd = List(ipcontrollerz_cmd_argv, config=True)
+ controller_cmd = List(ipcontroller_cmd_argv, config=True)
# Command line arguments to ipcontroller.
controller_args = List(['--log-to-file','--log-level', str(logging.INFO)], config=True)
@@ -322,7 +322,7 @@ class LocalControllerLauncher(LocalProcessLauncher):
class LocalEngineLauncher(LocalProcessLauncher):
"""Launch a single engine as a regular externall process."""
- engine_cmd = List(ipenginez_cmd_argv, config=True)
+ engine_cmd = List(ipengine_cmd_argv, config=True)
# Command line arguments for ipengine.
engine_args = List(
['--log-to-file','--log-level', str(logging.INFO)], config=True
@@ -443,7 +443,7 @@ class MPIExecLauncher(LocalProcessLauncher):
class MPIExecControllerLauncher(MPIExecLauncher):
"""Launch a controller using mpiexec."""
- controller_cmd = List(ipcontrollerz_cmd_argv, config=True)
+ controller_cmd = List(ipcontroller_cmd_argv, config=True)
# Command line arguments to ipcontroller.
controller_args = List(['--log-to-file','--log-level', str(logging.INFO)], config=True)
n = Int(1, config=False)
@@ -462,7 +462,7 @@ class MPIExecControllerLauncher(MPIExecLauncher):
class MPIExecEngineSetLauncher(MPIExecLauncher):
- program = List(ipenginez_cmd_argv, config=True)
+ program = List(ipengine_cmd_argv, config=True)
# Command line arguments for ipengine.
program_args = List(
['--log-to-file','--log-level', str(logging.INFO)], config=True
@@ -531,13 +531,13 @@ class SSHLauncher(LocalProcessLauncher):
class SSHControllerLauncher(SSHLauncher):
- program = List(ipcontrollerz_cmd_argv, config=True)
+ program = List(ipcontroller_cmd_argv, config=True)
# Command line arguments to ipcontroller.
program_args = List(['-r', '--log-to-file','--log-level', str(logging.INFO)], config=True)
class SSHEngineLauncher(SSHLauncher):
- program = List(ipenginez_cmd_argv, config=True)
+ program = List(ipengine_cmd_argv, config=True)
# Command line arguments for ipengine.
program_args = List(
['--log-to-file','--log-level', str(logging.INFO)], config=True
@@ -883,9 +883,9 @@ class PBSControllerLauncher(PBSLauncher):
batch_file_name = CUnicode(u'pbs_controller', config=True)
default_template= CUnicode("""#!/bin/sh
#PBS -V
-#PBS -N ipcontrollerz
+#PBS -N ipcontroller
%s --log-to-file --cluster-dir $cluster_dir
-"""%(' '.join(ipcontrollerz_cmd_argv)))
+"""%(' '.join(ipcontroller_cmd_argv)))
def start(self, cluster_dir):
"""Start the controller by profile or cluster_dir."""
@@ -898,9 +898,9 @@ class PBSEngineSetLauncher(PBSLauncher):
batch_file_name = CUnicode(u'pbs_engines', config=True)
default_template= CUnicode(u"""#!/bin/sh
#PBS -V
-#PBS -N ipenginez
+#PBS -N ipengine
%s --cluster-dir $cluster_dir
-"""%(' '.join(ipenginez_cmd_argv)))
+"""%(' '.join(ipengine_cmd_argv)))
def start(self, n, cluster_dir):
"""Start n engines by profile or cluster_dir."""
@@ -922,9 +922,9 @@ class SGEControllerLauncher(SGELauncher):
batch_file_name = CUnicode(u'sge_controller', config=True)
default_template= CUnicode(u"""#$$ -V
#$$ -S /bin/sh
-#$$ -N ipcontrollerz
+#$$ -N ipcontroller
%s --log-to-file --cluster-dir $cluster_dir
-"""%(' '.join(ipcontrollerz_cmd_argv)))
+"""%(' '.join(ipcontroller_cmd_argv)))
def start(self, cluster_dir):
"""Start the controller by profile or cluster_dir."""
@@ -936,9 +936,9 @@ class SGEEngineSetLauncher(SGELauncher):
batch_file_name = CUnicode(u'sge_engines', config=True)
default_template = CUnicode("""#$$ -V
#$$ -S /bin/sh
-#$$ -N ipenginez
+#$$ -N ipengine
%s --cluster-dir $cluster_dir
-"""%(' '.join(ipenginez_cmd_argv)))
+"""%(' '.join(ipengine_cmd_argv)))
def start(self, n, cluster_dir):
"""Start n engines by profile or cluster_dir."""
@@ -954,7 +954,7 @@ class SGEEngineSetLauncher(SGELauncher):
class IPClusterLauncher(LocalProcessLauncher):
"""Launch the ipcluster program in an external process."""
- ipcluster_cmd = List(ipclusterz_cmd_argv, config=True)
+ ipcluster_cmd = List(ipcluster_cmd_argv, config=True)
# Command line arguments to pass to ipcluster.
ipcluster_args = List(
['--clean-logs', '--log-to-file', '--log-level', str(logging.INFO)], config=True)
diff --git a/IPython/parallel/logwatcher.py b/IPython/parallel/logwatcher.py
index bcf3495..51735f4 100644
--- a/IPython/parallel/logwatcher.py
+++ b/IPython/parallel/logwatcher.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-"""A simple logger object that consolidates messages incoming from ipclusterz processes."""
+"""A simple logger object that consolidates messages incoming from ipcluster processes."""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
diff --git a/IPython/parallel/scripts/ipclusterz b/IPython/parallel/scripts/ipcluster
similarity index 100%
rename from IPython/parallel/scripts/ipclusterz
rename to IPython/parallel/scripts/ipcluster
diff --git a/IPython/parallel/scripts/ipcontrollerz b/IPython/parallel/scripts/ipcontroller
similarity index 100%
rename from IPython/parallel/scripts/ipcontrollerz
rename to IPython/parallel/scripts/ipcontroller
diff --git a/IPython/parallel/scripts/ipenginez b/IPython/parallel/scripts/ipengine
similarity index 100%
rename from IPython/parallel/scripts/ipenginez
rename to IPython/parallel/scripts/ipengine
diff --git a/IPython/parallel/scripts/iploggerz b/IPython/parallel/scripts/iplogger
similarity index 100%
rename from IPython/parallel/scripts/iploggerz
rename to IPython/parallel/scripts/iplogger
diff --git a/IPython/parallel/tests/__init__.py b/IPython/parallel/tests/__init__.py
index c8e4a26..8299cf2 100644
--- a/IPython/parallel/tests/__init__.py
+++ b/IPython/parallel/tests/__init__.py
@@ -23,7 +23,7 @@ blackhole = tempfile.TemporaryFile()
# nose setup/teardown
def setup():
- cp = Popen('ipcontrollerz --profile iptest -r --log-level 10 --log-to-file'.split(), stdout=blackhole, stderr=STDOUT)
+ cp = Popen('ipcontroller --profile iptest -r --log-level 10 --log-to-file'.split(), stdout=blackhole, stderr=STDOUT)
processes.append(cp)
time.sleep(.5)
add_engines(1)
@@ -38,7 +38,7 @@ def add_engines(n=1, profile='iptest'):
base = len(rc)
eps = []
for i in range(n):
- ep = Popen(['ipenginez']+ ['--profile', profile, '--log-level', '10', '--log-to-file'], stdout=blackhole, stderr=STDOUT)
+ ep = Popen(['ipengine']+ ['--profile', profile, '--log-level', '10', '--log-to-file'], stdout=blackhole, stderr=STDOUT)
# ep.start()
processes.append(ep)
eps.append(ep)
diff --git a/IPython/parallel/tests/test_newserialized.py b/IPython/parallel/tests/test_newserialized.py
index e57533f..3c3e5e6 100644
--- a/IPython/parallel/tests/test_newserialized.py
+++ b/IPython/parallel/tests/test_newserialized.py
@@ -13,7 +13,7 @@
from unittest import TestCase
-from IPython.testing.parametric import parametric
+from IPython.testing.decorators import parametric
from IPython.utils import newserialized as ns
from IPython.utils.pickleutil import can, uncan, CannedObject, CannedFunction
from IPython.parallel.tests.clienttest import skip_without
diff --git a/IPython/testing/iptest.py b/IPython/testing/iptest.py
index 6fa4211..9d00571 100644
--- a/IPython/testing/iptest.py
+++ b/IPython/testing/iptest.py
@@ -185,6 +185,7 @@ def make_exclude():
if not have['zmq']:
exclusions.append(ipjoin('zmq'))
+ exclusions.append(ipjoin('parallel'))
# This is needed for the reg-exp to match on win32 in the ipdoctest plugin.
if sys.platform == 'win32':
diff --git a/docs/source/index.txt b/docs/source/index.txt
index 7009b84..243bdec 100644
--- a/docs/source/index.txt
+++ b/docs/source/index.txt
@@ -19,8 +19,7 @@ Contents
whatsnew/index.txt
install/index.txt
interactive/index.txt
- .. parallel/index.txt
- parallelz/index.txt
+ parallel/index.txt
config/index.txt
development/index.txt
api/index.txt
diff --git a/docs/source/install/install.txt b/docs/source/install/install.txt
index 467416e..3bb75dd 100644
--- a/docs/source/install/install.txt
+++ b/docs/source/install/install.txt
@@ -9,16 +9,16 @@ install all of its dependencies.
Please let us know if you have problems installing IPython or any of its
-dependencies. Officially, IPython requires Python version 2.5 or 2.6. We
-have *not* yet started to port IPython to Python 3.0.
+dependencies. Officially, IPython requires Python version 2.6 or 2.7. There
+is an experimental port of IPython for Python3 `on GitHub
+<https://github.com/ipython/ipython-py3k>`_
.. warning::
- Officially, IPython supports Python versions 2.5 and 2.6.
+ Officially, IPython supports Python versions 2.6 and 2.7.
- IPython 0.10 has only been well tested with Python 2.5 and 2.6. Parts of
- it may work with Python 2.4, but we do not officially support Python 2.4
- anymore. If you need to use 2.4, you can still run IPython 0.9.
+ IPython 0.11 has a hard syntax dependency on 2.6, and will no longer work
+ on Python <= 2.5.
Some of the installation approaches use the :mod:`setuptools` package and its
:command:`easy_install` command line program. In many scenarios, this provides
@@ -38,9 +38,9 @@ optional dependencies:
.. code-block:: bash
- $ easy_install ipython[kernel,security,test]
+ $ easy_install ipython[zmq,test]
-This will get Twisted, zope.interface and Foolscap, which are needed for
+This will get pyzmq, which is needed for
IPython's parallel computing features as well as the nose package, which will
enable you to run IPython's test suite.
@@ -221,8 +221,7 @@ On Windows, you will need the PyReadline module. PyReadline is a separate,
Windows only implementation of readline that uses native Windows calls through
:mod:`ctypes`. The easiest way of installing PyReadline is you use the binary
installer available `here `_. The :mod:`ctypes`
-module, which comes with Python 2.5 and greater, is required by PyReadline. It
-is available for Python 2.4 at http://python.net/crew/theller/ctypes.
+module, which comes with Python 2.5 and greater, is required by PyReadline.
nose
----
@@ -267,91 +266,30 @@ The `pexpect `_ package is used in IPython's
Windows users are out of luck as pexpect does not run there.
-Dependencies for IPython.kernel (parallel computing)
-====================================================
+Dependencies for IPython.parallel (parallel computing)
+======================================================
-The IPython kernel provides a nice architecture for parallel computing. The
-main focus of this architecture is on interactive parallel computing. These
-features require a number of additional packages:
+:mod:`IPython.kernel` has been replaced by :mod:`IPython.parallel`,
+which uses ZeroMQ for all communication.
-* zope.interface (yep, we use interfaces)
-* Twisted (asynchronous networking framework)
-* Foolscap (a nice, secure network protocol)
-* pyOpenSSL (security for network connections)
+IPython.parallel provides a nice architecture for parallel computing. The
+main focus of this architecture is on interactive parallel computing. These
+features require just one package: pyzmq. See the next section for pyzmq
+details.
On a Unix style platform (including OS X), if you want to use
:mod:`setuptools`, you can just do:
.. code-block:: bash
- $ easy_install ipython[kernel] # the first three
- $ easy_install ipython[security] # pyOpenSSL
-
-zope.interface and Twisted
---------------------------
-
-Twisted [Twisted]_ and zope.interface [ZopeInterface]_ are used for networking
-related things. On Unix style platforms (including OS X), the simplest way of
-getting the these is to use :command:`easy_install`:
-
-.. code-block:: bash
-
- $ easy_install zope.interface
- $ easy_install Twisted
+ $ easy_install ipython[zmq] # will include pyzmq
-Of course, you can also download the source tarballs from the Twisted website
-[Twisted]_ and the
-`zope.interface page at PyPI `_
-and do the usual ``python setup.py install`` if you prefer.
+Security in IPython.parallel is provided by SSH tunnels. By default, Linux
+and OSX clients will use the shell ssh command, but on Windows, we also
+support tunneling with paramiko [paramiko]_.
-Windows is a bit different. For zope.interface and Twisted, simply get the
-latest binary ``.exe`` installer from the Twisted website. This installer
-includes both zope.interface and Twisted and should just work.
-
-Foolscap
---------
-
-Foolscap [Foolscap]_ uses Twisted to provide a very nice secure RPC protocol that we use to implement our parallel computing features.
-
-On all platforms a simple:
-
-.. code-block:: bash
-
- $ easy_install foolscap
-
-should work. You can also download the source tarballs from the `Foolscap
-website `_ and do ``python setup.py install``
-if you prefer.
-
-pyOpenSSL
----------
-
-IPython does not work with version 0.7 of pyOpenSSL [pyOpenSSL]_. It is known
-to work with version 0.6 and will likely work with the more recent 0.8 and 0.9
-versions. There are a couple of options for getting this:
-
-1. Most Linux distributions have packages for pyOpenSSL.
-2. The built-in Python 2.5 on OS X 10.5 already has it installed.
-3. There are source tarballs on the pyOpenSSL website. On Unix-like
- platforms, these can be built using ``python seutp.py install``.
-4. There is also a binary ``.exe`` Windows installer on the
- `pyOpenSSL website `_.
-
-Dependencies for IPython.frontend (the IPython GUI)
-===================================================
-
-wxPython
---------
-
-Starting with IPython 0.9, IPython has a new :mod:`IPython.frontend` package
-that has a nice wxPython based IPython GUI. As you would expect, this GUI
-requires wxPython. Most Linux distributions have wxPython packages available
-and the built-in Python on OS X comes with wxPython preinstalled. For Windows,
-a binary installer is available on the `wxPython website
- `_.
-
-Dependencies for IPython.zmq (new parallel)
-===========================================
+Dependencies for IPython.zmq
+============================
pyzmq
-----
@@ -359,9 +297,11 @@ pyzmq
IPython 0.11 introduced some new functionality, including a two-process
execution model using ZeroMQ for communication [ZeroMQ]_. The Python bindings
to ZeroMQ are found in the pyzmq project, which is easy_install-able once you
-have ZeroMQ installed. :mod:`IPython.kernel` is also in the process of being
-replaced by :mod:`IPython.zmq.parallel`, which uses ZeroMQ for all
-communication.
+have ZeroMQ installed (or even if you don't).
+
+IPython.zmq depends on pyzmq >= 2.0.10.1, but IPython.parallel requires the more
+recent 2.1.4. 2.1.4 also has binary releases for OSX and Windows, that do not
+require prior installation of libzmq.
Dependencies for ipython-qtconsole (new GUI)
============================================
@@ -377,11 +317,12 @@ which can be installed from the
pygments
--------
-The syntax-highlighting in ``ipython-qtconsole`` is done with the pygments project, which is easy_install-able.
+The syntax-highlighting in ``ipython-qtconsole`` is done with the pygments project,
+which is easy_install-able.
.. [Twisted] Twisted matrix. http://twistedmatrix.org
.. [ZopeInterface] http://pypi.python.org/pypi/zope.interface
.. [Foolscap] Foolscap network protocol. http://foolscap.lothar.com/trac
.. [pyOpenSSL] pyOpenSSL. http://pyopenssl.sourceforge.net
.. [ZeroMQ] ZeroMQ. http://www.zeromq.org
-
+.. [paramiko] paramiko. https://github.com/robey/paramiko
diff --git a/docs/source/parallelz/asian_call.pdf b/docs/source/parallel/asian_call.pdf
similarity index 100%
rename from docs/source/parallelz/asian_call.pdf
rename to docs/source/parallel/asian_call.pdf
Binary files a/docs/source/parallelz/asian_call.pdf and b/docs/source/parallel/asian_call.pdf differ
diff --git a/docs/source/parallelz/asian_call.png b/docs/source/parallel/asian_call.png
similarity index 100%
rename from docs/source/parallelz/asian_call.png
rename to docs/source/parallel/asian_call.png
Binary files a/docs/source/parallelz/asian_call.png and b/docs/source/parallel/asian_call.png differ
diff --git a/docs/source/parallelz/asian_put.pdf b/docs/source/parallel/asian_put.pdf
similarity index 100%
rename from docs/source/parallelz/asian_put.pdf
rename to docs/source/parallel/asian_put.pdf
Binary files a/docs/source/parallelz/asian_put.pdf and b/docs/source/parallel/asian_put.pdf differ
diff --git a/docs/source/parallelz/asian_put.png b/docs/source/parallel/asian_put.png
similarity index 100%
rename from docs/source/parallelz/asian_put.png
rename to docs/source/parallel/asian_put.png
Binary files a/docs/source/parallelz/asian_put.png and b/docs/source/parallel/asian_put.png differ
diff --git a/docs/source/parallelz/dag_dependencies.txt b/docs/source/parallel/dag_dependencies.txt
similarity index 100%
rename from docs/source/parallelz/dag_dependencies.txt
rename to docs/source/parallel/dag_dependencies.txt
diff --git a/docs/source/parallelz/dagdeps.pdf b/docs/source/parallel/dagdeps.pdf
similarity index 100%
rename from docs/source/parallelz/dagdeps.pdf
rename to docs/source/parallel/dagdeps.pdf
Binary files a/docs/source/parallelz/dagdeps.pdf and b/docs/source/parallel/dagdeps.pdf differ
diff --git a/docs/source/parallelz/dagdeps.png b/docs/source/parallel/dagdeps.png
similarity index 100%
rename from docs/source/parallelz/dagdeps.png
rename to docs/source/parallel/dagdeps.png
Binary files a/docs/source/parallelz/dagdeps.png and b/docs/source/parallel/dagdeps.png differ
diff --git a/docs/source/parallelz/hpc_job_manager.pdf b/docs/source/parallel/hpc_job_manager.pdf
similarity index 100%
rename from docs/source/parallelz/hpc_job_manager.pdf
rename to docs/source/parallel/hpc_job_manager.pdf
Binary files a/docs/source/parallelz/hpc_job_manager.pdf and b/docs/source/parallel/hpc_job_manager.pdf differ
diff --git a/docs/source/parallelz/hpc_job_manager.png b/docs/source/parallel/hpc_job_manager.png
similarity index 100%
rename from docs/source/parallelz/hpc_job_manager.png
rename to docs/source/parallel/hpc_job_manager.png
Binary files a/docs/source/parallelz/hpc_job_manager.png and b/docs/source/parallel/hpc_job_manager.png differ
diff --git a/docs/source/parallel/index.txt b/docs/source/parallel/index.txt
index d1a1e40..0446207 100644
--- a/docs/source/parallel/index.txt
+++ b/docs/source/parallel/index.txt
@@ -4,9 +4,19 @@
Using IPython for parallel computing
====================================
-The twisted-based :mod:`IPython.kernel` has been removed, in favor of
-the new 0MQ-based :mod:`IPython.parallel`, whose merge into master is imminent.
+.. toctree::
+ :maxdepth: 2
+
+ parallel_intro.txt
+ parallel_process.txt
+ parallel_multiengine.txt
+ parallel_task.txt
+ parallel_mpi.txt
+ parallel_security.txt
+ parallel_winhpc.txt
+ parallel_demos.txt
+ dag_dependencies.txt
+ parallel_details.txt
+ parallel_transition.txt
+
-Until that code is merged, it can be found in the `newparallel branch
-`_, and its draft documentation can be
-found `here `_.
\ No newline at end of file
diff --git a/docs/source/parallelz/ipcluster_create.pdf b/docs/source/parallel/ipcluster_create.pdf
similarity index 100%
rename from docs/source/parallelz/ipcluster_create.pdf
rename to docs/source/parallel/ipcluster_create.pdf
Binary files a/docs/source/parallelz/ipcluster_create.pdf and b/docs/source/parallel/ipcluster_create.pdf differ
diff --git a/docs/source/parallelz/ipcluster_create.png b/docs/source/parallel/ipcluster_create.png
similarity index 100%
rename from docs/source/parallelz/ipcluster_create.png
rename to docs/source/parallel/ipcluster_create.png
Binary files a/docs/source/parallelz/ipcluster_create.png and b/docs/source/parallel/ipcluster_create.png differ
diff --git a/docs/source/parallelz/ipcluster_start.pdf b/docs/source/parallel/ipcluster_start.pdf
similarity index 100%
rename from docs/source/parallelz/ipcluster_start.pdf
rename to docs/source/parallel/ipcluster_start.pdf
Binary files a/docs/source/parallelz/ipcluster_start.pdf and b/docs/source/parallel/ipcluster_start.pdf differ
diff --git a/docs/source/parallelz/ipcluster_start.png b/docs/source/parallel/ipcluster_start.png
similarity index 100%
rename from docs/source/parallelz/ipcluster_start.png
rename to docs/source/parallel/ipcluster_start.png
Binary files a/docs/source/parallelz/ipcluster_start.png and b/docs/source/parallel/ipcluster_start.png differ
diff --git a/docs/source/parallelz/ipython_shell.pdf b/docs/source/parallel/ipython_shell.pdf
similarity index 100%
rename from docs/source/parallelz/ipython_shell.pdf
rename to docs/source/parallel/ipython_shell.pdf
Binary files a/docs/source/parallelz/ipython_shell.pdf and b/docs/source/parallel/ipython_shell.pdf differ
diff --git a/docs/source/parallelz/ipython_shell.png b/docs/source/parallel/ipython_shell.png
similarity index 100%
rename from docs/source/parallelz/ipython_shell.png
rename to docs/source/parallel/ipython_shell.png
Binary files a/docs/source/parallelz/ipython_shell.png and b/docs/source/parallel/ipython_shell.png differ
diff --git a/docs/source/parallelz/mec_simple.pdf b/docs/source/parallel/mec_simple.pdf
similarity index 100%
rename from docs/source/parallelz/mec_simple.pdf
rename to docs/source/parallel/mec_simple.pdf
Binary files a/docs/source/parallelz/mec_simple.pdf and b/docs/source/parallel/mec_simple.pdf differ
diff --git a/docs/source/parallelz/mec_simple.png b/docs/source/parallel/mec_simple.png
similarity index 100%
rename from docs/source/parallelz/mec_simple.png
rename to docs/source/parallel/mec_simple.png
Binary files a/docs/source/parallelz/mec_simple.png and b/docs/source/parallel/mec_simple.png differ
diff --git a/docs/source/parallelz/parallel_demos.txt b/docs/source/parallel/parallel_demos.txt
similarity index 95%
rename from docs/source/parallelz/parallel_demos.txt
rename to docs/source/parallel/parallel_demos.txt
index 0f3aff7..34ce2d0 100644
--- a/docs/source/parallelz/parallel_demos.txt
+++ b/docs/source/parallel/parallel_demos.txt
@@ -110,7 +110,7 @@ results. The code to run this calculation in parallel is contained in
:file:`docs/examples/newparallel/parallelpi.py`. This code can be run in parallel
using IPython by following these steps:
-1. Use :command:`ipclusterz` to start 15 engines. We used an 8 core (2 quad
+1. Use :command:`ipcluster` to start 15 engines. We used an 8 core (2 quad
core CPUs) cluster with hyperthreading enabled which makes the 8 cores
looks like 16 (1 controller + 15 engines) in the OS. However, the maximum
speedup we can observe is still only 8x.
@@ -230,7 +230,7 @@ plot using Matplotlib.
.. literalinclude:: ../../examples/newparallel/mcdriver.py
:language: python
-To use this code, start an IPython cluster using :command:`ipclusterz`, open
+To use this code, start an IPython cluster using :command:`ipcluster`, open
IPython in the pylab mode with the file :file:`mcdriver.py` in your current
working directory and then type:
diff --git a/docs/source/parallelz/parallel_details.txt b/docs/source/parallel/parallel_details.txt
similarity index 100%
rename from docs/source/parallelz/parallel_details.txt
rename to docs/source/parallel/parallel_details.txt
diff --git a/docs/source/parallelz/parallel_intro.txt b/docs/source/parallel/parallel_intro.txt
similarity index 95%
rename from docs/source/parallelz/parallel_intro.txt
rename to docs/source/parallel/parallel_intro.txt
index e72e360..093dac9 100644
--- a/docs/source/parallelz/parallel_intro.txt
+++ b/docs/source/parallel/parallel_intro.txt
@@ -156,7 +156,7 @@ To connect and authenticate to the controller an engine or client needs
some information that the controller has stored in a JSON file.
Thus, the JSON files need to be copied to a location where
the clients and engines can find them. Typically, this is the
-:file:`~/.ipython/clusterz_default/security` directory on the host where the
+:file:`~/.ipython/cluster_default/security` directory on the host where the
client/engine is running (which could be a different host than the controller).
Once the JSON files are copied over, everything should work fine.
@@ -192,10 +192,10 @@ Getting Started
To use IPython for parallel computing, you need to start one instance of the
controller and one or more instances of the engine. Initially, it is best to
simply start a controller and engines on a single host using the
-:command:`ipclusterz` command. To start a controller and 4 engines on your
+:command:`ipcluster` command. To start a controller and 4 engines on your
localhost, just do::
- $ ipclusterz start -n 4
+ $ ipcluster start -n 4
More details about starting the IPython controller and engines can be found
:ref:`here `
@@ -218,7 +218,7 @@ everything is working correctly, try the following commands:
When a client is created with no arguments, the client tries to find the corresponding JSON file
-in the local `~/.ipython/clusterz_default/security` directory. Or if you specified a profile,
+in the local `~/.ipython/cluster_default/security` directory. Or if you specified a profile,
you can use that with the Client. This should cover most cases:
.. sourcecode:: ipython
diff --git a/docs/source/parallelz/parallel_mpi.txt b/docs/source/parallel/parallel_mpi.txt
similarity index 90%
rename from docs/source/parallelz/parallel_mpi.txt
rename to docs/source/parallel/parallel_mpi.txt
index fe5cbf9..e0fee30 100644
--- a/docs/source/parallelz/parallel_mpi.txt
+++ b/docs/source/parallel/parallel_mpi.txt
@@ -50,16 +50,16 @@ To use code that calls MPI, there are typically two things that MPI requires.
There are a couple of ways that you can start the IPython engines and get
these things to happen.
-Automatic starting using :command:`mpiexec` and :command:`ipclusterz`
+Automatic starting using :command:`mpiexec` and :command:`ipcluster`
--------------------------------------------------------------------
-The easiest approach is to use the `mpiexec` mode of :command:`ipclusterz`,
+The easiest approach is to use the `mpiexec` mode of :command:`ipcluster`,
which will first start a controller and then a set of engines using
:command:`mpiexec`::
- $ ipclusterz mpiexec -n 4
+ $ ipcluster mpiexec -n 4
-This approach is best as interrupting :command:`ipclusterz` will automatically
+This approach is best as interrupting :command:`ipcluster` will automatically
stop and clean up the controller and engines.
Manual starting using :command:`mpiexec`
@@ -68,20 +68,20 @@ Manual starting using :command:`mpiexec`
If you want to start the IPython engines using the :command:`mpiexec`, just
do::
- $ mpiexec -n 4 ipenginez --mpi=mpi4py
+ $ mpiexec -n 4 ipengine --mpi=mpi4py
This requires that you already have a controller running and that the FURL
files for the engines are in place. We also have built in support for
PyTrilinos [PyTrilinos]_, which can be used (assuming is installed) by
starting the engines with::
- $ mpiexec -n 4 ipenginez --mpi=pytrilinos
+ $ mpiexec -n 4 ipengine --mpi=pytrilinos
-Automatic starting using PBS and :command:`ipclusterz`
+Automatic starting using PBS and :command:`ipcluster`
------------------------------------------------------
-The :command:`ipclusterz` command also has built-in integration with PBS. For
-more information on this approach, see our documentation on :ref:`ipclusterz
+The :command:`ipcluster` command also has built-in integration with PBS. For
+more information on this approach, see our documentation on :ref:`ipcluster
`.
Actually using MPI
@@ -110,7 +110,7 @@ distributed array. Save the following text in a file called :file:`psum.py`:
Now, start an IPython cluster::
- $ ipclusterz start -p mpi -n 4
+ $ ipcluster start -p mpi -n 4
.. note::
diff --git a/docs/source/parallelz/parallel_multiengine.txt b/docs/source/parallel/parallel_multiengine.txt
similarity index 95%
rename from docs/source/parallelz/parallel_multiengine.txt
rename to docs/source/parallel/parallel_multiengine.txt
index b44c249..9d879f5 100644
--- a/docs/source/parallelz/parallel_multiengine.txt
+++ b/docs/source/parallel/parallel_multiengine.txt
@@ -17,9 +17,9 @@ Starting the IPython controller and engines
To follow along with this tutorial, you will need to start the IPython
controller and four IPython engines. The simplest way of doing this is to use
-the :command:`ipclusterz` command::
+the :command:`ipcluster` command::
- $ ipclusterz start -n 4
+ $ ipcluster start -n 4
For more detailed information about starting the controller and engines, see
our :ref:`introduction ` to using IPython for parallel computing.
@@ -37,7 +37,7 @@ module and then create a :class:`.Client` instance:
In [2]: rc = Client()
This form assumes that the default connection information (stored in
-:file:`ipcontroller-client.json` found in :file:`IPYTHON_DIR/clusterz_default/security`) is
+:file:`ipcontroller-client.json` found in :file:`IPYTHON_DIR/cluster_default/security`) is
accurate. If the controller was started on a remote machine, you must copy that connection
file to the client machine, or enter its contents as arguments to the Client constructor:
diff --git a/docs/source/parallelz/parallel_pi.pdf b/docs/source/parallel/parallel_pi.pdf
similarity index 100%
rename from docs/source/parallelz/parallel_pi.pdf
rename to docs/source/parallel/parallel_pi.pdf
Binary files a/docs/source/parallelz/parallel_pi.pdf and b/docs/source/parallel/parallel_pi.pdf differ
diff --git a/docs/source/parallelz/parallel_pi.png b/docs/source/parallel/parallel_pi.png
similarity index 100%
rename from docs/source/parallelz/parallel_pi.png
rename to docs/source/parallel/parallel_pi.png
Binary files a/docs/source/parallelz/parallel_pi.png and b/docs/source/parallel/parallel_pi.png differ
diff --git a/docs/source/parallelz/parallel_process.txt b/docs/source/parallel/parallel_process.txt
similarity index 70%
rename from docs/source/parallelz/parallel_process.txt
rename to docs/source/parallel/parallel_process.txt
index aeafebc..3375555 100644
--- a/docs/source/parallelz/parallel_process.txt
+++ b/docs/source/parallel/parallel_process.txt
@@ -11,12 +11,12 @@ Because of this, there are many different possibilities.
Broadly speaking, there are two ways of going about starting a controller and engines:
-* In an automated manner using the :command:`ipclusterz` command.
-* In a more manual way using the :command:`ipcontrollerz` and
- :command:`ipenginez` commands.
+* In an automated manner using the :command:`ipcluster` command.
+* In a more manual way using the :command:`ipcontroller` and
+ :command:`ipengine` commands.
This document describes both of these methods. We recommend that new users
-start with the :command:`ipclusterz` command as it simplifies many common usage
+start with the :command:`ipcluster` command as it simplifies many common usage
cases.
General considerations
@@ -30,29 +30,29 @@ matter which method you use to start your IPython cluster.
Let's say that you want to start the controller on ``host0`` and engines on
hosts ``host1``-``hostn``. The following steps are then required:
-1. Start the controller on ``host0`` by running :command:`ipcontrollerz` on
+1. Start the controller on ``host0`` by running :command:`ipcontroller` on
``host0``.
2. Move the JSON file (:file:`ipcontroller-engine.json`) created by the
controller from ``host0`` to hosts ``host1``-``hostn``.
3. Start the engines on hosts ``host1``-``hostn`` by running
- :command:`ipenginez`. This command has to be told where the JSON file
+ :command:`ipengine`. This command has to be told where the JSON file
(:file:`ipcontroller-engine.json`) is located.
At this point, the controller and engines will be connected. By default, the JSON files
-created by the controller are put into the :file:`~/.ipython/clusterz_default/security`
+created by the controller are put into the :file:`~/.ipython/cluster_default/security`
directory. If the engines share a filesystem with the controller, step 2 can be skipped as
the engines will automatically look at that location.
The final step required to actually use the running controller from a client is to move
the JSON file :file:`ipcontroller-client.json` from ``host0`` to any host where clients
-will be run. If these file are put into the :file:`~/.ipython/clusterz_default/security`
+will be run. If these file are put into the :file:`~/.ipython/cluster_default/security`
directory of the client's host, they will be found automatically. Otherwise, the full path
to them has to be passed to the client's constructor.
-Using :command:`ipclusterz`
+Using :command:`ipcluster`
===========================
-The :command:`ipclusterz` command provides a simple way of starting a
+The :command:`ipcluster` command provides a simple way of starting a
controller and engines in the following situations:
1. When the controller and engines are all run on localhost. This is useful
@@ -67,24 +67,24 @@ controller and engines in the following situations:
.. note::
- Currently :command:`ipclusterz` requires that the
+ Currently :command:`ipcluster` requires that the
:file:`~/.ipython/cluster_/security` directory live on a shared filesystem that is
seen by both the controller and engines. If you don't have a shared file
- system you will need to use :command:`ipcontrollerz` and
- :command:`ipenginez` directly.
+ system you will need to use :command:`ipcontroller` and
+ :command:`ipengine` directly.
-Under the hood, :command:`ipclusterz` just uses :command:`ipcontrollerz`
-and :command:`ipenginez` to perform the steps described above.
+Under the hood, :command:`ipcluster` just uses :command:`ipcontroller`
+and :command:`ipengine` to perform the steps described above.
-The simplest way to use ipclusterz requires no configuration, and will
+The simplest way to use ipcluster requires no configuration, and will
launch a controller and a number of engines on the local machine. For instance,
to start one controller and 4 engines on localhost, just do::
- $ ipclusterz start -n 4
+ $ ipcluster start -n 4
To see other command line options for the local mode, do::
- $ ipclusterz -h
+ $ ipcluster -h
Configuring an IPython cluster
@@ -92,25 +92,25 @@ Configuring an IPython cluster
Cluster configurations are stored as `profiles`. You can create a new profile with::
- $ ipclusterz create -p myprofile
+ $ ipcluster create -p myprofile
-This will create the directory :file:`IPYTHONDIR/clusterz_myprofile`, and populate it
+This will create the directory :file:`IPYTHONDIR/cluster_myprofile`, and populate it
with the default configuration files for the three IPython cluster commands. Once
-you edit those files, you can continue to call ipclusterz/ipcontrollerz/ipenginez
+you edit those files, you can continue to call ipcluster/ipcontroller/ipengine
with no arguments beyond ``-p myprofile``, and any configuration will be maintained.
There is no limit to the number of profiles you can have, so you can maintain a profile for each
of your common use cases. The default profile will be used whenever the
-profile argument is not specified, so edit :file:`IPYTHONDIR/clusterz_default/*_config.py` to
+profile argument is not specified, so edit :file:`IPYTHONDIR/cluster_default/*_config.py` to
represent your most common use case.
The configuration files are loaded with commented-out settings and explanations,
which should cover most of the available possibilities.
-Using various batch systems with :command:`ipclusterz`
+Using various batch systems with :command:`ipcluster`
------------------------------------------------------
-:command:`ipclusterz` has a notion of Launchers that can start controllers
+:command:`ipcluster` has a notion of Launchers that can start controllers
and engines with various remote execution schemes. Currently supported
models include `mpiexec`, PBS-style (Torque, SGE), and Windows HPC Server.
@@ -120,7 +120,7 @@ models include `mpiexec`, PBS-style (Torque, SGE), and Windows HPC Server.
users can subclass and configure them to fit their own system that we
have not yet supported (such as Condor)
-Using :command:`ipclusterz` in mpiexec/mpirun mode
+Using :command:`ipcluster` in mpiexec/mpirun mode
--------------------------------------------------
@@ -132,11 +132,11 @@ The mpiexec/mpirun mode is useful if you:
If these are satisfied, you can create a new profile::
- $ ipclusterz create -p mpi
+ $ ipcluster create -p mpi
-and edit the file :file:`IPYTHONDIR/clusterz_mpi/ipclusterz_config.py`.
+and edit the file :file:`IPYTHONDIR/cluster_mpi/ipcluster_config.py`.
-There, instruct ipclusterz to use the MPIExec launchers by adding the lines:
+There, instruct ipcluster to use the MPIExec launchers by adding the lines:
.. sourcecode:: python
@@ -144,7 +144,7 @@ There, instruct ipclusterz to use the MPIExec launchers by adding the lines:
If the default MPI configuration is correct, then you can now start your cluster, with::
- $ ipclusterz start -n 4 -p mpi
+ $ ipcluster start -n 4 -p mpi
This does the following:
@@ -166,7 +166,7 @@ On newer MPI implementations (such as OpenMPI), this will work even if you
don't make any calls to MPI or call :func:`MPI_Init`. However, older MPI
implementations actually require each process to call :func:`MPI_Init` upon
starting. The easiest way of having this done is to install the mpi4py
-[mpi4py]_ package and then specify the ``c.MPI.use`` option in :file:`ipenginez_config.py`:
+[mpi4py]_ package and then specify the ``c.MPI.use`` option in :file:`ipengine_config.py`:
.. sourcecode:: python
@@ -177,21 +177,21 @@ having problems with this, you will likely have to use a custom Python
executable that itself calls :func:`MPI_Init` at the appropriate time.
Fortunately, mpi4py comes with such a custom Python executable that is easy to
install and use. However, this custom Python executable approach will not work
-with :command:`ipclusterz` currently.
+with :command:`ipcluster` currently.
More details on using MPI with IPython can be found :ref:`here `.
-Using :command:`ipclusterz` in PBS mode
+Using :command:`ipcluster` in PBS mode
---------------------------------------
The PBS mode uses the Portable Batch System [PBS]_ to start the engines.
As usual, we will start by creating a fresh profile::
- $ ipclusterz create -p pbs
+ $ ipcluster create -p pbs
-And in :file:`ipclusterz_config.py`, we will select the PBS launchers for the controller
+And in :file:`ipcluster_config.py`, we will select the PBS launchers for the controller
and engines:
.. sourcecode:: python
@@ -213,7 +213,7 @@ to specify your own. Here is a sample PBS script template:
cd $$PBS_O_WORKDIR
export PATH=$$HOME/usr/local/bin
export PYTHONPATH=$$HOME/usr/local/lib/python2.7/site-packages
- /usr/local/bin/mpiexec -n ${n} ipenginez --cluster_dir=${cluster_dir}
+ /usr/local/bin/mpiexec -n ${n} ipengine --cluster_dir=${cluster_dir}
There are a few important points about this template:
@@ -232,8 +232,8 @@ There are a few important points about this template:
environment variables in the template, or in SGE, where the config lines start
with ``#$``, which will have to be ``#$$``.
-4. Any options to :command:`ipenginez` can be given in the batch script
- template, or in :file:`ipenginez_config.py`.
+4. Any options to :command:`ipengine` can be given in the batch script
+ template, or in :file:`ipengine_config.py`.
5. Depending on the configuration of your system, you may have to set
 environment variables in the script template.
@@ -251,11 +251,11 @@ The controller template should be similar, but simpler:
cd $$PBS_O_WORKDIR
export PATH=$$HOME/usr/local/bin
export PYTHONPATH=$$HOME/usr/local/lib/python2.7/site-packages
- ipcontrollerz --cluster_dir=${cluster_dir}
+ ipcontroller --cluster_dir=${cluster_dir}
Once you have created these scripts, save them with names like
-:file:`pbs.engine.template`. Now you can load them into the :file:`ipclusterz_config` with:
+:file:`pbs.engine.template`. Now you can load them into the :file:`ipcluster_config` with:
.. sourcecode:: python
@@ -264,12 +264,12 @@ Once you have created these scripts, save them with names like
c.PBSControllerLauncher.batch_template_file = "pbs.controller.template"
-Alternately, you can just define the templates as strings inside :file:`ipclusterz_config`.
+Alternately, you can just define the templates as strings inside :file:`ipcluster_config`.
Whether you are using your own templates or our defaults, the extra configurables available are
the number of engines to launch (``$n``), and the batch system queue to which the jobs are to be
submitted (``$queue``). These are configurables, and can be specified in
-:file:`ipclusterz_config`:
+:file:`ipcluster_config`:
.. sourcecode:: python
@@ -279,7 +279,7 @@ submitted (``$queue``)). These are configurables, and can be specified in
Note that assuming you are running PBS on a multi-node cluster, the Controller's default behavior
of listening only on localhost is likely too restrictive. In this case, also assuming the
nodes are safely behind a firewall, you can simply instruct the Controller to listen for
-connections on all its interfaces, by adding in :file:`ipcontrollerz_config`:
+connections on all its interfaces, by adding in :file:`ipcontroller_config`:
.. sourcecode:: python
@@ -287,9 +287,9 @@ connections on all its interfaces, by adding in :file:`ipcontrollerz_config`:
You can now run the cluster with::
- $ ipclusterz start -p pbs -n 128
+ $ ipcluster start -p pbs -n 128
-Additional configuration options can be found in the PBS section of :file:`ipclusterz_config`.
+Additional configuration options can be found in the PBS section of :file:`ipcluster_config`.
.. note::
@@ -298,12 +298,12 @@ Additional configuration options can be found in the PBS section of :file:`ipclu
and with further configuration in similar batch systems like Condor.
-Using :command:`ipclusterz` in SSH mode
+Using :command:`ipcluster` in SSH mode
---------------------------------------
-The SSH mode uses :command:`ssh` to execute :command:`ipenginez` on remote
-nodes and :command:`ipcontrollerz` can be run remotely as well, or on localhost.
+The SSH mode uses :command:`ssh` to execute :command:`ipengine` on remote
+nodes and :command:`ipcontroller` can be run remotely as well, or on localhost.
.. note::
@@ -312,9 +312,9 @@ nodes and :command:`ipcontrollerz` can be run remotely as well, or on localhost.
As usual, we start by creating a clean profile::
- $ ipclusterz create -p ssh
+ $ ipcluster create -p ssh
-To use this mode, select the SSH launchers in :file:`ipclusterz_config.py`:
+To use this mode, select the SSH launchers in :file:`ipcluster_config.py`:
.. sourcecode:: python
@@ -331,9 +331,9 @@ The controller's remote location and configuration can be specified:
# c.SSHControllerLauncher.hostname = 'controller.example.com'
# c.SSHControllerLauncher.user = os.environ.get('USER','username')
- # Set the arguments to be passed to ipcontrollerz
- # note that remotely launched ipcontrollerz will not get the contents of
- # the local ipcontrollerz_config.py unless it resides on the *remote host*
+ # Set the arguments to be passed to ipcontroller
+ # note that remotely launched ipcontroller will not get the contents of
+ # the local ipcontroller_config.py unless it resides on the *remote host*
# in the location specified by the --cluster_dir argument.
# c.SSHControllerLauncher.program_args = ['-r', '-ip', '0.0.0.0', '--cluster_dir', '/path/to/cd']
@@ -357,46 +357,46 @@ on that host.
* The `engines` dict, where the keys are the host we want to run engines on and
the value is the number of engines to run on that host.
* on host3, the value is a tuple, where the number of engines is first, and the arguments
- to be passed to :command:`ipenginez` are the second element.
+ to be passed to :command:`ipengine` are the second element.
For engines without explicitly specified arguments, the default arguments are set in
a single location:
.. sourcecode:: python
- c.SSHEngineSetLauncher.engine_args = ['--cluster_dir', '/path/to/clusterz_ssh']
+ c.SSHEngineSetLauncher.engine_args = ['--cluster_dir', '/path/to/cluster_ssh']
-Current limitations of the SSH mode of :command:`ipclusterz` are:
+Current limitations of the SSH mode of :command:`ipcluster` are:
* Untested on Windows. Would require a working :command:`ssh` on Windows.
Also, we are using shell scripts to setup and execute commands on remote
hosts.
* No file movement -
-Using the :command:`ipcontrollerz` and :command:`ipenginez` commands
+Using the :command:`ipcontroller` and :command:`ipengine` commands
====================================================================
-It is also possible to use the :command:`ipcontrollerz` and :command:`ipenginez`
+It is also possible to use the :command:`ipcontroller` and :command:`ipengine`
commands to start your controller and engines. This approach gives you full
control over all aspects of the startup process.
Starting the controller and engine on your local machine
--------------------------------------------------------
-To use :command:`ipcontrollerz` and :command:`ipenginez` to start things on your
+To use :command:`ipcontroller` and :command:`ipengine` to start things on your
local machine, do the following.
First start the controller::
- $ ipcontrollerz
+ $ ipcontroller
Next, start however many instances of the engine you want using (repeatedly)
the command::
- $ ipenginez
+ $ ipengine
The engines should start and automatically connect to the controller using the
-JSON files in :file:`~/.ipython/clusterz_default/security`. You are now ready to use the
+JSON files in :file:`~/.ipython/cluster_default/security`. You are now ready to use the
controller and engines from IPython.
.. warning::
@@ -418,18 +418,18 @@ Starting the controller and engines on different hosts
When the controller and engines are running on different hosts, things are
slightly more complicated, but the underlying ideas are the same:
-1. Start the controller on a host using :command:`ipcontrollerz`.
+1. Start the controller on a host using :command:`ipcontroller`.
2. Copy :file:`ipcontroller-engine.json` from :file:`~/.ipython/cluster_/security` on
the controller's host to the host where the engines will run.
-3. Use :command:`ipenginez` on the engine's hosts to start the engines.
+3. Use :command:`ipengine` on the engine's hosts to start the engines.
-The only thing you have to be careful of is to tell :command:`ipenginez` where
+The only thing you have to be careful of is to tell :command:`ipengine` where
the :file:`ipcontroller-engine.json` file is located. There are two ways you
can do this:
* Put :file:`ipcontroller-engine.json` in the :file:`~/.ipython/cluster_/security`
directory on the engine's host, where it will be found automatically.
-* Call :command:`ipenginez` with the ``--file=full_path_to_the_file``
+* Call :command:`ipengine` with the ``--file=full_path_to_the_file``
flag.
The ``--file`` flag works like this::
@@ -455,7 +455,7 @@ any point in the future.
To do this, the only thing you have to do is specify the `-r` flag, so that
the connection information in the JSON files remains accurate::
- $ ipcontrollerz -r
+ $ ipcontroller -r
Then, just copy the JSON files over the first time and you are set. You can
start and stop the controller and engines as many times as you want in the
@@ -478,7 +478,7 @@ IPython and can be found in the directory :file:`~/.ipython/cluster_/lo
Sending the log files to us will often help us to debug any problems.
-Configuring `ipcontrollerz`
+Configuring `ipcontroller`
---------------------------
Ports and addresses
@@ -493,7 +493,7 @@ Database Backend
-Configuring `ipenginez`
+Configuring `ipengine`
-----------------------
.. note::
diff --git a/docs/source/parallelz/parallel_security.txt b/docs/source/parallel/parallel_security.txt
similarity index 99%
rename from docs/source/parallelz/parallel_security.txt
rename to docs/source/parallel/parallel_security.txt
index a588db2..7f65a96 100644
--- a/docs/source/parallelz/parallel_security.txt
+++ b/docs/source/parallel/parallel_security.txt
@@ -130,7 +130,7 @@ way.
There is exactly one key per cluster - it must be the same everywhere. Typically, the
controller creates this key, and stores it in the private connection files
`ipython-{engine|client}.json`. These files are typically stored in the
-`~/.ipython/clusterz_/security` directory, and are maintained as readable only by
+`~/.ipython/cluster_/security` directory, and are maintained as readable only by
the owner, just as is common practice with a user's keys in their `.ssh` directory.
.. warning::
diff --git a/docs/source/parallelz/parallel_task.txt b/docs/source/parallel/parallel_task.txt
similarity index 96%
rename from docs/source/parallelz/parallel_task.txt
rename to docs/source/parallel/parallel_task.txt
index dead318..326dc25 100644
--- a/docs/source/parallelz/parallel_task.txt
+++ b/docs/source/parallel/parallel_task.txt
@@ -22,9 +22,9 @@ Starting the IPython controller and engines
To follow along with this tutorial, you will need to start the IPython
controller and four IPython engines. The simplest way of doing this is to use
-the :command:`ipclusterz` command::
+the :command:`ipcluster` command::
- $ ipclusterz start -n 4
+ $ ipcluster start -n 4
For more detailed information about starting the controller and engines, see
our :ref:`introduction ` to using IPython for parallel computing.
@@ -321,16 +321,16 @@ Schedulers
There are a variety of valid ways to determine where jobs should be assigned in a
load-balancing situation. In IPython, we support several standard schemes, and
even make it easy to define your own. The scheme can be selected via the ``--scheme``
-argument to :command:`ipcontrollerz`, or in the :attr:`HubFactory.scheme` attribute
+argument to :command:`ipcontroller`, or in the :attr:`HubFactory.scheme` attribute
of a controller config object.
The built-in routing schemes:
To select one of these schemes, simply do::
- $ ipcontrollerz --scheme
+ $ ipcontroller --scheme
for instance:
- $ ipcontrollerz --scheme lru
+ $ ipcontroller --scheme lru
lru: Least Recently Used
diff --git a/docs/source/parallelz/parallel_transition.txt b/docs/source/parallel/parallel_transition.txt
similarity index 100%
rename from docs/source/parallelz/parallel_transition.txt
rename to docs/source/parallel/parallel_transition.txt
diff --git a/docs/source/parallelz/parallel_winhpc.txt b/docs/source/parallel/parallel_winhpc.txt
similarity index 88%
rename from docs/source/parallelz/parallel_winhpc.txt
rename to docs/source/parallel/parallel_winhpc.txt
index e4a36f6..0aac695 100644
--- a/docs/source/parallelz/parallel_winhpc.txt
+++ b/docs/source/parallel/parallel_winhpc.txt
@@ -144,25 +144,25 @@ in parallel on the engines from within the IPython shell using an appropriate
client. This includes the ability to interact with, plot and visualize data
from the engines.
-IPython has a command line program called :command:`ipclusterz` that automates
+IPython has a command line program called :command:`ipcluster` that automates
all aspects of starting the controller and engines on the compute nodes.
-:command:`ipclusterz` has full support for the Windows HPC job scheduler,
-meaning that :command:`ipclusterz` can use this job scheduler to start the
+:command:`ipcluster` has full support for the Windows HPC job scheduler,
+meaning that :command:`ipcluster` can use this job scheduler to start the
controller and engines. In our experience, the Windows HPC job scheduler is
particularly well suited for interactive applications, such as IPython. Once
-:command:`ipclusterz` is configured properly, a user can start an IPython
+:command:`ipcluster` is configured properly, a user can start an IPython
cluster from their local workstation almost instantly, without having to log
on to the head node (as is typically required by Unix based job schedulers).
This enables a user to move seamlessly between serial and parallel
computations.
-In this section we show how to use :command:`ipclusterz` to start an IPython
+In this section we show how to use :command:`ipcluster` to start an IPython
cluster using the Windows HPC Server 2008 job scheduler. To make sure that
-:command:`ipclusterz` is installed and working properly, you should first try
+:command:`ipcluster` is installed and working properly, you should first try
to start an IPython cluster on your local host. To do this, open a Windows
Command Prompt and type the following command::
- ipclusterz start -n 2
+ ipcluster start -n 2
You should see a number of messages printed to the screen, ending with
"IPython cluster: started". The result should look something like the following
@@ -174,12 +174,12 @@ At this point, the controller and two engines are running on your local host.
This configuration is useful for testing and for situations where you want to
take advantage of multiple cores on your local computer.
-Now that we have confirmed that :command:`ipclusterz` is working properly, we
+Now that we have confirmed that :command:`ipcluster` is working properly, we
describe how to configure and run an IPython cluster on an actual compute
cluster running Windows HPC Server 2008. Here is an outline of the needed
steps:
-1. Create a cluster profile using: ``ipclusterz create -p mycluster``
+1. Create a cluster profile using: ``ipcluster create -p mycluster``
2. Edit configuration files in the directory :file:`.ipython\\cluster_mycluster`
@@ -191,7 +191,7 @@ Creating a cluster profile
In most cases, you will have to create a cluster profile to use IPython on a
cluster. A cluster profile is a name (like "mycluster") that is associated
with a particular cluster configuration. The profile name is used by
-:command:`ipclusterz` when working with the cluster.
+:command:`ipcluster` when working with the cluster.
Associated with each cluster profile is a cluster directory. This cluster
directory is a specially named directory (typically located in the
@@ -204,10 +204,10 @@ security keys. The naming convention for cluster directories is:
To create a new cluster profile (named "mycluster") and the associated cluster
directory, type the following command at the Windows Command Prompt::
- ipclusterz create -p mycluster
+ ipcluster create -p mycluster
The output of this command is shown in the screenshot below. Notice how
-:command:`ipclusterz` prints out the location of the newly created cluster
+:command:`ipcluster` prints out the location of the newly created cluster
directory.
.. image:: ipcluster_create.*
@@ -218,19 +218,19 @@ Configuring a cluster profile
Next, you will need to configure the newly created cluster profile by editing
the following configuration files in the cluster directory:
-* :file:`ipclusterz_config.py`
+* :file:`ipcluster_config.py`
* :file:`ipcontroller_config.py`
* :file:`ipengine_config.py`
-When :command:`ipclusterz` is run, these configuration files are used to
+When :command:`ipcluster` is run, these configuration files are used to
determine how the engines and controller will be started. In most cases,
you will only have to set a few of the attributes in these files.
-To configure :command:`ipclusterz` to use the Windows HPC job scheduler, you
+To configure :command:`ipcluster` to use the Windows HPC job scheduler, you
will need to edit the following attributes in the file
-:file:`ipclusterz_config.py`::
+:file:`ipcluster_config.py`::
- # Set these at the top of the file to tell ipclusterz to use the
+ # Set these at the top of the file to tell ipcluster to use the
# Windows HPC job scheduler.
c.Global.controller_launcher = \
'IPython.parallel.launcher.WindowsHPCControllerLauncher'
@@ -257,15 +257,15 @@ Starting the cluster profile
Once a cluster profile has been configured, starting an IPython cluster using
the profile is simple::
- ipclusterz start -p mycluster -n 32
+ ipcluster start -p mycluster -n 32
-The ``-n`` option tells :command:`ipclusterz` how many engines to start (in
+The ``-n`` option tells :command:`ipcluster` how many engines to start (in
this case 32). Stopping the cluster is as simple as typing Control-C.
Using the HPC Job Manager
-------------------------
-When ``ipclusterz start`` is run the first time, :command:`ipclusterz` creates
+When ``ipcluster start`` is run the first time, :command:`ipcluster` creates
two XML job description files in the cluster directory:
* :file:`ipcontroller_job.xml`
@@ -273,8 +273,8 @@ two XML job description files in the cluster directory:
Once these files have been created, they can be imported into the HPC Job
Manager application. Then, the controller and engines for that profile can be
-started using the HPC Job Manager directly, without using :command:`ipclusterz`.
-However, anytime the cluster profile is re-configured, ``ipclusterz start``
+started using the HPC Job Manager directly, without using :command:`ipcluster`.
+However, anytime the cluster profile is re-configured, ``ipcluster start``
must be run again to regenerate the XML job description files. The
following screenshot shows what the HPC Job Manager interface looks like
with a running IPython cluster.
diff --git a/docs/source/parallelz/simpledag.pdf b/docs/source/parallel/simpledag.pdf
similarity index 100%
rename from docs/source/parallelz/simpledag.pdf
rename to docs/source/parallel/simpledag.pdf
Binary files a/docs/source/parallelz/simpledag.pdf and b/docs/source/parallel/simpledag.pdf differ
diff --git a/docs/source/parallelz/simpledag.png b/docs/source/parallel/simpledag.png
similarity index 100%
rename from docs/source/parallelz/simpledag.png
rename to docs/source/parallel/simpledag.png
Binary files a/docs/source/parallelz/simpledag.png and b/docs/source/parallel/simpledag.png differ
diff --git a/docs/source/parallelz/single_digits.pdf b/docs/source/parallel/single_digits.pdf
similarity index 100%
rename from docs/source/parallelz/single_digits.pdf
rename to docs/source/parallel/single_digits.pdf
Binary files a/docs/source/parallelz/single_digits.pdf and b/docs/source/parallel/single_digits.pdf differ
diff --git a/docs/source/parallelz/single_digits.png b/docs/source/parallel/single_digits.png
similarity index 100%
rename from docs/source/parallelz/single_digits.png
rename to docs/source/parallel/single_digits.png
Binary files a/docs/source/parallelz/single_digits.png and b/docs/source/parallel/single_digits.png differ
diff --git a/docs/source/parallelz/two_digit_counts.pdf b/docs/source/parallel/two_digit_counts.pdf
similarity index 100%
rename from docs/source/parallelz/two_digit_counts.pdf
rename to docs/source/parallel/two_digit_counts.pdf
Binary files a/docs/source/parallelz/two_digit_counts.pdf and b/docs/source/parallel/two_digit_counts.pdf differ
diff --git a/docs/source/parallelz/two_digit_counts.png b/docs/source/parallel/two_digit_counts.png
similarity index 100%
rename from docs/source/parallelz/two_digit_counts.png
rename to docs/source/parallel/two_digit_counts.png
Binary files a/docs/source/parallelz/two_digit_counts.png and b/docs/source/parallel/two_digit_counts.png differ
diff --git a/docs/source/parallelz/winhpc_index.txt b/docs/source/parallel/winhpc_index.txt
similarity index 100%
rename from docs/source/parallelz/winhpc_index.txt
rename to docs/source/parallel/winhpc_index.txt
diff --git a/docs/source/parallelz/index.txt b/docs/source/parallelz/index.txt
deleted file mode 100644
index 798c3c5..0000000
--- a/docs/source/parallelz/index.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-.. _parallelz_index:
-
-==========================================
-Using IPython for parallel computing (ZMQ)
-==========================================
-
-.. toctree::
- :maxdepth: 2
-
- parallel_intro.txt
- parallel_process.txt
- parallel_multiengine.txt
- parallel_task.txt
- parallel_mpi.txt
- parallel_security.txt
- parallel_winhpc.txt
- parallel_demos.txt
- dag_dependencies.txt
- parallel_details.txt
- parallel_transition.txt
-
-
diff --git a/setup.py b/setup.py
index 13ec6aa..c928ab8 100755
--- a/setup.py
+++ b/setup.py
@@ -215,16 +215,16 @@ if 'setuptools' in sys.modules:
'ipython = IPython.frontend.terminal.ipapp:launch_new_instance',
'ipython-qtconsole = IPython.frontend.qt.console.ipythonqt:main',
'pycolor = IPython.utils.PyColorize:main',
- 'ipcontrollerz = IPython.parallel.ipcontrollerapp:launch_new_instance',
- 'ipenginez = IPython.parallel.ipengineapp:launch_new_instance',
- 'iploggerz = IPython.parallel.iploggerapp:launch_new_instance',
- 'ipclusterz = IPython.parallel.ipclusterapp:launch_new_instance',
+ 'ipcontroller = IPython.parallel.ipcontrollerapp:launch_new_instance',
+ 'ipengine = IPython.parallel.ipengineapp:launch_new_instance',
+ 'iplogger = IPython.parallel.iploggerapp:launch_new_instance',
+ 'ipcluster = IPython.parallel.ipclusterapp:launch_new_instance',
'iptest = IPython.testing.iptest:main',
'irunner = IPython.lib.irunner:main'
]
}
setup_args['extras_require'] = dict(
- zmq = 'pyzmq>=2.0.10',
+ zmq = 'pyzmq>=2.0.10.1',
doc='Sphinx>=0.3',
test='nose>=0.10.1',
security='pyOpenSSL>=0.6'
diff --git a/setupbase.py b/setupbase.py
index f039830..683a3a6 100644
--- a/setupbase.py
+++ b/setupbase.py
@@ -127,6 +127,7 @@ def find_packages():
add_package(packages, 'frontend.qt.console', tests=True)
add_package(packages, 'frontend.terminal', tests=True)
add_package(packages, 'lib', tests=True)
+ add_package(packages, 'parallel', tests=True)
add_package(packages, 'quarantine', tests=True)
add_package(packages, 'scripts')
add_package(packages, 'testing', tests=True)
@@ -134,7 +135,6 @@ def find_packages():
add_package(packages, 'utils', tests=True)
add_package(packages, 'zmq')
add_package(packages, 'zmq.pylab')
- add_package(packages, 'parallel')
return packages
#---------------------------------------------------------------------------
@@ -265,10 +265,10 @@ def find_scripts():
parallel_scripts = pjoin('IPython','parallel','scripts')
main_scripts = pjoin('IPython','scripts')
scripts = [
- pjoin(parallel_scripts, 'ipenginez'),
- pjoin(parallel_scripts, 'ipcontrollerz'),
- pjoin(parallel_scripts, 'ipclusterz'),
- pjoin(parallel_scripts, 'iploggerz'),
+ pjoin(parallel_scripts, 'ipengine'),
+ pjoin(parallel_scripts, 'ipcontroller'),
+ pjoin(parallel_scripts, 'ipcluster'),
+ pjoin(parallel_scripts, 'iplogger'),
pjoin(main_scripts, 'ipython'),
pjoin(main_scripts, 'ipython-qtconsole'),
pjoin(main_scripts, 'pycolor'),