##// END OF EJS Templates
Fixing how the working directory is handled in kernel....
Brian Granger -
Show More
@@ -1,202 +1,184
1 1 import os
2 2
3 3 c = get_config()
4 4
5 5 #-----------------------------------------------------------------------------
6 6 # Select which launchers to use
7 7 #-----------------------------------------------------------------------------
8 8
9 9 # This allows you to control what method is used to start the controller
10 10 # and engines. The following methods are currently supported:
11 11 # - Start as a regular process on localhost.
12 12 # - Start using mpiexec.
13 13 # - Start using the Windows HPC Server 2008 scheduler
14 14 # - Start using PBS
15 15 # - Start using SSH (currently broken)
16 16
17 17
18 18 # The selected launchers can be configured below.
19 19
20 20 # Options are:
21 21 # - LocalControllerLauncher
22 22 # - MPIExecControllerLauncher
23 23 # - PBSControllerLauncher
24 24 # - WindowsHPCControllerLauncher
25 25 # c.Global.controller_launcher = 'IPython.kernel.launcher.LocalControllerLauncher'
26 26
27 27 # Options are:
28 28 # - LocalEngineSetLauncher
29 29 # - MPIExecEngineSetLauncher
30 30 # - PBSEngineSetLauncher
31 31 # - WindowsHPCEngineSetLauncher
32 32 # c.Global.engine_launcher = 'IPython.kernel.launcher.LocalEngineSetLauncher'
33 33
34 34 #-----------------------------------------------------------------------------
35 35 # Global configuration
36 36 #-----------------------------------------------------------------------------
37 37
38 38 # The default number of engines that will be started. This is overridden by
39 39 # the -n command line option: "ipcluster start -n 4"
40 40 # c.Global.n = 2
41 41
42 42 # Log to a file in cluster_dir/log, otherwise just log to sys.stdout.
43 43 # c.Global.log_to_file = False
44 44
45 45 # Remove old logs from cluster_dir/log before starting.
46 46 # c.Global.clean_logs = True
47 47
48 48 # The working directory for the process. The application will use os.chdir
49 49 # to change to this directory before starting.
50 # c.Global.working_dir = os.getcwd()
50 # c.Global.work_dir = os.getcwd()
51 51
52 52
53 53 #-----------------------------------------------------------------------------
54 54 # Local process launchers
55 55 #-----------------------------------------------------------------------------
56 56
57 # The working directory for the controller
58 # c.LocalControllerLauncher.working_dir = u''
59
60 57 # The command line arguments to call the controller with.
61 58 # c.LocalControllerLauncher.controller_args = \
62 59 # ['--log-to-file','--log-level', '40']
63 60
64 61 # The working directory for the controller
65 # c.LocalEngineSetLauncher.working_dir = u''
62 # c.LocalEngineSetLauncher.work_dir = u''
66 63
67 64 # Command line argument passed to the engines.
68 65 # c.LocalEngineSetLauncher.engine_args = ['--log-to-file','--log-level', '40']
69 66
70 67 #-----------------------------------------------------------------------------
71 68 # MPIExec launchers
72 69 #-----------------------------------------------------------------------------
73 70
74 # The working directory for the controller
75 # c.MPIExecControllerLauncher.working_dir = u''
76
77 71 # The mpiexec/mpirun command to use in starting the controller.
78 72 # c.MPIExecControllerLauncher.mpi_cmd = ['mpiexec']
79 73
80 74 # Additional arguments to pass to the actual mpiexec command.
81 75 # c.MPIExecControllerLauncher.mpi_args = []
82 76
83 77 # The command line argument to call the controller with.
84 78 # c.MPIExecControllerLauncher.controller_args = \
85 79 # ['--log-to-file','--log-level', '40']
86 80
87 81
88 # The working directory for the controller
89 # c.MPIExecEngineSetLauncher.working_dir = u''
90
91 82 # The mpiexec/mpirun command to use in starting the engines.
92 83 # c.MPIExecEngineSetLauncher.mpi_cmd = ['mpiexec']
93 84
94 85 # Additional arguments to pass to the actual mpiexec command.
95 86 # c.MPIExecEngineSetLauncher.mpi_args = []
96 87
97 88 # Command line argument passed to the engines.
98 89 # c.MPIExecEngineSetLauncher.engine_args = ['--log-to-file','--log-level', '40']
99 90
100 91 # The default number of engines to start if not given elsewhere.
101 92 # c.MPIExecEngineSetLauncher.n = 1
102 93
103 94 #-----------------------------------------------------------------------------
104 95 # SSH launchers
105 96 #-----------------------------------------------------------------------------
106 97
107 98 # Todo
108 99
109 100
110 101 #-----------------------------------------------------------------------------
111 102 # Unix batch (PBS) schedulers launchers
112 103 #-----------------------------------------------------------------------------
113 104
114 # The working directory for the controller
115 # c.PBSControllerLauncher.working_dir = u''
116
117 105 # The command line program to use to submit a PBS job.
118 106 # c.PBSControllerLauncher.submit_command = 'qsub'
119 107
120 108 # The command line program to use to delete a PBS job.
121 109 # c.PBSControllerLauncher.delete_command = 'qdel'
122 110
123 111 # A regular expression that takes the output of qsub and find the job id.
124 112 # c.PBSControllerLauncher.job_id_regexp = '\d+'
125 113
126 114 # The batch submission script used to start the controller. This is where
127 115 # environment variables would be setup, etc. This string is interpolated using
128 # the Itpl module in IPython.external. Basically, you can use ${profile} for
129 # the controller profile or ${cluster_dir} for the cluster_dir.
116 # the Itpl module in IPython.external. Basically, you can use ${n} for the
117 # number of engines and ${cluster_dir} for the cluster_dir.
130 118 # c.PBSControllerLauncher.batch_template = """"""
131 119
132 120 # The name of the instantiated batch script that will actually be used to
133 121 # submit the job. This will be written to the cluster directory.
134 122 # c.PBSControllerLauncher.batch_file_name = u'pbs_batch_script_controller'
135 123
136 124
137 # The working directory for the controller
138 # c.PBSEngineSetLauncher.working_dir = u''
139
140 125 # The command line program to use to submit a PBS job.
141 126 # c.PBSEngineSetLauncher.submit_command = 'qsub'
142 127
143 128 # The command line program to use to delete a PBS job.
144 129 # c.PBSEngineSetLauncher.delete_command = 'qdel'
145 130
146 131 # A regular expression that takes the output of qsub and find the job id.
147 132 # c.PBSEngineSetLauncher.job_id_regexp = '\d+'
148 133
149 134 # The batch submission script used to start the engines. This is where
150 135 # environment variables would be setup, etc. This string is interpolated using
151 136 # the Itpl module in IPython.external. Basically, you can use ${n} for the
152 # number of engine, ${profile} or the engine profile and ${cluster_dir}
153 # for the cluster_dir.
137 # number of engines and ${cluster_dir} for the cluster_dir.
154 138 # c.PBSEngineSetLauncher.batch_template = """"""
155 139
156 140 # The name of the instantiated batch script that will actually be used to
157 141 # submit the job. This will be written to the cluster directory.
158 142 # c.PBSEngineSetLauncher.batch_file_name = u'pbs_batch_script_engines'
159 143
160 144 #-----------------------------------------------------------------------------
161 145 # Windows HPC Server 2008 launcher configuration
162 146 #-----------------------------------------------------------------------------
163 147
164 148 # c.IPControllerJob.job_name = 'IPController'
165 149 # c.IPControllerJob.is_exclusive = False
166 150 # c.IPControllerJob.username = 'USERDOMAIN\\USERNAME'
167 151 # c.IPControllerJob.priority = 'Highest'
168 152 # c.IPControllerJob.requested_nodes = ''
169 153 # c.IPControllerJob.project = 'MyProject'
170 154
171 155 # c.IPControllerTask.task_name = 'IPController'
172 156 # c.IPControllerTask.controller_cmd = [u'ipcontroller.exe']
173 157 # c.IPControllerTask.controller_args = ['--log-to-file', '--log-level', '40']
174 158 # c.IPControllerTask.environment_variables = {}
175 159
176 # c.WindowsHPCControllerLauncher.working_dir = u''
177 160 # c.WindowsHPCControllerLauncher.scheduler = 'HEADNODE'
178 161 # c.WindowsHPCControllerLauncher.job_file_name = u'ipcontroller_job.xml'
179 162
180 163
181 164 # c.IPEngineSetJob.job_name = 'IPEngineSet'
182 165 # c.IPEngineSetJob.is_exclusive = False
183 166 # c.IPEngineSetJob.username = 'USERDOMAIN\\USERNAME'
184 167 # c.IPEngineSetJob.priority = 'Highest'
185 168 # c.IPEngineSetJob.requested_nodes = ''
186 169 # c.IPEngineSetJob.project = 'MyProject'
187 170
188 171 # c.IPEngineTask.task_name = 'IPEngine'
189 172 # c.IPEngineTask.engine_cmd = [u'ipengine.exe']
190 173 # c.IPEngineTask.engine_args = ['--log-to-file', '--log-level', '40']
191 174 # c.IPEngineTask.environment_variables = {}
192 175
193 # c.WindowsHPCEngineSetLauncher.working_dir = u''
194 176 # c.WindowsHPCEngineSetLauncher.scheduler = 'HEADNODE'
195 177 # c.WindowsHPCEngineSetLauncher.job_file_name = u'ipengineset_job.xml'
196 178
197 179
198 180
199 181
200 182
201 183
202 184
@@ -1,136 +1,136
1 1 from IPython.config.loader import Config
2 2
3 3 c = get_config()
4 4
5 5 #-----------------------------------------------------------------------------
6 6 # Global configuration
7 7 #-----------------------------------------------------------------------------
8 8
9 9 # Basic Global config attributes
10 10
11 11 # Start up messages are logged to stdout using the logging module.
12 12 # These all happen before the twisted reactor is started and are
13 13 # useful for debugging purposes. Can be (10=DEBUG,20=INFO,30=WARN,40=CRITICAL)
14 14 # and smaller is more verbose.
15 15 # c.Global.log_level = 20
16 16
17 17 # Log to a file in cluster_dir/log, otherwise just log to sys.stdout.
18 18 # c.Global.log_to_file = False
19 19
20 20 # Remove old logs from cluster_dir/log before starting.
21 21 # c.Global.clean_logs = True
22 22
23 23 # A list of Python statements that will be run before starting the
24 24 # controller. This is provided because occasionally certain things need to
25 25 # be imported in the controller for pickling to work.
26 26 # c.Global.import_statements = ['import math']
27 27
28 28 # Reuse the controller's FURL files. If False, FURL files are regenerated
29 29 # each time the controller is run. If True, they will be reused, *but*, you
30 30 # also must set the network ports by hand. If set, this will override the
31 31 # values set for the client and engine connections below.
32 32 # c.Global.reuse_furls = True
33 33
34 34 # Enable SSL encryption on all connections to the controller. If set, this
35 35 # will override the values set for the client and engine connections below.
36 36 # c.Global.secure = True
37 37
38 38 # The working directory for the process. The application will use os.chdir
39 39 # to change to this directory before starting.
40 # c.Global.working_dir = os.getcwd()
40 # c.Global.work_dir = os.getcwd()
41 41
42 42 #-----------------------------------------------------------------------------
43 43 # Configure the client services
44 44 #-----------------------------------------------------------------------------
45 45
46 46 # Basic client service config attributes
47 47
48 48 # The network interface the controller will listen on for client connections.
49 49 # This should be an IP address or hostname of the controller's host. The empty
50 50 # string means listen on all interfaces.
51 51 # c.FCClientServiceFactory.ip = ''
52 52
53 53 # The TCP/IP port the controller will listen on for client connections. If 0
54 54 # a random port will be used. If the controller's host has a firewall running
55 55 # it must allow incoming traffic on this port.
56 56 # c.FCClientServiceFactory.port = 0
57 57
58 58 # The client learns how to connect to the controller by looking at the
59 59 # location field embedded in the FURL. If this field is empty, all network
60 60 # interfaces that the controller is listening on will be listed. To have the
61 61 # client connect on a particular interface, list it here.
62 62 # c.FCClientServiceFactory.location = ''
63 63
64 64 # Use SSL encryption for the client connection.
65 65 # c.FCClientServiceFactory.secure = True
66 66
67 67 # Reuse the client FURL each time the controller is started. If set, you must
68 68 # also pick a specific network port above (FCClientServiceFactory.port).
69 69 # c.FCClientServiceFactory.reuse_furls = False
70 70
71 71 #-----------------------------------------------------------------------------
72 72 # Configure the engine services
73 73 #-----------------------------------------------------------------------------
74 74
75 75 # Basic config attributes for the engine services.
76 76
77 77 # The network interface the controller will listen on for engine connections.
78 78 # This should be an IP address or hostname of the controller's host. The empty
79 79 # string means listen on all interfaces.
80 80 # c.FCEngineServiceFactory.ip = ''
81 81
82 82 # The TCP/IP port the controller will listen on for engine connections. If 0
83 83 # a random port will be used. If the controller's host has a firewall running
84 84 # it must allow incoming traffic on this port.
85 85 # c.FCEngineServiceFactory.port = 0
86 86
87 87 # The engine learns how to connect to the controller by looking at the
88 88 # location field embedded in the FURL. If this field is empty, all network
89 89 # interfaces that the controller is listening on will be listed. To have the
90 90 # client connect on a particular interface, list it here.
91 91 # c.FCEngineServiceFactory.location = ''
92 92
93 93 # Use SSL encryption for the engine connection.
94 94 # c.FCEngineServiceFactory.secure = True
95 95
96 96 # Reuse the client FURL each time the controller is started. If set, you must
97 97 # also pick a specific network port above (FCClientServiceFactory.port).
98 98 # c.FCEngineServiceFactory.reuse_furls = False
99 99
100 100 #-----------------------------------------------------------------------------
101 101 # Developer level configuration attributes
102 102 #-----------------------------------------------------------------------------
103 103
104 104 # You shouldn't have to modify anything in this section. These attributes
105 105 # are more for developers who want to change the behavior of the controller
106 106 # at a fundamental level.
107 107
108 108 # c.FCClientServiceFactory.cert_file = u'ipcontroller-client.pem'
109 109
110 110 # default_client_interfaces = Config()
111 111 # default_client_interfaces.Task.interface_chain = [
112 112 # 'IPython.kernel.task.ITaskController',
113 113 # 'IPython.kernel.taskfc.IFCTaskController'
114 114 # ]
115 115 #
116 116 # default_client_interfaces.Task.furl_file = u'ipcontroller-tc.furl'
117 117 #
118 118 # default_client_interfaces.MultiEngine.interface_chain = [
119 119 # 'IPython.kernel.multiengine.IMultiEngine',
120 120 # 'IPython.kernel.multienginefc.IFCSynchronousMultiEngine'
121 121 # ]
122 122 #
123 123 # default_client_interfaces.MultiEngine.furl_file = u'ipcontroller-mec.furl'
124 124 #
125 125 # c.FCEngineServiceFactory.interfaces = default_client_interfaces
126 126
127 127 # c.FCEngineServiceFactory.cert_file = u'ipcontroller-engine.pem'
128 128
129 129 # default_engine_interfaces = Config()
130 130 # default_engine_interfaces.Default.interface_chain = [
131 131 # 'IPython.kernel.enginefc.IFCControllerBase'
132 132 # ]
133 133 #
134 134 # default_engine_interfaces.Default.furl_file = u'ipcontroller-engine.furl'
135 135 #
136 136 # c.FCEngineServiceFactory.interfaces = default_engine_interfaces
@@ -1,90 +1,90
1 1 c = get_config()
2 2
3 3 #-----------------------------------------------------------------------------
4 4 # Global configuration
5 5 #-----------------------------------------------------------------------------
6 6
7 7 # Start up messages are logged to stdout using the logging module.
8 8 # These all happen before the twisted reactor is started and are
9 9 # useful for debugging purposes. Can be (10=DEBUG,20=INFO,30=WARN,40=CRITICAL)
10 10 # and smaller is more verbose.
11 11 # c.Global.log_level = 20
12 12
13 13 # Log to a file in cluster_dir/log, otherwise just log to sys.stdout.
14 14 # c.Global.log_to_file = False
15 15
16 16 # Remove old logs from cluster_dir/log before starting.
17 17 # c.Global.clean_logs = True
18 18
19 19 # A list of strings that will be executed in the users namespace on the engine
20 20 # before it connects to the controller.
21 21 # c.Global.exec_lines = ['import numpy']
22 22
23 23 # The engine will try to connect to the controller multiple times, to allow
24 24 # the controller time to startup and write its FURL file. These parameters
25 25 # control the number of retries (connect_max_tries) and the initial delay
26 26 # (connect_delay) between attempts. The actual delay between attempts gets
27 27 # longer each time by a factor of 1.5 (delay[i] = 1.5*delay[i-1])
28 28 # between those attempts.
29 29 # c.Global.connect_delay = 0.1
30 30 # c.Global.connect_max_tries = 15
31 31
32 32 # By default, the engine will look for the controller's FURL file in its own
33 33 # cluster directory. Sometimes, the FURL file will be elsewhere and this
34 34 # attribute can be set to the full path of the FURL file.
35 35 # c.Global.furl_file = u''
36 36
37 37 # The working directory for the process. The application will use os.chdir
38 38 # to change to this directory before starting.
39 # c.Global.working_dir = os.getcwd()
39 # c.Global.work_dir = os.getcwd()
40 40
41 41 #-----------------------------------------------------------------------------
42 42 # MPI configuration
43 43 #-----------------------------------------------------------------------------
44 44
45 45 # Upon starting the engine can be configured to call MPI_Init. This section
46 46 # configures that.
47 47
48 48 # Select which MPI section to execute to setup MPI. The value of this
49 49 # attribute must match the name of another attribute in the MPI config
50 50 # section (mpi4py, pytrilinos, etc.). This can also be set by the --mpi
51 51 # command line option.
52 52 # c.MPI.use = ''
53 53
54 54 # Initialize MPI using mpi4py. To use this, set c.MPI.use = 'mpi4py' to use
55 55 # --mpi=mpi4py at the command line.
56 56 # c.MPI.mpi4py = """from mpi4py import MPI as mpi
57 57 # mpi.size = mpi.COMM_WORLD.Get_size()
58 58 # mpi.rank = mpi.COMM_WORLD.Get_rank()
59 59 # """
60 60
61 61 # Initialize MPI using pytrilinos. To use this, set c.MPI.use = 'pytrilinos'
62 62 # to use --mpi=pytrilinos at the command line.
63 63 # c.MPI.pytrilinos = """from PyTrilinos import Epetra
64 64 # class SimpleStruct:
65 65 # pass
66 66 # mpi = SimpleStruct()
67 67 # mpi.rank = 0
68 68 # mpi.size = 0
69 69 # """
70 70
71 71 #-----------------------------------------------------------------------------
72 72 # Developer level configuration attributes
73 73 #-----------------------------------------------------------------------------
74 74
75 75 # You shouldn't have to modify anything in this section. These attributes
76 76 # are more for developers who want to change the behavior of the controller
77 77 # at a fundamental level.
78 78
79 79 # You should not have to change these attributes.
80 80
81 81 # c.Global.shell_class = 'IPython.kernel.core.interpreter.Interpreter'
82 82
83 83 # c.Global.furl_file_name = u'ipcontroller-engine.furl'
84 84
85 85
86 86
87 87
88 88
89 89
90 90
@@ -1,481 +1,481
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3 """
4 4 The IPython cluster directory
5 5 """
6 6
7 7 #-----------------------------------------------------------------------------
8 8 # Copyright (C) 2008-2009 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-----------------------------------------------------------------------------
13 13
14 14 #-----------------------------------------------------------------------------
15 15 # Imports
16 16 #-----------------------------------------------------------------------------
17 17
18 18 from __future__ import with_statement
19 19
20 20 import os
21 21 import shutil
22 22 import sys
23 23
24 24 from twisted.python import log
25 25
26 26 from IPython.core import release
27 27 from IPython.config.loader import PyFileConfigLoader
28 28 from IPython.core.application import Application
29 29 from IPython.core.component import Component
30 30 from IPython.config.loader import ArgParseConfigLoader, NoConfigDefault
31 31 from IPython.utils.traitlets import Unicode, Bool
32 32 from IPython.utils import genutils
33 33
34 34 #-----------------------------------------------------------------------------
35 35 # Imports
36 36 #-----------------------------------------------------------------------------
37 37
38 38
39 39 class ClusterDirError(Exception):
40 40 pass
41 41
42 42
43 43 class PIDFileError(Exception):
44 44 pass
45 45
46 46
47 47 class ClusterDir(Component):
48 48 """An object to manage the cluster directory and its resources.
49 49
50 50 The cluster directory is used by :command:`ipcontroller`,
51 51 :command:`ipengine` and :command:`ipcluster` to manage the
52 52 configuration, logging and security of these applications.
53 53
54 54 This object knows how to find, create and manage these directories. This
55 55 should be used by any code that wants to handle cluster directories.
56 56 """
57 57
58 58 security_dir_name = Unicode('security')
59 59 log_dir_name = Unicode('log')
60 60 pid_dir_name = Unicode('pid')
61 61 security_dir = Unicode(u'')
62 62 log_dir = Unicode(u'')
63 63 pid_dir = Unicode(u'')
64 64 location = Unicode(u'')
65 65
66 66 def __init__(self, location):
67 67 super(ClusterDir, self).__init__(None)
68 68 self.location = location
69 69
70 70 def _location_changed(self, name, old, new):
71 71 if not os.path.isdir(new):
72 72 os.makedirs(new, mode=0777)
73 73 else:
74 74 os.chmod(new, 0777)
75 75 self.security_dir = os.path.join(new, self.security_dir_name)
76 76 self.log_dir = os.path.join(new, self.log_dir_name)
77 77 self.pid_dir = os.path.join(new, self.pid_dir_name)
78 78 self.check_dirs()
79 79
80 80 def _log_dir_changed(self, name, old, new):
81 81 self.check_log_dir()
82 82
83 83 def check_log_dir(self):
84 84 if not os.path.isdir(self.log_dir):
85 85 os.mkdir(self.log_dir, 0777)
86 86 else:
87 87 os.chmod(self.log_dir, 0777)
88 88
89 89 def _security_dir_changed(self, name, old, new):
90 90 self.check_security_dir()
91 91
92 92 def check_security_dir(self):
93 93 if not os.path.isdir(self.security_dir):
94 94 os.mkdir(self.security_dir, 0700)
95 95 else:
96 96 os.chmod(self.security_dir, 0700)
97 97
98 98 def _pid_dir_changed(self, name, old, new):
99 99 self.check_pid_dir()
100 100
101 101 def check_pid_dir(self):
102 102 if not os.path.isdir(self.pid_dir):
103 103 os.mkdir(self.pid_dir, 0700)
104 104 else:
105 105 os.chmod(self.pid_dir, 0700)
106 106
107 107 def check_dirs(self):
108 108 self.check_security_dir()
109 109 self.check_log_dir()
110 110 self.check_pid_dir()
111 111
112 112 def load_config_file(self, filename):
113 113 """Load a config file from the top level of the cluster dir.
114 114
115 115 Parameters
116 116 ----------
117 117 filename : unicode or str
118 118 The filename only of the config file that must be located in
119 119 the top-level of the cluster directory.
120 120 """
121 121 loader = PyFileConfigLoader(filename, self.location)
122 122 return loader.load_config()
123 123
124 124 def copy_config_file(self, config_file, path=None, overwrite=False):
125 125 """Copy a default config file into the active cluster directory.
126 126
127 127 Default configuration files are kept in :mod:`IPython.config.default`.
128 128 This function moves these from that location to the working cluster
129 129 directory.
130 130 """
131 131 if path is None:
132 132 import IPython.config.default
133 133 path = IPython.config.default.__file__.split(os.path.sep)[:-1]
134 134 path = os.path.sep.join(path)
135 135 src = os.path.join(path, config_file)
136 136 dst = os.path.join(self.location, config_file)
137 137 if not os.path.isfile(dst) or overwrite:
138 138 shutil.copy(src, dst)
139 139
140 140 def copy_all_config_files(self, path=None, overwrite=False):
141 141 """Copy all config files into the active cluster directory."""
142 142 for f in [u'ipcontroller_config.py', u'ipengine_config.py',
143 143 u'ipcluster_config.py']:
144 144 self.copy_config_file(f, path=path, overwrite=overwrite)
145 145
146 146 @classmethod
147 147 def create_cluster_dir(csl, cluster_dir):
148 148 """Create a new cluster directory given a full path.
149 149
150 150 Parameters
151 151 ----------
152 152 cluster_dir : str
153 153 The full path to the cluster directory. If it does exist, it will
154 154 be used. If not, it will be created.
155 155 """
156 156 return ClusterDir(cluster_dir)
157 157
158 158 @classmethod
159 159 def create_cluster_dir_by_profile(cls, path, profile=u'default'):
160 160 """Create a cluster dir by profile name and path.
161 161
162 162 Parameters
163 163 ----------
164 164 path : str
165 165 The path (directory) to put the cluster directory in.
166 166 profile : str
167 167 The name of the profile. The name of the cluster directory will
168 168 be "cluster_<profile>".
169 169 """
170 170 if not os.path.isdir(path):
171 171 raise ClusterDirError('Directory not found: %s' % path)
172 172 cluster_dir = os.path.join(path, u'cluster_' + profile)
173 173 return ClusterDir(cluster_dir)
174 174
175 175 @classmethod
176 176 def find_cluster_dir_by_profile(cls, ipython_dir, profile=u'default'):
177 177 """Find an existing cluster dir by profile name, return its ClusterDir.
178 178
179 179 This searches through a sequence of paths for a cluster dir. If it
180 180 is not found, a :class:`ClusterDirError` exception will be raised.
181 181
182 182 The search path algorithm is:
183 183 1. ``os.getcwd()``
184 184 2. ``ipython_dir``
185 185 3. The directories found in the ":" separated
186 186 :env:`IPCLUSTER_DIR_PATH` environment variable.
187 187
188 188 Parameters
189 189 ----------
190 190 ipython_dir : unicode or str
191 191 The IPython directory to use.
192 192 profile : unicode or str
193 193 The name of the profile. The name of the cluster directory
194 194 will be "cluster_<profile>".
195 195 """
196 196 dirname = u'cluster_' + profile
197 197 cluster_dir_paths = os.environ.get('IPCLUSTER_DIR_PATH','')
198 198 if cluster_dir_paths:
199 199 cluster_dir_paths = cluster_dir_paths.split(':')
200 200 else:
201 201 cluster_dir_paths = []
202 202 paths = [os.getcwd(), ipython_dir] + cluster_dir_paths
203 203 for p in paths:
204 204 cluster_dir = os.path.join(p, dirname)
205 205 if os.path.isdir(cluster_dir):
206 206 return ClusterDir(cluster_dir)
207 207 else:
208 208 raise ClusterDirError('Cluster directory not found in paths: %s' % dirname)
209 209
210 210 @classmethod
211 211 def find_cluster_dir(cls, cluster_dir):
212 212 """Find/create a cluster dir and return its ClusterDir.
213 213
214 214 This will create the cluster directory if it doesn't exist.
215 215
216 216 Parameters
217 217 ----------
218 218 cluster_dir : unicode or str
219 219 The path of the cluster directory. This is expanded using
220 220 :func:`IPython.utils.genutils.expand_path`.
221 221 """
222 222 cluster_dir = genutils.expand_path(cluster_dir)
223 223 if not os.path.isdir(cluster_dir):
224 224 raise ClusterDirError('Cluster directory not found: %s' % cluster_dir)
225 225 return ClusterDir(cluster_dir)
226 226
227 227
228 228 class AppWithClusterDirArgParseConfigLoader(ArgParseConfigLoader):
229 229 """Default command line options for IPython cluster applications."""
230 230
231 231 def _add_other_arguments(self):
232 232 self.parser.add_argument('--ipython-dir',
233 233 dest='Global.ipython_dir',type=unicode,
234 234 help='Set to override default location of Global.ipython_dir.',
235 235 default=NoConfigDefault,
236 236 metavar='Global.ipython_dir'
237 237 )
238 238 self.parser.add_argument('-p', '--profile',
239 239 dest='Global.profile',type=unicode,
240 240 help='The string name of the profile to be used. This determines '
241 241 'the name of the cluster dir as: cluster_<profile>. The default profile '
242 242 'is named "default". The cluster directory is resolve this way '
243 243 'if the --cluster-dir option is not used.',
244 244 default=NoConfigDefault,
245 245 metavar='Global.profile'
246 246 )
247 247 self.parser.add_argument('--log-level',
248 248 dest="Global.log_level",type=int,
249 249 help='Set the log level (0,10,20,30,40,50). Default is 30.',
250 250 default=NoConfigDefault,
251 251 metavar="Global.log_level"
252 252 )
253 253 self.parser.add_argument('--cluster-dir',
254 254 dest='Global.cluster_dir',type=unicode,
255 255 help='Set the cluster dir. This overrides the logic used by the '
256 256 '--profile option.',
257 257 default=NoConfigDefault,
258 258 metavar='Global.cluster_dir'
259 259 ),
260 self.parser.add_argument('--working-dir',
261 dest='Global.working_dir',type=unicode,
260 self.parser.add_argument('--work-dir',
261 dest='Global.work_dir',type=unicode,
262 262 help='Set the working dir for the process.',
263 263 default=NoConfigDefault,
264 metavar='Global.working_dir'
264 metavar='Global.work_dir'
265 265 )
266 266 self.parser.add_argument('--clean-logs',
267 267 dest='Global.clean_logs', action='store_true',
268 268 help='Delete old log files before starting.',
269 269 default=NoConfigDefault
270 270 )
271 271 self.parser.add_argument('--no-clean-logs',
272 272 dest='Global.clean_logs', action='store_false',
273 273 help="Don't delete old log files before starting.",
274 274 default=NoConfigDefault
275 275 )
276 276
277 277 class ApplicationWithClusterDir(Application):
278 278 """An application that puts everything into a cluster directory.
279 279
280 280 Instead of looking for things in the ipython_dir, this type of application
281 281 will use its own private directory called the "cluster directory"
282 282 for things like config files, log files, etc.
283 283
284 284 The cluster directory is resolved as follows:
285 285
286 286 * If the ``--cluster-dir`` option is given, it is used.
287 287 * If ``--cluster-dir`` is not given, the application directory is
288 288 resolved using the profile name as ``cluster_<profile>``. The search
289 289 path for this directory is then i) cwd if it is found there
290 290 and ii) in ipython_dir otherwise.
291 291
292 292 The config file for the application is to be put in the cluster
293 293 dir and named the value of the ``config_file_name`` class attribute.
294 294 """
295 295
296 296 auto_create_cluster_dir = True
297 297
298 298 def create_default_config(self):
299 299 super(ApplicationWithClusterDir, self).create_default_config()
300 300 self.default_config.Global.profile = u'default'
301 301 self.default_config.Global.cluster_dir = u''
302 self.default_config.Global.working_dir = os.getcwd()
302 self.default_config.Global.work_dir = os.getcwd()
303 303 self.default_config.Global.log_to_file = False
304 304 self.default_config.Global.clean_logs = False
305 305
306 306 def create_command_line_config(self):
307 307 """Create and return a command line config loader."""
308 308 return AppWithClusterDirArgParseConfigLoader(
309 309 description=self.description,
310 310 version=release.version
311 311 )
312 312
313 313 def find_resources(self):
314 314 """This resolves the cluster directory.
315 315
316 316 This tries to find the cluster directory and if successful, it will
317 317 have done:
318 318 * Sets ``self.cluster_dir_obj`` to the :class:`ClusterDir` object for
319 319 the application.
320 320 * Sets ``self.cluster_dir`` attribute of the application and config
321 321 objects.
322 322
323 323 The algorithm used for this is as follows:
324 324 1. Try ``Global.cluster_dir``.
325 325 2. Try using ``Global.profile``.
326 326 3. If both of these fail and ``self.auto_create_cluster_dir`` is
327 327 ``True``, then create the new cluster dir in the IPython directory.
328 328 4. If all fails, then raise :class:`ClusterDirError`.
329 329 """
330 330
331 331 try:
332 332 cluster_dir = self.command_line_config.Global.cluster_dir
333 333 except AttributeError:
334 334 cluster_dir = self.default_config.Global.cluster_dir
335 335 cluster_dir = genutils.expand_path(cluster_dir)
336 336 try:
337 337 self.cluster_dir_obj = ClusterDir.find_cluster_dir(cluster_dir)
338 338 except ClusterDirError:
339 339 pass
340 340 else:
341 341 self.log.info('Using existing cluster dir: %s' % \
342 342 self.cluster_dir_obj.location
343 343 )
344 344 self.finish_cluster_dir()
345 345 return
346 346
347 347 try:
348 348 self.profile = self.command_line_config.Global.profile
349 349 except AttributeError:
350 350 self.profile = self.default_config.Global.profile
351 351 try:
352 352 self.cluster_dir_obj = ClusterDir.find_cluster_dir_by_profile(
353 353 self.ipython_dir, self.profile)
354 354 except ClusterDirError:
355 355 pass
356 356 else:
357 357 self.log.info('Using existing cluster dir: %s' % \
358 358 self.cluster_dir_obj.location
359 359 )
360 360 self.finish_cluster_dir()
361 361 return
362 362
363 363 if self.auto_create_cluster_dir:
364 364 self.cluster_dir_obj = ClusterDir.create_cluster_dir_by_profile(
365 365 self.ipython_dir, self.profile
366 366 )
367 367 self.log.info('Creating new cluster dir: %s' % \
368 368 self.cluster_dir_obj.location
369 369 )
370 370 self.finish_cluster_dir()
371 371 else:
372 372 raise ClusterDirError('Could not find a valid cluster directory.')
373 373
374 374 def finish_cluster_dir(self):
375 375 # Set the cluster directory
376 376 self.cluster_dir = self.cluster_dir_obj.location
377 377
378 378 # These have to be set because they could be different from the one
379 379 # that we just computed. Because command line has the highest
380 380 # priority, this will always end up in the master_config.
381 381 self.default_config.Global.cluster_dir = self.cluster_dir
382 382 self.command_line_config.Global.cluster_dir = self.cluster_dir
383 383
384 384 # Set the search path to the cluster directory
385 385 self.config_file_paths = (self.cluster_dir,)
386 386
387 387 def find_config_file_name(self):
388 388 """Find the config file name for this application."""
389 389 # For this type of Application it should be set as a class attribute.
390 390 if not hasattr(self, 'config_file_name'):
391 391 self.log.critical("No config filename found")
392 392
393 393 def find_config_file_paths(self):
394 394 # Set the search path to the cluster directory
395 395 self.config_file_paths = (self.cluster_dir,)
396 396
397 397 def pre_construct(self):
398 398 # The log and security dirs were set earlier, but here we put them
399 399 # into the config and log them.
400 400 config = self.master_config
401 401 sdir = self.cluster_dir_obj.security_dir
402 402 self.security_dir = config.Global.security_dir = sdir
403 403 ldir = self.cluster_dir_obj.log_dir
404 404 self.log_dir = config.Global.log_dir = ldir
405 405 pdir = self.cluster_dir_obj.pid_dir
406 406 self.pid_dir = config.Global.pid_dir = pdir
407 407 self.log.info("Cluster directory set to: %s" % self.cluster_dir)
408 config.Global.working_dir = unicode(genutils.expand_path(config.Global.working_dir))
408 config.Global.work_dir = unicode(genutils.expand_path(config.Global.work_dir))
409 409 # Change to the working directory. We do this just before construct
410 410 # is called so all the components there have the right working dir.
411 self.to_working_dir()
411 self.to_work_dir()
412 412
413 def to_working_dir(self):
414 wd = self.master_config.Global.working_dir
413 def to_work_dir(self):
414 wd = self.master_config.Global.work_dir
415 415 if unicode(wd) != unicode(os.getcwd()):
416 416 os.chdir(wd)
417 417 self.log.info("Changing to working dir: %s" % wd)
418 418
419 419 def start_logging(self):
420 420 # Remove old log files
421 421 if self.master_config.Global.clean_logs:
422 422 log_dir = self.master_config.Global.log_dir
423 423 for f in os.listdir(log_dir):
424 424 if f.startswith(self.name + u'-') and f.endswith('.log'):
425 425 os.remove(os.path.join(log_dir, f))
426 426 # Start logging to the new log file
427 427 if self.master_config.Global.log_to_file:
428 428 log_filename = self.name + u'-' + str(os.getpid()) + u'.log'
429 429 logfile = os.path.join(self.log_dir, log_filename)
430 430 open_log_file = open(logfile, 'w')
431 431 else:
432 432 open_log_file = sys.stdout
433 433 log.startLogging(open_log_file)
434 434
435 435 def write_pid_file(self, overwrite=False):
436 436 """Create a .pid file in the pid_dir with my pid.
437 437
438 438 This must be called after pre_construct, which sets `self.pid_dir`.
439 439 This raises :exc:`PIDFileError` if the pid file exists already.
440 440 """
441 441 pid_file = os.path.join(self.pid_dir, self.name + u'.pid')
442 442 if os.path.isfile(pid_file):
443 443 pid = self.get_pid_from_file()
444 444 if not overwrite:
445 445 raise PIDFileError(
446 446 'The pid file [%s] already exists. \nThis could mean that this '
447 447 'server is already running with [pid=%s].' % (pid_file, pid)
448 448 )
449 449 with open(pid_file, 'w') as f:
450 450 self.log.info("Creating pid file: %s" % pid_file)
451 451 f.write(repr(os.getpid())+'\n')
452 452
453 453 def remove_pid_file(self):
454 454 """Remove the pid file.
455 455
456 456 This should be called at shutdown by registering a callback with
457 457 :func:`reactor.addSystemEventTrigger`. This needs to return
458 458 ``None``.
459 459 """
460 460 pid_file = os.path.join(self.pid_dir, self.name + u'.pid')
461 461 if os.path.isfile(pid_file):
462 462 try:
463 463 self.log.info("Removing pid file: %s" % pid_file)
464 464 os.remove(pid_file)
465 465 except:
466 466 self.log.warn("Error removing the pid file: %s" % pid_file)
467 467
468 468 def get_pid_from_file(self):
469 469 """Get the pid from the pid file.
470 470
471 471 If the pid file doesn't exist a :exc:`PIDFileError` is raised.
472 472 """
473 473 pid_file = os.path.join(self.pid_dir, self.name + u'.pid')
474 474 if os.path.isfile(pid_file):
475 475 with open(pid_file, 'r') as f:
476 476 pid = int(f.read().strip())
477 477 return pid
478 478 else:
479 479 raise PIDFileError('pid file not found: %s' % pid_file)
480 480
481 481
@@ -1,454 +1,457
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3 """
4 4 The ipcluster application.
5 5 """
6 6
7 7 #-----------------------------------------------------------------------------
8 8 # Copyright (C) 2008-2009 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-----------------------------------------------------------------------------
13 13
14 14 #-----------------------------------------------------------------------------
15 15 # Imports
16 16 #-----------------------------------------------------------------------------
17 17
18 18 import logging
19 19 import os
20 20 import signal
21 21 import sys
22 22
23 23 if os.name=='posix':
24 24 from twisted.scripts._twistd_unix import daemonize
25 25
26 26 from IPython.core import release
27 27 from IPython.external import argparse
28 28 from IPython.config.loader import ArgParseConfigLoader, NoConfigDefault
29 29 from IPython.utils.importstring import import_item
30 30
31 31 from IPython.kernel.clusterdir import (
32 32 ApplicationWithClusterDir, ClusterDirError, PIDFileError
33 33 )
34 34
35 35 from twisted.internet import reactor, defer
36 36 from twisted.python import log, failure
37 37
38 38
39 39 #-----------------------------------------------------------------------------
40 40 # The ipcluster application
41 41 #-----------------------------------------------------------------------------
42 42
43 43
44 44 # Exit codes for ipcluster
45 45
46 46 # This will be the exit code if the ipcluster appears to be running because
47 47 # a .pid file exists
48 48 ALREADY_STARTED = 10
49 49
50 50 # This will be the exit code if ipcluster stop is run, but there is not .pid
51 51 # file to be found.
52 52 ALREADY_STOPPED = 11
53 53
54 54
55 55 class IPClusterCLLoader(ArgParseConfigLoader):
56 56
57 57 def _add_arguments(self):
58 58 # This has all the common options that all subcommands use
59 59 parent_parser1 = argparse.ArgumentParser(add_help=False)
60 60 parent_parser1.add_argument('--ipython-dir',
61 61 dest='Global.ipython_dir',type=unicode,
62 62 help='Set to override default location of Global.ipython_dir.',
63 63 default=NoConfigDefault,
64 64 metavar='Global.ipython_dir')
65 65 parent_parser1.add_argument('--log-level',
66 66 dest="Global.log_level",type=int,
67 67 help='Set the log level (0,10,20,30,40,50). Default is 30.',
68 68 default=NoConfigDefault,
69 69 metavar='Global.log_level')
70 70
71 71 # This has all the common options that other subcommands use
72 72 parent_parser2 = argparse.ArgumentParser(add_help=False)
73 73 parent_parser2.add_argument('-p','--profile',
74 74 dest='Global.profile',type=unicode,
75 75 default=NoConfigDefault,
76 76 help='The string name of the profile to be used. This determines '
77 77 'the name of the cluster dir as: cluster_<profile>. The default profile '
78 78 'is named "default". The cluster directory is resolved this way '
79 79 'if the --cluster-dir option is not used.',
80 80 default=NoConfigDefault,
81 81 metavar='Global.profile')
82 82 parent_parser2.add_argument('--cluster-dir',
83 83 dest='Global.cluster_dir',type=unicode,
84 84 default=NoConfigDefault,
85 85 help='Set the cluster dir. This overrides the logic used by the '
86 86 '--profile option.',
87 87 default=NoConfigDefault,
88 88 metavar='Global.cluster_dir'),
89 parent_parser2.add_argument('--working-dir',
90 dest='Global.working_dir',type=unicode,
89 parent_parser2.add_argument('--work-dir',
90 dest='Global.work_dir',type=unicode,
91 91 help='Set the working dir for the process.',
92 92 default=NoConfigDefault,
93 metavar='Global.working_dir')
93 metavar='Global.work_dir')
94 94 parent_parser2.add_argument('--log-to-file',
95 95 action='store_true', dest='Global.log_to_file',
96 96 default=NoConfigDefault,
97 97 help='Log to a file in the log directory (default is stdout)'
98 98 )
99 99
100 100 subparsers = self.parser.add_subparsers(
101 101 dest='Global.subcommand',
102 102 title='ipcluster subcommands',
103 103 description='ipcluster has a variety of subcommands. '
104 104 'The general way of running ipcluster is "ipcluster <cmd> '
105 105 ' [options]""',
106 106 help='For more help, type "ipcluster <cmd> -h"')
107 107
108 108 parser_list = subparsers.add_parser(
109 109 'list',
110 110 help='List all clusters in cwd and ipython_dir.',
111 111 parents=[parent_parser1]
112 112 )
113 113
114 114 parser_create = subparsers.add_parser(
115 115 'create',
116 116 help='Create a new cluster directory.',
117 117 parents=[parent_parser1, parent_parser2]
118 118 )
119 119 parser_create.add_argument(
120 120 '--reset-config',
121 121 dest='Global.reset_config', action='store_true',
122 122 default=NoConfigDefault,
123 123 help='Recopy the default config files to the cluster directory. '
124 124 'You will lose any modifications you have made to these files.'
125 125 )
126 126
127 127 parser_start = subparsers.add_parser(
128 128 'start',
129 129 help='Start a cluster.',
130 130 parents=[parent_parser1, parent_parser2]
131 131 )
132 132 parser_start.add_argument(
133 133 '-n', '--number',
134 134 type=int, dest='Global.n',
135 135 default=NoConfigDefault,
136 136 help='The number of engines to start.',
137 137 metavar='Global.n'
138 138 )
139 139 parser_start.add_argument('--clean-logs',
140 140 dest='Global.clean_logs', action='store_true',
141 141 help='Delete old log files before starting.',
142 142 default=NoConfigDefault
143 143 )
144 144 parser_start.add_argument('--no-clean-logs',
145 145 dest='Global.clean_logs', action='store_false',
146 146 help="Don't delete old log files before starting.",
147 147 default=NoConfigDefault
148 148 )
149 149 parser_start.add_argument('--daemon',
150 150 dest='Global.daemonize', action='store_true',
151 151 help='Daemonize the ipcluster program. This implies --log-to-file',
152 152 default=NoConfigDefault
153 153 )
154 154 parser_start.add_argument('--no-daemon',
155 155 dest='Global.daemonize', action='store_false',
156 156 help="Don't daemonize the ipcluster program.",
157 157 default=NoConfigDefault
158 158 )
159 159
160 160 parser_start = subparsers.add_parser(
161 161 'stop',
162 162 help='Stop a cluster.',
163 163 parents=[parent_parser1, parent_parser2]
164 164 )
165 165 parser_start.add_argument('--signal',
166 166 dest='Global.signal', type=int,
167 167 help="The signal number to use in stopping the cluster (default=2).",
168 168 metavar="Global.signal",
169 169 default=NoConfigDefault
170 170 )
171 171
172 172
173 173 default_config_file_name = u'ipcluster_config.py'
174 174
175 175
176 176 class IPClusterApp(ApplicationWithClusterDir):
177 177
178 178 name = u'ipcluster'
179 179 description = 'Start an IPython cluster (controller and engines).'
180 180 config_file_name = default_config_file_name
181 181 default_log_level = logging.INFO
182 182 auto_create_cluster_dir = False
183 183
184 184 def create_default_config(self):
185 185 super(IPClusterApp, self).create_default_config()
186 186 self.default_config.Global.controller_launcher = \
187 187 'IPython.kernel.launcher.LocalControllerLauncher'
188 188 self.default_config.Global.engine_launcher = \
189 189 'IPython.kernel.launcher.LocalEngineSetLauncher'
190 190 self.default_config.Global.n = 2
191 191 self.default_config.Global.reset_config = False
192 192 self.default_config.Global.clean_logs = True
193 193 self.default_config.Global.signal = 2
194 194 self.default_config.Global.daemonize = False
195 195
196 196 def create_command_line_config(self):
197 197 """Create and return a command line config loader."""
198 198 return IPClusterCLLoader(
199 199 description=self.description,
200 200 version=release.version
201 201 )
202 202
203 203 def find_resources(self):
204 204 subcommand = self.command_line_config.Global.subcommand
205 205 if subcommand=='list':
206 206 self.list_cluster_dirs()
207 207 # Exit immediately because there is nothing left to do.
208 208 self.exit()
209 209 elif subcommand=='create':
210 210 self.auto_create_cluster_dir = True
211 211 super(IPClusterApp, self).find_resources()
212 212 elif subcommand=='start' or subcommand=='stop':
213 213 self.auto_create_cluster_dir = False
214 214 try:
215 215 super(IPClusterApp, self).find_resources()
216 216 except ClusterDirError:
217 217 raise ClusterDirError(
218 218 "Could not find a cluster directory. A cluster dir must "
219 219 "be created before running 'ipcluster start'. Do "
220 220 "'ipcluster create -h' or 'ipcluster list -h' for more "
221 221 "information about creating and listing cluster dirs."
222 222 )
223 223
224 224 def list_cluster_dirs(self):
225 225 # Find the search paths
226 226 cluster_dir_paths = os.environ.get('IPCLUSTER_DIR_PATH','')
227 227 if cluster_dir_paths:
228 228 cluster_dir_paths = cluster_dir_paths.split(':')
229 229 else:
230 230 cluster_dir_paths = []
231 231 try:
232 232 ipython_dir = self.command_line_config.Global.ipython_dir
233 233 except AttributeError:
234 234 ipython_dir = self.default_config.Global.ipython_dir
235 235 paths = [os.getcwd(), ipython_dir] + \
236 236 cluster_dir_paths
237 237 paths = list(set(paths))
238 238
239 239 self.log.info('Searching for cluster dirs in paths: %r' % paths)
240 240 for path in paths:
241 241 files = os.listdir(path)
242 242 for f in files:
243 243 full_path = os.path.join(path, f)
244 244 if os.path.isdir(full_path) and f.startswith('cluster_'):
245 245 profile = full_path.split('_')[-1]
246 246 start_cmd = '"ipcluster start -n 4 -p %s"' % profile
247 247 print start_cmd + " ==> " + full_path
248 248
249 249 def pre_construct(self):
250 # This is where we cd to the working directory.
250 # IPClusterApp.pre_construct() is where we cd to the working directory.
251 251 super(IPClusterApp, self).pre_construct()
252 252 config = self.master_config
253 253 try:
254 254 daemon = config.Global.daemonize
255 255 if daemon:
256 256 config.Global.log_to_file = True
257 257 except AttributeError:
258 258 pass
259 259
260 260 def construct(self):
261 261 config = self.master_config
262 262 if config.Global.subcommand=='list':
263 263 pass
264 264 elif config.Global.subcommand=='create':
265 265 self.log.info('Copying default config files to cluster directory '
266 266 '[overwrite=%r]' % (config.Global.reset_config,))
267 267 self.cluster_dir_obj.copy_all_config_files(overwrite=config.Global.reset_config)
268 268 elif config.Global.subcommand=='start':
269 269 self.start_logging()
270 270 reactor.callWhenRunning(self.start_launchers)
271 271
272 272 def start_launchers(self):
273 273 config = self.master_config
274 274
275 # Create the launchers
275 # Create the launchers. In both cases, we set the work_dir of
276 # the launcher to the cluster_dir. This is where the launcher's
277 # subprocesses will be launched. It is not where the controller
278 # and engine will be launched.
276 279 el_class = import_item(config.Global.engine_launcher)
277 280 self.engine_launcher = el_class(
278 self.cluster_dir, config=config
281 work_dir=self.cluster_dir, config=config
279 282 )
280 283 cl_class = import_item(config.Global.controller_launcher)
281 284 self.controller_launcher = cl_class(
282 self.cluster_dir, config=config
285 work_dir=self.cluster_dir, config=config
283 286 )
284 287
285 288 # Setup signals
286 289 signal.signal(signal.SIGINT, self.sigint_handler)
287 290
288 291 # Setup the observing of stopping. If the controller dies, shut
289 292 # everything down as that will be completely fatal for the engines.
290 293 d1 = self.controller_launcher.observe_stop()
291 294 d1.addCallback(self.stop_launchers)
292 295 # But, we don't monitor the stopping of engines. An engine dying
293 296 # is just fine and in principle a user could start a new engine.
294 297 # Also, if we did monitor engine stopping, it is difficult to
295 298 # know what to do when only some engines die. Currently, the
296 299 # observing of engine stopping is inconsistent. Some launchers
297 300 # might trigger on a single engine stopping, other wait until
298 301 # all stop. TODO: think more about how to handle this.
299 302
300 303 # Start the controller and engines
301 304 self._stopping = False # Make sure stop_launchers is not called 2x.
302 305 d = self.start_controller()
303 306 d.addCallback(self.start_engines)
304 307 d.addCallback(self.startup_message)
305 308 # If the controller or engines fail to start, stop everything
306 309 d.addErrback(self.stop_launchers)
307 310 return d
308 311
309 312 def startup_message(self, r=None):
310 313 log.msg("IPython cluster: started")
311 314 return r
312 315
313 316 def start_controller(self, r=None):
314 317 # log.msg("In start_controller")
315 318 config = self.master_config
316 319 d = self.controller_launcher.start(
317 320 cluster_dir=config.Global.cluster_dir
318 321 )
319 322 return d
320 323
321 324 def start_engines(self, r=None):
322 325 # log.msg("In start_engines")
323 326 config = self.master_config
324 327 d = self.engine_launcher.start(
325 328 config.Global.n,
326 329 cluster_dir=config.Global.cluster_dir
327 330 )
328 331 return d
329 332
330 333 def stop_controller(self, r=None):
331 334 # log.msg("In stop_controller")
332 335 if self.controller_launcher.running:
333 336 d = self.controller_launcher.stop()
334 337 d.addErrback(self.log_err)
335 338 return d
336 339 else:
337 340 return defer.succeed(None)
338 341
339 342 def stop_engines(self, r=None):
340 343 # log.msg("In stop_engines")
341 344 if self.engine_launcher.running:
342 345 d = self.engine_launcher.stop()
343 346 d.addErrback(self.log_err)
344 347 return d
345 348 else:
346 349 return defer.succeed(None)
347 350
348 351 def log_err(self, f):
349 352 log.msg(f.getTraceback())
350 353 return None
351 354
352 355 def stop_launchers(self, r=None):
353 356 if not self._stopping:
354 357 self._stopping = True
355 358 if isinstance(r, failure.Failure):
356 359 log.msg('Unexpected error in ipcluster:')
357 360 log.msg(r.getTraceback())
358 361 log.msg("IPython cluster: stopping")
359 362 d= self.stop_engines()
360 363 d2 = self.stop_controller()
361 364 # Wait a few seconds to let things shut down.
362 365 reactor.callLater(3.0, reactor.stop)
363 366
364 367 def sigint_handler(self, signum, frame):
365 368 self.stop_launchers()
366 369
367 370 def start_logging(self):
368 371 # Remove old log files of the controller and engine
369 372 if self.master_config.Global.clean_logs:
370 373 log_dir = self.master_config.Global.log_dir
371 374 for f in os.listdir(log_dir):
372 375 if f.startswith('ipengine' + '-'):
373 376 if f.endswith('.log') or f.endswith('.out') or f.endswith('.err'):
374 377 os.remove(os.path.join(log_dir, f))
375 378 if f.startswith('ipcontroller' + '-'):
376 379 if f.endswith('.log') or f.endswith('.out') or f.endswith('.err'):
377 380 os.remove(os.path.join(log_dir, f))
378 381 # This will remove old log files for ipcluster itself
379 382 super(IPClusterApp, self).start_logging()
380 383
381 384 def start_app(self):
382 385 """Start the application, depending on what subcommand is used."""
383 386 subcmd = self.master_config.Global.subcommand
384 387 if subcmd=='create' or subcmd=='list':
385 388 return
386 389 elif subcmd=='start':
387 390 self.start_app_start()
388 391 elif subcmd=='stop':
389 392 self.start_app_stop()
390 393
391 394 def start_app_start(self):
392 395 """Start the app for the start subcommand."""
393 396 config = self.master_config
394 397 # First see if the cluster is already running
395 398 try:
396 399 pid = self.get_pid_from_file()
397 400 except PIDFileError:
398 401 pass
399 402 else:
400 403 self.log.critical(
401 404 'Cluster is already running with [pid=%s]. '
402 405 'use "ipcluster stop" to stop the cluster.' % pid
403 406 )
404 407 # Here I exit with an unusual exit status that other processes
405 408 # can watch for to learn how I exited.
406 409 self.exit(ALREADY_STARTED)
407 410
408 411 # Now log and daemonize
409 412 self.log.info(
410 413 'Starting ipcluster with [daemon=%r]' % config.Global.daemonize
411 414 )
412 415 if config.Global.daemonize:
413 416 if os.name=='posix':
414 417 daemonize()
415 418
416 419 # Now write the new pid file AFTER our new forked pid is active.
417 420 self.write_pid_file()
418 421 reactor.addSystemEventTrigger('during','shutdown', self.remove_pid_file)
419 422 reactor.run()
420 423
421 424 def start_app_stop(self):
422 425 """Start the app for the stop subcommand."""
423 426 config = self.master_config
424 427 try:
425 428 pid = self.get_pid_from_file()
426 429 except PIDFileError:
427 430 self.log.critical(
428 431 'Problem reading pid file, cluster is probably not running.'
429 432 )
430 433 # Here I exit with an unusual exit status that other processes
431 434 # can watch for to learn how I exited.
432 435 self.exit(ALREADY_STOPPED)
433 436 else:
434 437 if os.name=='posix':
435 438 sig = config.Global.signal
436 439 self.log.info(
437 440 "Stopping cluster [pid=%r] with [signal=%r]" % (pid, sig)
438 441 )
439 442 os.kill(pid, sig)
440 443 elif os.name=='nt':
441 444 # As of right now, we don't support daemonize on Windows, so
442 445 # stop will not do anything. Minimally, it should clean up the
443 446 # old .pid files.
444 447 self.remove_pid_file()
445 448
446 449 def launch_new_instance():
447 450 """Create and run the IPython cluster."""
448 451 app = IPClusterApp()
449 452 app.start()
450 453
451 454
452 455 if __name__ == '__main__':
453 456 launch_new_instance()
454 457
@@ -1,867 +1,867
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3 """
4 4 Facilities for launching IPython processes asynchronously.
5 5 """
6 6
7 7 #-----------------------------------------------------------------------------
8 8 # Copyright (C) 2008-2009 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-----------------------------------------------------------------------------
13 13
14 14 #-----------------------------------------------------------------------------
15 15 # Imports
16 16 #-----------------------------------------------------------------------------
17 17
18 18 import os
19 19 import re
20 20 import sys
21 21
22 22 from IPython.core.component import Component
23 23 from IPython.external import Itpl
24 24 from IPython.utils.traitlets import Str, Int, List, Unicode, Enum
25 25 from IPython.utils.platutils import find_cmd
26 26 from IPython.kernel.twistedutil import gatherBoth, make_deferred, sleep_deferred
27 27 from IPython.kernel.winhpcjob import (
28 28 WinHPCJob, WinHPCTask,
29 29 IPControllerTask, IPEngineTask,
30 30 IPControllerJob, IPEngineSetJob
31 31 )
32 32
33 33 from twisted.internet import reactor, defer
34 34 from twisted.internet.defer import inlineCallbacks
35 35 from twisted.internet.protocol import ProcessProtocol
36 36 from twisted.internet.utils import getProcessOutput
37 37 from twisted.internet.error import ProcessDone, ProcessTerminated
38 38 from twisted.python import log
39 39 from twisted.python.failure import Failure
40 40
41 41 #-----------------------------------------------------------------------------
42 42 # Utilities
43 43 #-----------------------------------------------------------------------------
44 44
45 45
46 46 def find_controller_cmd():
47 47 """Find the command line ipcontroller program in a cross platform way."""
48 48 if sys.platform == 'win32':
49 49 # This logic is needed because the ipcontroller script doesn't
50 50 # always get installed in the same way or in the same location.
51 51 from IPython.kernel import ipcontrollerapp
52 52 script_location = ipcontrollerapp.__file__.replace('.pyc', '.py')
53 53 # The -u option here turns on unbuffered output, which is required
54 54 # on Win32 to prevent weird conflicts and problems with Twisted.
55 55 # Also, use sys.executable to make sure we are picking up the
56 56 # right python exe.
57 57 cmd = [sys.executable, '-u', script_location]
58 58 else:
59 59 # ipcontroller has to be on the PATH in this case.
60 60 cmd = ['ipcontroller']
61 61 return cmd
62 62
63 63
64 64 def find_engine_cmd():
65 65 """Find the command line ipengine program in a cross platform way."""
66 66 if sys.platform == 'win32':
67 67 # This logic is needed because the ipengine script doesn't
68 68 # always get installed in the same way or in the same location.
69 69 from IPython.kernel import ipengineapp
70 70 script_location = ipengineapp.__file__.replace('.pyc', '.py')
71 71 # The -u option here turns on unbuffered output, which is required
72 72 # on Win32 to prevent weird conflicts and problems with Twisted.
73 73 # Also, use sys.executable to make sure we are picking up the
74 74 # right python exe.
75 75 cmd = [sys.executable, '-u', script_location]
76 76 else:
77 77 # ipengine has to be on the PATH in this case.
78 78 cmd = ['ipengine']
79 79 return cmd
80 80
81 81
82 82 #-----------------------------------------------------------------------------
83 83 # Base launchers and errors
84 84 #-----------------------------------------------------------------------------
85 85
86 86
87 87 class LauncherError(Exception):
88 88 pass
89 89
90 90
91 91 class ProcessStateError(LauncherError):
92 92 pass
93 93
94 94
95 95 class UnknownStatus(LauncherError):
96 96 pass
97 97
98 98
99 99 class BaseLauncher(Component):
100 100 """An abstraction for starting, stopping and signaling a process."""
101 101
102 working_dir = Unicode(u'')
103
104 def __init__(self, working_dir, parent=None, name=None, config=None):
102 # In all of the launchers, the work_dir is where child processes will be
103 # run. This will usually be the cluster_dir, but may not be. any work_dir
104 # passed into the __init__ method will override the config value.
105 # This should not be used to set the work_dir for the actual engine
106 # and controller. Instead, use their own config files or the
107 # controller_args, engine_args attributes of the launchers to add
108 # the --work-dir option.
109 work_dir = Unicode(u'')
110
111 def __init__(self, work_dir, parent=None, name=None, config=None):
105 112 super(BaseLauncher, self).__init__(parent, name, config)
106 self.working_dir = working_dir
113 self.work_dir = work_dir
107 114 self.state = 'before' # can be before, running, after
108 115 self.stop_deferreds = []
109 116 self.start_data = None
110 117 self.stop_data = None
111 118
112 119 @property
113 120 def args(self):
114 121 """A list of cmd and args that will be used to start the process.
115 122
116 123 This is what is passed to :func:`spawnProcess` and the first element
117 124 will be the process name.
118 125 """
119 126 return self.find_args()
120 127
121 128 def find_args(self):
122 129 """The ``.args`` property calls this to find the args list.
123 130
124 131 Subclasses should implement this to construct the cmd and args.
125 132 """
126 133 raise NotImplementedError('find_args must be implemented in a subclass')
127 134
128 135 @property
129 136 def arg_str(self):
130 137 """The string form of the program arguments."""
131 138 return ' '.join(self.args)
132 139
133 140 @property
134 141 def running(self):
135 142 """Am I running."""
136 143 if self.state == 'running':
137 144 return True
138 145 else:
139 146 return False
140 147
141 148 def start(self):
142 149 """Start the process.
143 150
144 151 This must return a deferred that fires with information about the
145 152 process starting (like a pid, job id, etc.).
146 153 """
147 154 return defer.fail(
148 155 Failure(NotImplementedError(
149 156 'start must be implemented in a subclass')
150 157 )
151 158 )
152 159
153 160 def stop(self):
154 161 """Stop the process and notify observers of stopping.
155 162
156 163 This must return a deferred that fires with information about the
157 164 processing stopping, like errors that occur while the process is
158 165 attempting to be shut down. This deferred won't fire when the process
159 166 actually stops. To observe the actual process stopping, see
160 167 :func:`observe_stop`.
161 168 """
162 169 return defer.fail(
163 170 Failure(NotImplementedError(
164 171 'stop must be implemented in a subclass')
165 172 )
166 173 )
167 174
168 175 def observe_stop(self):
169 176 """Get a deferred that will fire when the process stops.
170 177
171 178 The deferred will fire with data that contains information about
172 179 the exit status of the process.
173 180 """
174 181 if self.state=='after':
175 182 return defer.succeed(self.stop_data)
176 183 else:
177 184 d = defer.Deferred()
178 185 self.stop_deferreds.append(d)
179 186 return d
180 187
181 188 def notify_start(self, data):
182 189 """Call this to trigger startup actions.
183 190
184 191 This logs the process startup and sets the state to 'running'. It is
185 192 a pass-through so it can be used as a callback.
186 193 """
187 194
188 195 log.msg('Process %r started: %r' % (self.args[0], data))
189 196 self.start_data = data
190 197 self.state = 'running'
191 198 return data
192 199
193 200 def notify_stop(self, data):
194 201 """Call this to trigger process stop actions.
195 202
196 203 This logs the process stopping and sets the state to 'after'. Call
197 204 this to trigger all the deferreds from :func:`observe_stop`."""
198 205
199 206 log.msg('Process %r stopped: %r' % (self.args[0], data))
200 207 self.stop_data = data
201 208 self.state = 'after'
202 209 for i in range(len(self.stop_deferreds)):
203 210 d = self.stop_deferreds.pop()
204 211 d.callback(data)
205 212 return data
206 213
207 214 def signal(self, sig):
208 215 """Signal the process.
209 216
210 217 Return a semi-meaningless deferred after signaling the process.
211 218
212 219 Parameters
213 220 ----------
214 221 sig : str or int
215 222 'KILL', 'INT', etc., or any signal number
216 223 """
217 224 return defer.fail(
218 225 Failure(NotImplementedError(
219 226 'signal must be implemented in a subclass')
220 227 )
221 228 )
222 229
223 230
224 231 #-----------------------------------------------------------------------------
225 232 # Local process launchers
226 233 #-----------------------------------------------------------------------------
227 234
228 235
229 236 class LocalProcessLauncherProtocol(ProcessProtocol):
230 237 """A ProcessProtocol to go with the LocalProcessLauncher."""
231 238
232 239 def __init__(self, process_launcher):
233 240 self.process_launcher = process_launcher
234 241 self.pid = None
235 242
236 243 def connectionMade(self):
237 244 self.pid = self.transport.pid
238 245 self.process_launcher.notify_start(self.transport.pid)
239 246
240 247 def processEnded(self, status):
241 248 value = status.value
242 249 if isinstance(value, ProcessDone):
243 250 self.process_launcher.notify_stop(
244 251 {'exit_code':0,
245 252 'signal':None,
246 253 'status':None,
247 254 'pid':self.pid
248 255 }
249 256 )
250 257 elif isinstance(value, ProcessTerminated):
251 258 self.process_launcher.notify_stop(
252 259 {'exit_code':value.exitCode,
253 260 'signal':value.signal,
254 261 'status':value.status,
255 262 'pid':self.pid
256 263 }
257 264 )
258 265 else:
259 266 raise UnknownStatus("Unknown exit status, this is probably a "
260 267 "bug in Twisted")
261 268
262 269 def outReceived(self, data):
263 270 log.msg(data)
264 271
265 272 def errReceived(self, data):
266 273 log.err(data)
267 274
268 275
269 276 class LocalProcessLauncher(BaseLauncher):
270 277 """Start and stop an external process in an asynchronous manner.
271 278
272 279 This will launch the external process with a working directory of
273 ``self.working_dir``.
280 ``self.work_dir``.
274 281 """
275 282
276 283 # This is used to to construct self.args, which is passed to
277 284 # spawnProcess.
278 285 cmd_and_args = List([])
279 286
280 def __init__(self, working_dir, parent=None, name=None, config=None):
287 def __init__(self, work_dir, parent=None, name=None, config=None):
281 288 super(LocalProcessLauncher, self).__init__(
282 working_dir, parent, name, config
289 work_dir, parent, name, config
283 290 )
284 291 self.process_protocol = None
285 292 self.start_deferred = None
286 293
287 294 def find_args(self):
288 295 return self.cmd_and_args
289 296
290 297 def start(self):
291 298 if self.state == 'before':
292 299 self.process_protocol = LocalProcessLauncherProtocol(self)
293 300 self.start_deferred = defer.Deferred()
294 301 self.process_transport = reactor.spawnProcess(
295 302 self.process_protocol,
296 303 str(self.args[0]), # twisted expects these to be str, not unicode
297 304 [str(a) for a in self.args], # str expected, not unicode
298 305 env=os.environ,
299 path=self.working_dir # start in the working_dir
306 path=self.work_dir # start in the work_dir
300 307 )
301 308 return self.start_deferred
302 309 else:
303 310 s = 'The process was already started and has state: %r' % self.state
304 311 return defer.fail(ProcessStateError(s))
305 312
306 313 def notify_start(self, data):
307 314 super(LocalProcessLauncher, self).notify_start(data)
308 315 self.start_deferred.callback(data)
309 316
310 317 def stop(self):
311 318 return self.interrupt_then_kill()
312 319
313 320 @make_deferred
314 321 def signal(self, sig):
315 322 if self.state == 'running':
316 323 self.process_transport.signalProcess(sig)
317 324
318 325 @inlineCallbacks
319 326 def interrupt_then_kill(self, delay=2.0):
320 327 """Send INT, wait a delay and then send KILL."""
321 328 yield self.signal('INT')
322 329 yield sleep_deferred(delay)
323 330 yield self.signal('KILL')
324 331
325 332
326 333 class LocalControllerLauncher(LocalProcessLauncher):
327 334 """Launch a controller as a regular external process."""
328 335
329 336 controller_cmd = List(find_controller_cmd(), config=True)
330 337 # Command line arguments to ipcontroller.
331 338 controller_args = List(['--log-to-file','--log-level', '40'], config=True)
332 339
333 340 def find_args(self):
334 return self.controller_cmd + self.controller_args + \
335 ['--working-dir', self.working_dir]
341 return self.controller_cmd + self.controller_args
336 342
337 343 def start(self, cluster_dir):
338 344 """Start the controller by cluster_dir."""
339 345 self.controller_args.extend(['--cluster-dir', cluster_dir])
340 346 self.cluster_dir = unicode(cluster_dir)
341 347 log.msg("Starting LocalControllerLauncher: %r" % self.args)
342 348 return super(LocalControllerLauncher, self).start()
343 349
344 350
345 351 class LocalEngineLauncher(LocalProcessLauncher):
346 352 """Launch a single engine as a regular external process."""
347 353
348 354 engine_cmd = List(find_engine_cmd(), config=True)
349 355 # Command line arguments for ipengine.
350 356 engine_args = List(
351 357 ['--log-to-file','--log-level', '40'], config=True
352 358 )
353 359
354 360 def find_args(self):
355 return self.engine_cmd + self.engine_args + \
356 ['--working-dir', self.working_dir]
361 return self.engine_cmd + self.engine_args
357 362
358 363 def start(self, cluster_dir):
359 364 """Start the engine by cluster_dir."""
360 365 self.engine_args.extend(['--cluster-dir', cluster_dir])
361 366 self.cluster_dir = unicode(cluster_dir)
362 367 return super(LocalEngineLauncher, self).start()
363 368
364 369
365 370 class LocalEngineSetLauncher(BaseLauncher):
366 371 """Launch a set of engines as regular external processes."""
367 372
368 373 # Command line arguments for ipengine.
369 374 engine_args = List(
370 375 ['--log-to-file','--log-level', '40'], config=True
371 376 )
372 377
373 def __init__(self, working_dir, parent=None, name=None, config=None):
378 def __init__(self, work_dir, parent=None, name=None, config=None):
374 379 super(LocalEngineSetLauncher, self).__init__(
375 working_dir, parent, name, config
380 work_dir, parent, name, config
376 381 )
377 382 self.launchers = []
378 383
379 384 def start(self, n, cluster_dir):
380 385 """Start n engines by profile or cluster_dir."""
381 386 self.cluster_dir = unicode(cluster_dir)
382 387 dlist = []
383 388 for i in range(n):
384 el = LocalEngineLauncher(self.working_dir, self)
389 el = LocalEngineLauncher(self.work_dir, self)
385 390 # Copy the engine args over to each engine launcher.
386 391 import copy
387 392 el.engine_args = copy.deepcopy(self.engine_args)
388 393 d = el.start(cluster_dir)
389 394 if i==0:
390 395 log.msg("Starting LocalEngineSetLauncher: %r" % el.args)
391 396 self.launchers.append(el)
392 397 dlist.append(d)
393 398 # The consumeErrors here could be dangerous
394 399 dfinal = gatherBoth(dlist, consumeErrors=True)
395 400 dfinal.addCallback(self.notify_start)
396 401 return dfinal
397 402
398 403 def find_args(self):
399 404 return ['engine set']
400 405
401 406 def signal(self, sig):
402 407 dlist = []
403 408 for el in self.launchers:
404 409 d = el.signal(sig)
405 410 dlist.append(d)
406 411 dfinal = gatherBoth(dlist, consumeErrors=True)
407 412 return dfinal
408 413
409 414 def interrupt_then_kill(self, delay=1.0):
410 415 dlist = []
411 416 for el in self.launchers:
412 417 d = el.interrupt_then_kill(delay)
413 418 dlist.append(d)
414 419 dfinal = gatherBoth(dlist, consumeErrors=True)
415 420 return dfinal
416 421
417 422 def stop(self):
418 423 return self.interrupt_then_kill()
419 424
420 425 def observe_stop(self):
421 426 dlist = [el.observe_stop() for el in self.launchers]
422 427 dfinal = gatherBoth(dlist, consumeErrors=False)
423 428 dfinal.addCallback(self.notify_stop)
424 429 return dfinal
425 430
426 431
427 432 #-----------------------------------------------------------------------------
428 433 # MPIExec launchers
429 434 #-----------------------------------------------------------------------------
430 435
431 436
432 437 class MPIExecLauncher(LocalProcessLauncher):
433 438 """Launch an external process using mpiexec."""
434 439
435 440 # The mpiexec command to use in starting the process.
436 441 mpi_cmd = List(['mpiexec'], config=True)
437 442 # The command line arguments to pass to mpiexec.
438 443 mpi_args = List([], config=True)
439 444 # The program to start using mpiexec.
440 445 program = List(['date'], config=True)
441 446 # The command line argument to the program.
442 447 program_args = List([], config=True)
443 448 # The number of instances of the program to start.
444 449 n = Int(1, config=True)
445 450
446 451 def find_args(self):
447 452 """Build self.args using all the fields."""
448 453 return self.mpi_cmd + ['-n', self.n] + self.mpi_args + \
449 454 self.program + self.program_args
450 455
451 456 def start(self, n):
452 457 """Start n instances of the program using mpiexec."""
453 458 self.n = n
454 459 return super(MPIExecLauncher, self).start()
455 460
456 461
457 462 class MPIExecControllerLauncher(MPIExecLauncher):
458 463 """Launch a controller using mpiexec."""
459 464
460 465 controller_cmd = List(find_controller_cmd(), config=True)
461 466 # Command line arguments to ipcontroller.
462 467 controller_args = List(['--log-to-file','--log-level', '40'], config=True)
463 468 n = Int(1, config=False)
464 469
465 470 def start(self, cluster_dir):
466 471 """Start the controller by cluster_dir."""
467 472 self.controller_args.extend(['--cluster-dir', cluster_dir])
468 473 self.cluster_dir = unicode(cluster_dir)
469 474 log.msg("Starting MPIExecControllerLauncher: %r" % self.args)
470 475 return super(MPIExecControllerLauncher, self).start(1)
471 476
472 477 def find_args(self):
473 478 return self.mpi_cmd + ['-n', self.n] + self.mpi_args + \
474 self.controller_cmd + self.controller_args + \
475 ['--working-dir', self.working_dir]
479 self.controller_cmd + self.controller_args
476 480
477 481
478 482 class MPIExecEngineSetLauncher(MPIExecLauncher):
479 483
480 484 engine_cmd = List(find_engine_cmd(), config=True)
481 485 # Command line arguments for ipengine.
482 486 engine_args = List(
483 487 ['--log-to-file','--log-level', '40'], config=True
484 )
488 )
485 489 n = Int(1, config=True)
486 490
487 491 def start(self, n, cluster_dir):
488 492 """Start n engines by profile or cluster_dir."""
489 493 self.engine_args.extend(['--cluster-dir', cluster_dir])
490 494 self.cluster_dir = unicode(cluster_dir)
495 self.n = n
491 496 log.msg('Starting MPIExecEngineSetLauncher: %r' % self.args)
492 497 return super(MPIExecEngineSetLauncher, self).start(n)
493 498
494 499 def find_args(self):
495 500 return self.mpi_cmd + ['-n', self.n] + self.mpi_args + \
496 self.engine_cmd + self.engine_args + \
497 ['--working-dir', self.working_dir]
501 self.engine_cmd + self.engine_args
498 502
499 503
500 504 #-----------------------------------------------------------------------------
501 505 # SSH launchers
502 506 #-----------------------------------------------------------------------------
503 507
504 508
505 509 class SSHLauncher(BaseLauncher):
506 510 """A minimal launcher for ssh.
507 511
508 512 To be useful this will probably have to be extended to use the ``sshx``
509 513 idea for environment variables. There could be other things this needs
510 514 as well.
511 515 """
512 516
513 517 ssh_cmd = List(['ssh'], config=True)
514 518 ssh_args = List([], config=True)
515 519 program = List(['date'], config=True)
516 520 program_args = List([], config=True)
517 521 hostname = Str('', config=True)
518 522 user = Str('', config=True)
519 523 location = Str('')
520 524
521 525 def _hostname_changed(self, name, old, new):
522 526 self.location = '%s@%s' % (self.user, new)
523 527
524 528 def _user_changed(self, name, old, new):
525 529 self.location = '%s@%s' % (new, self.hostname)
526 530
527 531 def find_args(self):
528 532 return self.ssh_cmd + self.ssh_args + [self.location] + \
529 533 self.program + self.program_args
530 534
531 535 def start(self, n, hostname=None, user=None):
532 536 if hostname is not None:
533 537 self.hostname = hostname
534 538 if user is not None:
535 539 self.user = user
536 540 return super(SSHLauncher, self).start()
537 541
538 542
539 543 class SSHControllerLauncher(SSHLauncher):
540 544 pass
541 545
542 546
543 547 class SSHEngineSetLauncher(BaseLauncher):
544 548 pass
545 549
546 550
547 551 #-----------------------------------------------------------------------------
548 552 # Windows HPC Server 2008 scheduler launchers
549 553 #-----------------------------------------------------------------------------
550 554
551 555
552 556 # This is only used on Windows.
553 557 def find_job_cmd():
554 558 if os.name=='nt':
555 559 return find_cmd('job')
556 560 else:
557 561 return 'job'
558 562
559 563
560 564 class WindowsHPCLauncher(BaseLauncher):
561 565
562 566 # A regular expression used to get the job id from the output of the
563 567 # submit_command.
564 568 job_id_regexp = Str('\d+', config=True)
565 569 # The filename of the instantiated job script.
566 570 job_file_name = Unicode(u'ipython_job.xml', config=True)
567 571 # The full path to the instantiated job script. This gets made dynamically
568 # by combining the working_dir with the job_file_name.
572 # by combining the work_dir with the job_file_name.
569 573 job_file = Unicode(u'')
570 574 # The hostname of the scheduler to submit the job to
571 575 scheduler = Str('', config=True)
572 576 job_cmd = Str(find_job_cmd(), config=True)
573 577
574 def __init__(self, working_dir, parent=None, name=None, config=None):
578 def __init__(self, work_dir, parent=None, name=None, config=None):
575 579 super(WindowsHPCLauncher, self).__init__(
576 working_dir, parent, name, config
580 work_dir, parent, name, config
577 581 )
578 582
579 583 @property
580 584 def job_file(self):
581 return os.path.join(self.working_dir, self.job_file_name)
585 return os.path.join(self.work_dir, self.job_file_name)
582 586
583 587 def write_job_file(self, n):
584 588 raise NotImplementedError("Implement write_job_file in a subclass.")
585 589
586 590 def find_args(self):
587 591 return ['job.exe']
588 592
589 593 def parse_job_id(self, output):
590 594 """Take the output of the submit command and return the job id."""
591 595 m = re.search(self.job_id_regexp, output)
592 596 if m is not None:
593 597 job_id = m.group()
594 598 else:
595 599 raise LauncherError("Job id couldn't be determined: %s" % output)
596 600 self.job_id = job_id
597 601 log.msg('Job started with job id: %r' % job_id)
598 602 return job_id
599 603
600 604 @inlineCallbacks
601 605 def start(self, n):
602 606 """Start n copies of the process using the Win HPC job scheduler."""
603 607 self.write_job_file(n)
604 608 args = [
605 609 'submit',
606 610 '/jobfile:%s' % self.job_file,
607 611 '/scheduler:%s' % self.scheduler
608 612 ]
609 613 log.msg("Starting Win HPC Job: %s" % (self.job_cmd + ' ' + ' '.join(args),))
610 614 # Twisted will raise DeprecationWarnings if we try to pass unicode to this
611 615 output = yield getProcessOutput(str(self.job_cmd),
612 616 [str(a) for a in args],
613 617 env=dict((str(k),str(v)) for k,v in os.environ.items()),
614 path=self.working_dir
618 path=self.work_dir
615 619 )
616 620 job_id = self.parse_job_id(output)
617 621 self.notify_start(job_id)
618 622 defer.returnValue(job_id)
619 623
620 624 @inlineCallbacks
621 625 def stop(self):
622 626 args = [
623 627 'cancel',
624 628 self.job_id,
625 629 '/scheduler:%s' % self.scheduler
626 630 ]
627 631 log.msg("Stopping Win HPC Job: %s" % (self.job_cmd + ' ' + ' '.join(args),))
628 632 try:
629 633 # Twisted will raise DeprecationWarnings if we try to pass unicode to this
630 634 output = yield getProcessOutput(str(self.job_cmd),
631 635 [str(a) for a in args],
632 636 env=dict((str(k),str(v)) for k,v in os.environ.items()),
633 path=self.working_dir
637 path=self.work_dir
634 638 )
635 639 except:
636 640 output = 'The job already appears to be stoppped: %r' % self.job_id
637 641 self.notify_stop(output) # Pass the output of the kill cmd
638 642 defer.returnValue(output)
639 643
640 644
641 645 class WindowsHPCControllerLauncher(WindowsHPCLauncher):
642 646
643 647 job_file_name = Unicode(u'ipcontroller_job.xml', config=True)
644 648 extra_args = List([], config=False)
645 649
646 650 def write_job_file(self, n):
647 651 job = IPControllerJob(self)
648 652
649 653 t = IPControllerTask(self)
650 654 # The tasks work directory is *not* the actual work directory of
651 655 # the controller. It is used as the base path for the stdout/stderr
652 656 # files that the scheduler redirects to.
653 657 t.work_directory = self.cluster_dir
654 # Add the --cluster-dir and --working-dir from self.start().
658 # Add the --cluster-dir from self.start().
655 659 t.controller_args.extend(self.extra_args)
656 660 job.add_task(t)
657 661
658 662 log.msg("Writing job description file: %s" % self.job_file)
659 663 job.write(self.job_file)
660 664
661 665 @property
662 666 def job_file(self):
663 667 return os.path.join(self.cluster_dir, self.job_file_name)
664 668
665 669 def start(self, cluster_dir):
666 670 """Start the controller by cluster_dir."""
667 self.extra_args = [
668 '--cluster-dir', cluster_dir, '--working-dir', self.working_dir
669 ]
671 self.extra_args = ['--cluster-dir', cluster_dir]
670 672 self.cluster_dir = unicode(cluster_dir)
671 673 return super(WindowsHPCControllerLauncher, self).start(1)
672 674
673 675
674 676 class WindowsHPCEngineSetLauncher(WindowsHPCLauncher):
675 677
676 678 job_file_name = Unicode(u'ipengineset_job.xml', config=True)
677 679 extra_args = List([], config=False)
678 680
679 681 def write_job_file(self, n):
680 682 job = IPEngineSetJob(self)
681 683
682 684 for i in range(n):
683 685 t = IPEngineTask(self)
684 686 # The tasks work directory is *not* the actual work directory of
685 687 # the engine. It is used as the base path for the stdout/stderr
686 688 # files that the scheduler redirects to.
687 689 t.work_directory = self.cluster_dir
688 # Add the --cluster-dir and --working-dir from self.start().
690 # Add the --cluster-dir from self.start().
689 691 t.engine_args.extend(self.extra_args)
690 692 job.add_task(t)
691 693
692 694 log.msg("Writing job description file: %s" % self.job_file)
693 695 job.write(self.job_file)
694 696
695 697 @property
696 698 def job_file(self):
697 699 return os.path.join(self.cluster_dir, self.job_file_name)
698 700
699 701 def start(self, n, cluster_dir):
700 702 """Start n engines by cluster_dir."""
701 self.extra_args = [
702 '--cluster-dir', cluster_dir, '--working-dir', self.working_dir
703 ]
703 self.extra_args = ['--cluster-dir', cluster_dir]
704 704 self.cluster_dir = unicode(cluster_dir)
705 705 return super(WindowsHPCEngineSetLauncher, self).start(n)
706 706
707 707
708 708 #-----------------------------------------------------------------------------
709 709 # Batch (PBS) system launchers
710 710 #-----------------------------------------------------------------------------
711 711
712 712
713 713 class BatchSystemLauncher(BaseLauncher):
714 714 """Launch an external process using a batch system.
715 715
716 716 This class is designed to work with UNIX batch systems like PBS, LSF,
717 717 GridEngine, etc. The overall model is that there are different commands
718 718 like qsub, qdel, etc. that handle the starting and stopping of the process.
719 719
720 720 This class also has the notion of a batch script. The ``batch_template``
721 721 attribute can be set to a string that is a template for the batch script.
722 722 This template is instantiated using Itpl. Thus the template can use
723 723 ${n} for the number of instances. Subclasses can add additional variables
724 724 to the template dict.
725 725 """
726 726
727 727 # Subclasses must fill these in. See PBSEngineSet
728 728 # The name of the command line program used to submit jobs.
729 729 submit_command = Str('', config=True)
730 730 # The name of the command line program used to delete jobs.
731 731 delete_command = Str('', config=True)
732 732 # A regular expression used to get the job id from the output of the
733 733 # submit_command.
734 734 job_id_regexp = Str('', config=True)
735 735 # The string that is the batch script template itself.
736 736 batch_template = Str('', config=True)
737 737 # The filename of the instantiated batch script.
738 738 batch_file_name = Unicode(u'batch_script', config=True)
739 739 # The full path to the instantiated batch script.
740 740 batch_file = Unicode(u'')
741 741
742 def __init__(self, working_dir, parent=None, name=None, config=None):
742 def __init__(self, work_dir, parent=None, name=None, config=None):
743 743 super(BatchSystemLauncher, self).__init__(
744 working_dir, parent, name, config
744 work_dir, parent, name, config
745 745 )
746 self.batch_file = os.path.join(self.working_dir, self.batch_file_name)
746 self.batch_file = os.path.join(self.work_dir, self.batch_file_name)
747 747 self.context = {}
748 748
749 749 def parse_job_id(self, output):
750 750 """Take the output of the submit command and return the job id."""
751 751 m = re.match(self.job_id_regexp, output)
752 752 if m is not None:
753 753 job_id = m.group()
754 754 else:
755 755 raise LauncherError("Job id couldn't be determined: %s" % output)
756 756 self.job_id = job_id
757 757 log.msg('Job started with job id: %r' % job_id)
758 758 return job_id
759 759
760 760 def write_batch_script(self, n):
761 """Instantiate and write the batch script to the working_dir."""
761 """Instantiate and write the batch script to the work_dir."""
762 762 self.context['n'] = n
763 763 script_as_string = Itpl.itplns(self.batch_template, self.context)
764 764 log.msg('Writing instantiated batch script: %s' % self.batch_file)
765 765 f = open(self.batch_file, 'w')
766 766 f.write(script_as_string)
767 767 f.close()
768 768
769 769 @inlineCallbacks
770 770 def start(self, n):
771 771 """Start n copies of the process using a batch system."""
772 772 self.write_batch_script(n)
773 773 output = yield getProcessOutput(self.submit_command,
774 774 [self.batch_file], env=os.environ)
775 775 job_id = self.parse_job_id(output)
776 776 self.notify_start(job_id)
777 777 defer.returnValue(job_id)
778 778
779 779 @inlineCallbacks
780 780 def stop(self):
781 781 output = yield getProcessOutput(self.delete_command,
782 782 [self.job_id], env=os.environ
783 783 )
784 784 self.notify_stop(output) # Pass the output of the kill cmd
785 785 defer.returnValue(output)
786 786
787 787
788 788 class PBSLauncher(BatchSystemLauncher):
789 789 """A BatchSystemLauncher subclass for PBS."""
790 790
791 791 submit_command = Str('qsub', config=True)
792 792 delete_command = Str('qdel', config=True)
793 793 job_id_regexp = Str('\d+', config=True)
794 794 batch_template = Str('', config=True)
795 795 batch_file_name = Unicode(u'pbs_batch_script', config=True)
796 796 batch_file = Unicode(u'')
797 797
798 798
799 799 class PBSControllerLauncher(PBSLauncher):
800 800 """Launch a controller using PBS."""
801 801
802 802 batch_file_name = Unicode(u'pbs_batch_script_controller', config=True)
803 803
804 804 def start(self, cluster_dir):
805 805 """Start the controller by profile or cluster_dir."""
806 806 # Here we save profile and cluster_dir in the context so they
807 807 # can be used in the batch script template as ${profile} and
808 808 # ${cluster_dir}
809 809 self.context['cluster_dir'] = cluster_dir
810 810 self.cluster_dir = unicode(cluster_dir)
811 811 log.msg("Starting PBSControllerLauncher: %r" % self.args)
812 812 return super(PBSControllerLauncher, self).start(1)
813 813
814 814
815 815 class PBSEngineSetLauncher(PBSLauncher):
816 816
817 817 batch_file_name = Unicode(u'pbs_batch_script_engines', config=True)
818 818
819 819 def start(self, n, cluster_dir):
820 820 """Start n engines by profile or cluster_dir."""
821 821 self.program_args.extend(['--cluster-dir', cluster_dir])
822 822 self.cluster_dir = unicode(cluster_dir)
823 823 log.msg('Starting PBSEngineSetLauncher: %r' % self.args)
824 824 return super(PBSEngineSetLauncher, self).start(n)
825 825
826 826
827 827 #-----------------------------------------------------------------------------
828 828 # A launcher for ipcluster itself!
829 829 #-----------------------------------------------------------------------------
830 830
831 831
832 832 def find_ipcluster_cmd():
833 833 """Find the command line ipcluster program in a cross platform way."""
834 834 if sys.platform == 'win32':
835 835 # This logic is needed because the ipcluster script doesn't
836 836 # always get installed in the same way or in the same location.
837 837 from IPython.kernel import ipclusterapp
838 838 script_location = ipclusterapp.__file__.replace('.pyc', '.py')
839 839 # The -u option here turns on unbuffered output, which is required
840 840 # on Win32 to prevent weird conflict and problems with Twisted.
841 841 # Also, use sys.executable to make sure we are picking up the
842 842 # right python exe.
843 843 cmd = [sys.executable, '-u', script_location]
844 844 else:
845 845 # ipcontroller has to be on the PATH in this case.
846 846 cmd = ['ipcluster']
847 847 return cmd
848 848
849 849
850 850 class IPClusterLauncher(LocalProcessLauncher):
851 851 """Launch the ipcluster program in an external process."""
852 852
853 853 ipcluster_cmd = List(find_ipcluster_cmd(), config=True)
854 854 # Command line arguments to pass to ipcluster.
855 855 ipcluster_args = List(
856 856 ['--clean-logs', '--log-to-file', '--log-level', '40'], config=True)
857 857 ipcluster_subcommand = Str('start')
858 858 ipcluster_n = Int(2)
859 859
860 860 def find_args(self):
861 861 return self.ipcluster_cmd + [self.ipcluster_subcommand] + \
862 862 ['-n', repr(self.ipcluster_n)] + self.ipcluster_args
863 863
864 864 def start(self):
865 865 log.msg("Starting ipcluster: %r" % self.args)
866 866 return super(IPClusterLauncher, self).start()
867 867
@@ -1,253 +1,253
1 1 # -*- coding: utf-8 -*-
2 2 """Modified input prompt for executing files.
3 3
4 4 We define a special input line filter to allow typing lines which begin with
5 5 '~', '/' or '.'. If one of those strings is encountered, it is automatically
6 6 executed.
7 7 """
8 8
9 9 #*****************************************************************************
10 10 # Copyright (C) 2004 W.J. van der Laan <gnufnork@hetdigitalegat.nl>
11 11 # Copyright (C) 2004-2006 Fernando Perez <fperez@colorado.edu>
12 12 #
13 13 # Distributed under the terms of the BSD License. The full license is in
14 14 # the file COPYING, distributed as part of this software.
15 15 #*****************************************************************************
16 16
17 # TODO: deprecated
17
def prefilter_shell(self,line,continuation):
    """Alternate prefilter, modified for shell-like functionality.

    - Execute all lines beginning with '~', '/' or '.'
    - $var=cmd <=> %sc var=cmd
    - $$var=cmd <=> %sc -l var=cmd
    """
    # Empty lines go straight through the normal prefilter.
    if not line:
        return self._prefilter(line,continuation)

    first = line[0]
    if first in '~/.':
        # Paths and home-relative commands run directly in the shell.
        return self._prefilter("!%s"%line,continuation)
    if first == '$':
        rest = line[1:]
        if rest.startswith('$'):
            # $$var=cmd <=> %sc -l var=cmd (capture output as a list)
            return self._prefilter("%ssc -l %s" % (self.ESC_MAGIC,rest[1:]),
                                   continuation)
        # $var=cmd <=> %sc var=cmd (capture output as a string)
        return self._prefilter("%ssc %s" % (self.ESC_MAGIC,rest),
                               continuation)
    # Anything else is handled by the standard prefilter.
    return self._prefilter(line,continuation)
44 44
# Rebind this to be the new IPython prefilter:
from IPython.core.iplib import InteractiveShell
InteractiveShell.prefilter = prefilter_shell
# Clean up the namespace.
del InteractiveShell,prefilter_shell

# Provide pysh and further shell-oriented services
import os,sys,shutil
from IPython.utils.genutils import system,shell,getoutput,getoutputerror

# Short aliases for getting shell output as a string and a list
sout = getoutput

def lout(cmd):
    # Equivalent of the $$ syntax: output as a list, split on newlines.
    return getoutput(cmd,split=1)
58 58
# Empty function, meant as a docstring holder so help(pysh) works.
# NOTE: the docstring below is user-facing help text (shown by help(pysh)),
# so its content must be preserved verbatim.
def pysh():
    """Pysh is a set of modules and extensions to IPython which make shell-like
    usage with Python syntax more convenient.  Keep in mind that pysh is NOT a
    full-blown shell, so don't try to make it your /etc/passwd entry!

    In particular, it has no job control, so if you type Ctrl-Z (under Unix),
    you'll suspend pysh itself, not the process you just started.

    Since pysh is really nothing but a customized IPython, you should
    familiarize yourself with IPython's features.  This brief help mainly
    documents areas in which pysh differs from the normal IPython.

    ALIASES
    -------
    All of your $PATH has been loaded as IPython aliases, so you should be
    able to type any normal system command and have it executed.  See %alias?
    and %unalias? for details on the alias facilities.

    SPECIAL SYNTAX
    --------------
    Any lines which begin with '~', '/' and '.' will be executed as shell
    commands instead of as Python code. The special escapes below are also
    recognized.  !cmd is valid in single or multi-line input, all others are
    only valid in single-line input:

    !cmd      - pass 'cmd' directly to the shell
    !!cmd     - execute 'cmd' and return output as a list (split on '\\n')
    $var=cmd  - capture output of cmd into var, as a string
    $$var=cmd - capture output of cmd into var, as a list (split on '\\n')

    The $/$$ syntaxes make Python variables from system output, which you can
    later use for further scripting.  The converse is also possible: when
    executing an alias or calling to the system via !/!!, you can expand any
    python variable or expression by prepending it with $.  Full details of
    the allowed syntax can be found in Python's PEP 215.

    A few brief examples will illustrate these:

        fperez[~/test]|3> !ls *s.py
        scopes.py  strings.py

    ls is an internal alias, so there's no need to use !:
        fperez[~/test]|4> ls *s.py
        scopes.py*  strings.py

    !!ls will return the output into a Python variable:
        fperez[~/test]|5> !!ls *s.py
                      <5> ['scopes.py', 'strings.py']
        fperez[~/test]|6> print _5
        ['scopes.py', 'strings.py']

    $ and $$ allow direct capture to named variables:
        fperez[~/test]|7> $astr = ls *s.py
        fperez[~/test]|8> astr
                      <8> 'scopes.py\\nstrings.py'

        fperez[~/test]|9> $$alist = ls *s.py
        fperez[~/test]|10> alist
                      <10> ['scopes.py', 'strings.py']

    alist is now a normal python list you can loop over.  Using $ will expand
    back the python values when alias calls are made:
        fperez[~/test]|11> for f in alist:
                      |..>     print 'file',f,
                      |..>     wc -l $f
                      |..>
        file scopes.py     13 scopes.py
        file strings.py      4 strings.py

    Note that you may need to protect your variables with braces if you want
    to append strings to their names.  To copy all files in alist to .bak
    extensions, you must use:
        fperez[~/test]|12> for f in alist:
                      |..>     cp $f ${f}.bak

    If you try using $f.bak, you'll get an AttributeError exception saying
    that your string object doesn't have a .bak attribute.  This is because
    the $ expansion mechanism allows you to expand full Python expressions:
        fperez[~/test]|13> echo "sys.platform is: $sys.platform"
        sys.platform is: linux2

    IPython's input history handling is still active, which allows you to
    rerun a single block of multi-line input by simply using exec:
        fperez[~/test]|14> $$alist = ls *.eps
        fperez[~/test]|15> exec _i11
        file image2.eps    921 image2.eps
        file image.eps     921 image.eps

    While these are new special-case syntaxes, they are designed to allow very
    efficient use of the shell with minimal typing.  At an interactive shell
    prompt, conciseness of expression wins over readability.

    USEFUL FUNCTIONS AND MODULES
    ----------------------------
    The os, sys and shutil modules from the Python standard library are
    automatically loaded.  Some additional functions, useful for shell usage,
    are listed below.  You can request more help about them with '?'.

    shell   - execute a command in the underlying system shell
    system  - like shell(), but return the exit status of the command
    sout    - capture the output of a command as a string
    lout    - capture the output of a command as a list (split on '\\n')
    getoutputerror - capture (output,error) of a shell command

    sout/lout are the functional equivalents of $/$$.  They are provided to
    allow you to capture system output in the middle of true python code,
    function definitions, etc (where $ and $$ are invalid).

    DIRECTORY MANAGEMENT
    --------------------
    Since each command passed by pysh to the underlying system is executed in
    a subshell which exits immediately, you can NOT use !cd to navigate the
    filesystem.

    Pysh provides its own builtin '%cd' magic command to move in the
    filesystem (the % is not required with automagic on).  It also maintains a
    list of visited directories (use %dhist to see it) and allows direct
    switching to any of them.  Type 'cd?' for more details.

    %pushd, %popd and %dirs are provided for directory stack handling.

    PROMPT CUSTOMIZATION
    --------------------

    The supplied ipythonrc-pysh profile comes with an example of a very
    colored and detailed prompt, mainly to serve as an illustration.  The
    valid escape sequences, besides color names, are:

        \\#  - Prompt number.
        \\D  - Dots, as many as there are digits in \\# (so they align).
        \\w  - Current working directory (cwd).
        \\W  - Basename of current working directory.
        \\XN - Where N=0..5. N terms of the cwd, with $HOME written as ~.
        \\YN - Where N=0..5. Like XN, but if ~ is term N+1 it's also shown.
        \\u  - Username.
        \\H  - Full hostname.
        \\h  - Hostname up to first '.'
        \\$  - Root symbol ($ or #).
        \\t  - Current time, in H:M:S format.
        \\v  - IPython release version.
        \\n  - Newline.
        \\r  - Carriage return.
        \\\\ - An explicitly escaped '\\'.

    You can configure your prompt colors using any ANSI color escape.  Each
    color escape sets the color for any subsequent text, until another escape
    comes in and changes things.  The valid color escapes are:

        \\C_Black
        \\C_Blue
        \\C_Brown
        \\C_Cyan
        \\C_DarkGray
        \\C_Green
        \\C_LightBlue
        \\C_LightCyan
        \\C_LightGray
        \\C_LightGreen
        \\C_LightPurple
        \\C_LightRed
        \\C_Purple
        \\C_Red
        \\C_White
        \\C_Yellow
        \\C_Normal - Stop coloring, defaults to your terminal settings.
    """
    pass
227 227
# Configure a few things.  Much of this is fairly hackish, since IPython
# doesn't really expose a clean API for it.  Be careful if you start making
# many modifications here.


# Set the 'cd' command to quiet mode, a more shell-like behavior
__IPYTHON__.default_option('cd','-q')

# This is redundant, ipy_user_conf.py will determine this
# Load all of $PATH as aliases
__IPYTHON__.magic_rehashx()

# Remove %sc,%sx if present as aliases
# (they were just replaced by the $ / $$ special syntax above)
__IPYTHON__.magic_unalias('sc')
__IPYTHON__.magic_unalias('sx')

# We need different criteria for line-splitting, so that aliases such as
# 'gnome-terminal' are interpreted as a single alias instead of variable
# 'gnome' minus variable 'terminal'.
# Groups: (1) leading escape/whitespace, (2) the command word, (3) the rest.
import re
__IPYTHON__.line_split = re.compile(r'^([\s*,;/])'
                                    r'([\?\w\.\-\+]+\w*\s*)'
                                    r'(\(?.*$)')

# Namespace cleanup
del re
General Comments 0
You need to be logged in to leave comments. Login now