Reverted decorators_trial.py and added command line docs....
Brian Granger -
@@ -0,0 +1,132 @@
1 # encoding: utf-8
2 """
3 Testing related decorators for use with twisted.trial.
4
5 The decorators in this file are designed to follow the same API as those
6 in the decorators module (in this same directory), but they can be used
7 with twisted.trial.
8 """
9
10 #-----------------------------------------------------------------------------
11 # Copyright (C) 2008-2009 The IPython Development Team
12 #
13 # Distributed under the terms of the BSD License. The full license is in
14 # the file COPYING, distributed as part of this software.
15 #-----------------------------------------------------------------------------
16
17 #-----------------------------------------------------------------------------
18 # Imports
19 #-----------------------------------------------------------------------------
20
21 import os
22 import sys
23
24 from IPython.testing.decorators import make_label_dec
25
26 #-----------------------------------------------------------------------------
27 # Testing decorators
28 #-----------------------------------------------------------------------------
29
30
31 def skipif(skip_condition, msg=None):
32 """Create a decorator that marks a test function for skipping.
33
34 This is a decorator factory that returns a decorator that will
35 conditionally skip a test based on the value of skip_condition. The
36 skip_condition argument can either be a boolean or a callable that returns
37 a boolean.
38
39 Parameters
40 ----------
41 skip_condition : boolean or callable
42 If this evaluates to True, the test is skipped.
43 msg : str
44 The message to print if the test is skipped.
45
46 Returns
47 -------
48 decorator : function
49 The decorator function that can be applied to the test function.
50 """
51
52 def skip_decorator(f):
53
54 # Allow for both boolean or callable skip conditions.
55 if callable(skip_condition):
56 skip_val = lambda : skip_condition()
57 else:
58 skip_val = lambda : skip_condition
59
60 if msg is None:
61 out = 'Test skipped due to test condition.'
62 else:
63 out = msg
64 final_msg = "Skipping test: %s. %s" % (f.__name__,out)
65
66 if skip_val():
67 f.skip = final_msg
68
69 return f
70 return skip_decorator
71
72
73 def skip(msg=None):
74 """Create a decorator that marks a test function for skipping.
75
76 This is a decorator factory that returns a decorator that will cause
77 tests to be skipped.
78
79 Parameters
80 ----------
81 msg : str
82 Optional message to be added.
83
84 Returns
85 -------
86 decorator : function
87 Decorator, which, when applied to a function, sets the skip
88 attribute of the function causing `twisted.trial` to skip it.
89 """
90
91 return skipif(True,msg)
92
93
94 def numpy_not_available():
95 """Can numpy be imported? Returns true if numpy does NOT import.
96
97 This is used to make a decorator to skip tests that require numpy to be
98 available, but delay the 'import numpy' to test execution time.
99 """
100 try:
101 import numpy
102 np_not_avail = False
103 except ImportError:
104 np_not_avail = True
105
106 return np_not_avail
107
108 #-----------------------------------------------------------------------------
109 # Decorators for public use
110 #-----------------------------------------------------------------------------
111
112 # Decorators to skip certain tests on specific platforms.
113 skip_win32 = skipif(sys.platform == 'win32',
114 "This test does not run under Windows")
115 skip_linux = skipif(sys.platform == 'linux2',
116 "This test does not run under Linux")
117 skip_osx = skipif(sys.platform == 'darwin',"This test does not run under OS X")
118
119 # Decorators to skip tests if not on specific platforms.
120 skip_if_not_win32 = skipif(sys.platform != 'win32',
121 "This test only runs under Windows")
122 skip_if_not_linux = skipif(sys.platform != 'linux2',
123 "This test only runs under Linux")
124 skip_if_not_osx = skipif(sys.platform != 'darwin',
125 "This test only runs under OSX")
126
127 # Other skip decorators
128 skipif_not_numpy = skipif(numpy_not_available,"This test requires numpy")
129
130 skipknownfailure = skip('This test is known to fail')
131
132
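To make the new decorator API concrete, here is a minimal usage sketch, assuming IPython.testing.decorators_trial is importable and twisted.trial is installed; the test class, method names and conditions below are illustrative and are not part of this changeset.

    import sys
    from twisted.trial import unittest
    from IPython.testing import decorators_trial as dec

    class TestSkipSketch(unittest.TestCase):   # hypothetical test case

        @dec.skipif(sys.platform == 'win32', "POSIX-only behaviour")
        def test_posix_only(self):
            # skipif() sets the 'skip' attribute on this method when the
            # condition is true, and twisted.trial reports it as skipped.
            self.assertTrue(True)

        @dec.skipif_not_numpy
        def test_needs_numpy(self):
            # Skipped when numpy cannot be imported; numpy_not_available is
            # passed as a callable, so the import check is deferred until the
            # decorator is applied.
            import numpy
            self.assertEqual(numpy.array([1]).sum(), 1)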
@@ -1,78 +1,79 @@
1 1 # encoding: utf-8
2 2 """
3 3 Test the output capture at the OS level, using file descriptors.
4 4 """
5 5 #-----------------------------------------------------------------------------
6 6 # Copyright (C) 2008-2009 The IPython Development Team
7 7 #
8 8 # Distributed under the terms of the BSD License. The full license is
9 9 # in the file COPYING, distributed as part of this software.
10 10 #-----------------------------------------------------------------------------
11 11
12 12 #-----------------------------------------------------------------------------
13 13 # Imports
14 14 #-----------------------------------------------------------------------------
15 15
16 16 # Tell nose to skip this module
17 17 __test__ = {}
18 18
19 19 from cStringIO import StringIO
20 20 import os
21 21 import sys
22 22
23 23 from twisted.trial import unittest
24 24
25 from IPython.testing import decorators_trial as dec
26
25 27 #-----------------------------------------------------------------------------
26 28 # Tests
27 29 #-----------------------------------------------------------------------------
28 30
29 31 class TestRedirector(unittest.TestCase):
30 32
31 if sys.platform == 'win32':
32 skip = True
33
33 @dec.skip_win32
34 34 def test_redirector(self):
35 35 """Checks that the redirector can be used to do synchronous capture.
36 36 """
37 37 from IPython.kernel.core.fd_redirector import FDRedirector
38 38 r = FDRedirector()
39 39 out = StringIO()
40 40 try:
41 41 r.start()
42 42 for i in range(10):
43 43 os.system('echo %ic' % i)
44 44 print >>out, r.getvalue(),
45 45 print >>out, i
46 46 except:
47 47 r.stop()
48 48 raise
49 49 r.stop()
50 50 result1 = out.getvalue()
51 51 result2 = "".join("%ic\n%i\n" %(i, i) for i in range(10))
52 52 self.assertEquals(result1, result2)
53 53
54 @dec.skip_win32
54 55 def test_redirector_output_trap(self):
55 56 """Check the greedy trapping behavior of the traps.
56 57
57 58 This test checks not only that the redirector_output_trap does
58 59 trap the output, but also that it does so in a greedy way, that
59 60 is, by calling the callback ASAP.
60 61 """
61 62 from IPython.kernel.core.redirector_output_trap import \
62 63 RedirectorOutputTrap
63 64 out = StringIO()
64 65 trap = RedirectorOutputTrap(out.write, out.write)
65 66 try:
66 67 trap.set()
67 68 for i in range(10):
68 69 os.system('echo %ic' % i)
69 70 print "%ip" % i
70 71 print >>out, i
71 72 except:
72 73 trap.unset()
73 74 raise
74 75 trap.unset()
75 76 result1 = out.getvalue()
76 77 result2 = "".join("%ic\n%ip\n%i\n" %(i, i, i) for i in range(10))
77 78 self.assertEquals(result1, result2)
78 79
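For reference, the OS-level capture protocol these tests exercise can be sketched as below; this assumes FDRedirector exposes only the start/getvalue/stop calls used in the test above, and the echoed text and variable names are made up for illustration.

    import os
    from IPython.kernel.core.fd_redirector import FDRedirector

    r = FDRedirector()
    r.start()                       # begin capturing file-descriptor output
    try:
        os.system('echo captured')  # output goes to the redirector, not the terminal
        text = r.getvalue()         # drain whatever has been captured so far
    finally:
        r.stop()                    # always restore normal output
    print text                      # Python 2 print, matching this codebase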
@@ -1,463 +1,499 @@
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3 """
4 4 The ipcluster application.
5 5 """
6 6
7 7 #-----------------------------------------------------------------------------
8 8 # Copyright (C) 2008-2009 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-----------------------------------------------------------------------------
13 13
14 14 #-----------------------------------------------------------------------------
15 15 # Imports
16 16 #-----------------------------------------------------------------------------
17 17
18 18 import logging
19 19 import os
20 20 import signal
21 21
22 22 if os.name=='posix':
23 23 from twisted.scripts._twistd_unix import daemonize
24 24
25 25 from twisted.internet import reactor, defer
26 26 from twisted.python import log, failure
27 27
28 28
29 29 from IPython.external.argparse import ArgumentParser, SUPPRESS
30 30 from IPython.utils.importstring import import_item
31 31 from IPython.kernel.clusterdir import (
32 32 ApplicationWithClusterDir, ClusterDirConfigLoader,
33 33 ClusterDirError, PIDFileError
34 34 )
35 35
36 36
37 37 #-----------------------------------------------------------------------------
38 38 # Module level variables
39 39 #-----------------------------------------------------------------------------
40 40
41 41
42 42 default_config_file_name = u'ipcluster_config.py'
43 43
44 44
45 45 _description = """\
46 46 Start an IPython cluster for parallel computing.\n\n
47 47
48 48 An IPython cluster consists of 1 controller and 1 or more engines.
49 49 This command automates the startup of these processes using a wide
50 50 range of startup methods (SSH, local processes, PBS, mpiexec,
51 51 Windows HPC Server 2008). To start a cluster with 4 engines on your
52 local host simply do "ipcluster start -n 4". For more complex usage
53 you will typically do "ipcluster create -p mycluster", then edit
54 configuration files, followed by "ipcluster start -p mycluster -n 4".
52 local host simply do 'ipcluster start -n 4'. For more complex usage
53 you will typically do 'ipcluster create -p mycluster', then edit
54 configuration files, followed by 'ipcluster start -p mycluster -n 4'.
55 55 """
56 56
57 57
58 58 # Exit codes for ipcluster
59 59
60 60 # This will be the exit code if the ipcluster appears to be running because
61 61 # a .pid file exists
62 62 ALREADY_STARTED = 10
63 63
64 64
65 65 # This will be the exit code if ipcluster stop is run, but there is no .pid
66 66 # file to be found.
67 67 ALREADY_STOPPED = 11
68 68
69 69
70 70 #-----------------------------------------------------------------------------
71 71 # Command line options
72 72 #-----------------------------------------------------------------------------
73 73
74 74
75 75 class IPClusterAppConfigLoader(ClusterDirConfigLoader):
76 76
77 77 def _add_arguments(self):
78 78 # Don't call ClusterDirConfigLoader._add_arguments as we don't want
79 79 # its defaults on self.parser. Instead, we will put those
80 80 # defaults on our subparsers.
81 81
82 82 # This has all the common options that all subcommands use
83 83 parent_parser1 = ArgumentParser(
84 84 add_help=False,
85 85 argument_default=SUPPRESS
86 86 )
87 87 self._add_ipython_dir(parent_parser1)
88 88 self._add_log_level(parent_parser1)
89 89
90 90 # This has all the common options that other subcommands use
91 91 parent_parser2 = ArgumentParser(
92 92 add_help=False,
93 93 argument_default=SUPPRESS
94 94 )
95 95 self._add_cluster_profile(parent_parser2)
96 96 self._add_cluster_dir(parent_parser2)
97 97 self._add_work_dir(parent_parser2)
98 98 paa = parent_parser2.add_argument
99 99 paa('--log-to-file',
100 100 action='store_true', dest='Global.log_to_file',
101 101 help='Log to a file in the log directory (default is stdout)')
102 102
103 103 # Create the object used to create the subparsers.
104 104 subparsers = self.parser.add_subparsers(
105 105 dest='Global.subcommand',
106 106 title='ipcluster subcommands',
107 107 description=
108 108 """ipcluster has a variety of subcommands. The general way of
109 running ipcluster is 'ipcluster <cmd> [options]'""",
110 help="For more help, type 'ipcluster <cmd> -h'"
109 running ipcluster is 'ipcluster <cmd> [options]'. To get help
110 on a particular subcommand do 'ipcluster <cmd> -h'."""
111 # help="For more help, type 'ipcluster <cmd> -h'",
111 112 )
112 113
113 114 # The "list" subcommand parser
114 115 parser_list = subparsers.add_parser(
115 116 'list',
116 help='List all clusters in cwd and ipython_dir.',
117 117 parents=[parent_parser1],
118 argument_default=SUPPRESS
118 argument_default=SUPPRESS,
119 help="List all clusters in cwd and ipython_dir.",
120 description=
121 """List all available clusters, by cluster directory, that can
122 be found in the current working directory or in the ipython
123 directory. Cluster directories are named using the convention
124 'cluster_<profile>'."""
119 125 )
120 126
121 127 # The "create" subcommand parser
122 128 parser_create = subparsers.add_parser(
123 129 'create',
124 help='Create a new cluster directory.',
125 130 parents=[parent_parser1, parent_parser2],
126 argument_default=SUPPRESS
131 argument_default=SUPPRESS,
132 help="Create a new cluster directory.",
133 description=
134 """Create an ipython cluster directory by its profile name or
135 cluster directory path. Cluster directories contain
136 configuration, log and security related files and are named
137 using the convention 'cluster_<profile>'. By default they are
138 located in your ipython directory. Once created, you will
139 probably need to edit the configuration files in the cluster
140 directory to configure your cluster. Most users will create a
141 cluster directory by profile name,
142 'ipcluster create -p mycluster', which will put the directory
143 in '<ipython_dir>/cluster_mycluster'.
144 """
127 145 )
128 146 paa = parser_create.add_argument
129 147 paa('--reset-config',
130 148 dest='Global.reset_config', action='store_true',
131 149 help=
132 150 """Recopy the default config files to the cluster directory.
133 151 You will lose any modifications you have made to these files.""")
134 152
135 153 # The "start" subcommand parser
136 154 parser_start = subparsers.add_parser(
137 155 'start',
138 help='Start a cluster.',
139 156 parents=[parent_parser1, parent_parser2],
140 argument_default=SUPPRESS
157 argument_default=SUPPRESS,
158 help="Start a cluster.",
159 description=
160 """Start an ipython cluster by its profile name or cluster
161 directory. Cluster directories contain configuration, log and
162 security related files and are named using the convention
163 'cluster_<profile>' and should be created using the 'create'
164 subcommand of 'ipcluster'. If your cluster directory is in
165 the cwd or the ipython directory, you can simply refer to it
166 using its profile name, 'ipcluster start -n 4 -p <profile>',
167 otherwise use the '--cluster-dir' option.
168 """
141 169 )
142 170 paa = parser_start.add_argument
143 171 paa('-n', '--number',
144 172 type=int, dest='Global.n',
145 173 help='The number of engines to start.',
146 174 metavar='Global.n')
147 175 paa('--clean-logs',
148 176 dest='Global.clean_logs', action='store_true',
149 177 help='Delete old log files before starting.')
150 178 paa('--no-clean-logs',
151 179 dest='Global.clean_logs', action='store_false',
152 180 help="Don't delete old log flies before starting.")
153 181 paa('--daemon',
154 182 dest='Global.daemonize', action='store_true',
155 183 help='Daemonize the ipcluster program. This implies --log-to-file')
156 184 paa('--no-daemon',
157 185 dest='Global.daemonize', action='store_false',
158 186 help="Dont't daemonize the ipcluster program.")
159 187
160 188 # The "stop" subcommand parser
161 189 parser_stop = subparsers.add_parser(
162 190 'stop',
163 help='Stop a cluster.',
164 191 parents=[parent_parser1, parent_parser2],
165 argument_default=SUPPRESS
192 argument_default=SUPPRESS,
193 help="Stop a running cluster.",
194 description=
195 """Stop a running ipython cluster by its profile name or cluster
196 directory. Cluster directories are named using the convention
197 'cluster_<profile>'. If your cluster directory is in
198 the cwd or the ipython directory, you can simply refer to it
199 using its profile name, 'ipcluster stop -p <profile>', otherwise
200 use the '--cluster-dir' option.
201 """
166 202 )
167 203 paa = parser_stop.add_argument
168 204 paa('--signal',
169 205 dest='Global.signal', type=int,
170 206 help="The signal number to use in stopping the cluster (default=2).",
171 207 metavar="Global.signal")
172 208
173 209
174 210 #-----------------------------------------------------------------------------
175 211 # Main application
176 212 #-----------------------------------------------------------------------------
177 213
178 214
179 215 class IPClusterApp(ApplicationWithClusterDir):
180 216
181 217 name = u'ipcluster'
182 218 description = _description
183 219 usage = None
184 220 command_line_loader = IPClusterAppConfigLoader
185 221 config_file_name = default_config_file_name
186 222 default_log_level = logging.INFO
187 223 auto_create_cluster_dir = False
188 224
189 225 def create_default_config(self):
190 226 super(IPClusterApp, self).create_default_config()
191 227 self.default_config.Global.controller_launcher = \
192 228 'IPython.kernel.launcher.LocalControllerLauncher'
193 229 self.default_config.Global.engine_launcher = \
194 230 'IPython.kernel.launcher.LocalEngineSetLauncher'
195 231 self.default_config.Global.n = 2
196 232 self.default_config.Global.reset_config = False
197 233 self.default_config.Global.clean_logs = True
198 234 self.default_config.Global.signal = 2
199 235 self.default_config.Global.daemonize = False
200 236
201 237 def find_resources(self):
202 238 subcommand = self.command_line_config.Global.subcommand
203 239 if subcommand=='list':
204 240 self.list_cluster_dirs()
205 241 # Exit immediately because there is nothing left to do.
206 242 self.exit()
207 243 elif subcommand=='create':
208 244 self.auto_create_cluster_dir = True
209 245 super(IPClusterApp, self).find_resources()
210 246 elif subcommand=='start' or subcommand=='stop':
211 247 self.auto_create_cluster_dir = True
212 248 try:
213 249 super(IPClusterApp, self).find_resources()
214 250 except ClusterDirError:
215 251 raise ClusterDirError(
216 252 "Could not find a cluster directory. A cluster dir must "
217 253 "be created before running 'ipcluster start'. Do "
218 254 "'ipcluster create -h' or 'ipcluster list -h' for more "
219 255 "information about creating and listing cluster dirs."
220 256 )
221 257
222 258 def list_cluster_dirs(self):
223 259 # Find the search paths
224 260 cluster_dir_paths = os.environ.get('IPCLUSTER_DIR_PATH','')
225 261 if cluster_dir_paths:
226 262 cluster_dir_paths = cluster_dir_paths.split(':')
227 263 else:
228 264 cluster_dir_paths = []
229 265 try:
230 266 ipython_dir = self.command_line_config.Global.ipython_dir
231 267 except AttributeError:
232 268 ipython_dir = self.default_config.Global.ipython_dir
233 269 paths = [os.getcwd(), ipython_dir] + \
234 270 cluster_dir_paths
235 271 paths = list(set(paths))
236 272
237 273 self.log.info('Searching for cluster dirs in paths: %r' % paths)
238 274 for path in paths:
239 275 files = os.listdir(path)
240 276 for f in files:
241 277 full_path = os.path.join(path, f)
242 278 if os.path.isdir(full_path) and f.startswith('cluster_'):
243 279 profile = full_path.split('_')[-1]
244 280 start_cmd = 'ipcluster start -p %s -n 4' % profile
245 281 print start_cmd + " ==> " + full_path
246 282
247 283 def pre_construct(self):
248 284 # IPClusterApp.pre_construct() is where we cd to the working directory.
249 285 super(IPClusterApp, self).pre_construct()
250 286 config = self.master_config
251 287 try:
252 288 daemon = config.Global.daemonize
253 289 if daemon:
254 290 config.Global.log_to_file = True
255 291 except AttributeError:
256 292 pass
257 293
258 294 def construct(self):
259 295 config = self.master_config
260 296 subcmd = config.Global.subcommand
261 297 reset = config.Global.reset_config
262 298 if subcmd == 'list':
263 299 return
264 300 if subcmd == 'create':
265 301 self.log.info('Copying default config files to cluster directory '
266 302 '[overwrite=%r]' % (reset,))
267 303 self.cluster_dir_obj.copy_all_config_files(overwrite=reset)
268 304 if subcmd =='start':
269 305 self.cluster_dir_obj.copy_all_config_files(overwrite=False)
270 306 self.start_logging()
271 307 reactor.callWhenRunning(self.start_launchers)
272 308
273 309 def start_launchers(self):
274 310 config = self.master_config
275 311
276 312 # Create the launchers. In both cases, we set the work_dir of
277 313 # the launcher to the cluster_dir. This is where the launcher's
278 314 # subprocesses will be launched. It is not where the controller
279 315 # and engine will be launched.
280 316 el_class = import_item(config.Global.engine_launcher)
281 317 self.engine_launcher = el_class(
282 318 work_dir=self.cluster_dir, config=config
283 319 )
284 320 cl_class = import_item(config.Global.controller_launcher)
285 321 self.controller_launcher = cl_class(
286 322 work_dir=self.cluster_dir, config=config
287 323 )
288 324
289 325 # Setup signals
290 326 signal.signal(signal.SIGINT, self.sigint_handler)
291 327
292 328 # Setup the observing of stopping. If the controller dies, shut
293 329 # everything down as that will be completely fatal for the engines.
294 330 d1 = self.controller_launcher.observe_stop()
295 331 d1.addCallback(self.stop_launchers)
296 332 # But, we don't monitor the stopping of engines. An engine dying
297 333 # is just fine and in principle a user could start a new engine.
298 334 # Also, if we did monitor engine stopping, it is difficult to
299 335 # know what to do when only some engines die. Currently, the
300 336 # observing of engine stopping is inconsistent. Some launchers
301 337 # might trigger on a single engine stopping, others wait until
302 338 # all stop. TODO: think more about how to handle this.
303 339
304 340 # Start the controller and engines
305 341 self._stopping = False # Make sure stop_launchers is not called 2x.
306 342 d = self.start_controller()
307 343 d.addCallback(self.start_engines)
308 344 d.addCallback(self.startup_message)
309 345 # If the controller or engines fail to start, stop everything
310 346 d.addErrback(self.stop_launchers)
311 347 return d
312 348
313 349 def startup_message(self, r=None):
314 350 log.msg("IPython cluster: started")
315 351 return r
316 352
317 353 def start_controller(self, r=None):
318 354 # log.msg("In start_controller")
319 355 config = self.master_config
320 356 d = self.controller_launcher.start(
321 357 cluster_dir=config.Global.cluster_dir
322 358 )
323 359 return d
324 360
325 361 def start_engines(self, r=None):
326 362 # log.msg("In start_engines")
327 363 config = self.master_config
328 364 d = self.engine_launcher.start(
329 365 config.Global.n,
330 366 cluster_dir=config.Global.cluster_dir
331 367 )
332 368 return d
333 369
334 370 def stop_controller(self, r=None):
335 371 # log.msg("In stop_controller")
336 372 if self.controller_launcher.running:
337 373 d = self.controller_launcher.stop()
338 374 d.addErrback(self.log_err)
339 375 return d
340 376 else:
341 377 return defer.succeed(None)
342 378
343 379 def stop_engines(self, r=None):
344 380 # log.msg("In stop_engines")
345 381 if self.engine_launcher.running:
346 382 d = self.engine_launcher.stop()
347 383 d.addErrback(self.log_err)
348 384 return d
349 385 else:
350 386 return defer.succeed(None)
351 387
352 388 def log_err(self, f):
353 389 log.msg(f.getTraceback())
354 390 return None
355 391
356 392 def stop_launchers(self, r=None):
357 393 if not self._stopping:
358 394 self._stopping = True
359 395 if isinstance(r, failure.Failure):
360 396 log.msg('Unexpected error in ipcluster:')
361 397 log.msg(r.getTraceback())
362 398 log.msg("IPython cluster: stopping")
363 399 # These return deferreds. We are not doing anything with them
364 400 # but we are holding refs to them as a reminder that they
365 401 # do return deferreds.
366 402 d1 = self.stop_engines()
367 403 d2 = self.stop_controller()
368 404 # Wait a few seconds to let things shut down.
369 405 reactor.callLater(4.0, reactor.stop)
370 406
371 407 def sigint_handler(self, signum, frame):
372 408 self.stop_launchers()
373 409
374 410 def start_logging(self):
375 411 # Remove old log files of the controller and engine
376 412 if self.master_config.Global.clean_logs:
377 413 log_dir = self.master_config.Global.log_dir
378 414 for f in os.listdir(log_dir):
379 415 if f.startswith('ipengine' + '-'):
380 416 if f.endswith('.log') or f.endswith('.out') or f.endswith('.err'):
381 417 os.remove(os.path.join(log_dir, f))
382 418 if f.startswith('ipcontroller' + '-'):
383 419 if f.endswith('.log') or f.endswith('.out') or f.endswith('.err'):
384 420 os.remove(os.path.join(log_dir, f))
385 421 # This will remove old log files for ipcluster itself
386 422 super(IPClusterApp, self).start_logging()
387 423
388 424 def start_app(self):
389 425 """Start the application, depending on what subcommand is used."""
390 426 subcmd = self.master_config.Global.subcommand
391 427 if subcmd=='create' or subcmd=='list':
392 428 return
393 429 elif subcmd=='start':
394 430 self.start_app_start()
395 431 elif subcmd=='stop':
396 432 self.start_app_stop()
397 433
398 434 def start_app_start(self):
399 435 """Start the app for the start subcommand."""
400 436 config = self.master_config
401 437 # First see if the cluster is already running
402 438 try:
403 439 pid = self.get_pid_from_file()
404 440 except PIDFileError:
405 441 pass
406 442 else:
407 443 self.log.critical(
408 444 'Cluster is already running with [pid=%s]. '
409 445 'use "ipcluster stop" to stop the cluster.' % pid
410 446 )
411 447 # Here I exit with an unusual exit status that other processes
412 448 # can watch for to learn how I exited.
413 449 self.exit(ALREADY_STARTED)
414 450
415 451 # Now log and daemonize
416 452 self.log.info(
417 453 'Starting ipcluster with [daemon=%r]' % config.Global.daemonize
418 454 )
419 455 # TODO: Get daemonize working on Windows or as a Windows Server.
420 456 if config.Global.daemonize:
421 457 if os.name=='posix':
422 458 daemonize()
423 459
424 460 # Now write the new pid file AFTER our new forked pid is active.
425 461 self.write_pid_file()
426 462 reactor.addSystemEventTrigger('during','shutdown', self.remove_pid_file)
427 463 reactor.run()
428 464
429 465 def start_app_stop(self):
430 466 """Start the app for the stop subcommand."""
431 467 config = self.master_config
432 468 try:
433 469 pid = self.get_pid_from_file()
434 470 except PIDFileError:
435 471 self.log.critical(
436 472 'Problem reading pid file, cluster is probably not running.'
437 473 )
438 474 # Here I exit with an unusual exit status that other processes
439 475 # can watch for to learn how I exited.
440 476 self.exit(ALREADY_STOPPED)
441 477 else:
442 478 if os.name=='posix':
443 479 sig = config.Global.signal
444 480 self.log.info(
445 481 "Stopping cluster [pid=%r] with [signal=%r]" % (pid, sig)
446 482 )
447 483 os.kill(pid, sig)
448 484 elif os.name=='nt':
449 485 # As of right now, we don't support daemonize on Windows, so
450 486 # stop will not do anything. Minimally, it should clean up the
451 487 # old .pid files.
452 488 self.remove_pid_file()
453 489
454 490
455 491 def launch_new_instance():
456 492 """Create and run the IPython cluster."""
457 493 app = IPClusterApp()
458 494 app.start()
459 495
460 496
461 497 if __name__ == '__main__':
462 498 launch_new_instance()
463 499
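The reworked subcommand help describes a create/edit/start workflow. As a hedged illustration, an edited ipcluster_config.py might look like the sketch below; the attribute names mirror the Global.* defaults set in create_default_config and the --log-to-file option above, but the get_config() hook and the concrete values are assumptions made for this example.

    # Hypothetical ipcluster_config.py, e.g. in <ipython_dir>/cluster_mycluster/
    c = get_config()

    # Launch the controller and engines as local processes (the shipped defaults).
    c.Global.controller_launcher = 'IPython.kernel.launcher.LocalControllerLauncher'
    c.Global.engine_launcher = 'IPython.kernel.launcher.LocalEngineSetLauncher'

    c.Global.n = 4              # engines started by 'ipcluster start -p mycluster'
    c.Global.clean_logs = True  # remove old ipcontroller-*/ipengine-* logs on start
    c.Global.log_to_file = True # same effect as passing --log-to-file
    c.Global.daemonize = False  # also settable via --daemon / --no-daemon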
@@ -1,57 +1,52 @@
1 1 # encoding: utf-8
2 2 """
3 Tests for decorators.py compatibility with Twisted.trial
3 Tests for decorators_trial.py
4 4 """
5 5
6 6 #-----------------------------------------------------------------------------
7 7 # Copyright (C) 2008-2009 The IPython Development Team
8 8 #
9 9 # Distributed under the terms of the BSD License. The full license is in
10 10 # the file COPYING, distributed as part of this software.
11 11 #-----------------------------------------------------------------------------
12 12
13 13 #-----------------------------------------------------------------------------
14 14 # Imports
15 15 #-----------------------------------------------------------------------------
16 16
17 # Tell nose to skip this module, since this is for twisted only
17 # Tell nose to skip this module
18 18 __test__ = {}
19 19
20 20 import os
21 21 import sys
22 22
23 23 from twisted.trial import unittest
24 import IPython.testing.decorators as dec
24 import IPython.testing.decorators_trial as dec
25 25
26 26 #-----------------------------------------------------------------------------
27 27 # Tests
28 28 #-----------------------------------------------------------------------------
29 29
30 # Note: this code is identical to that in test_decorators, but that one uses
31 # stdlib unittest, not the one from twisted, which we are using here. While
32 # somewhat redundant, we want to check both with the stdlib and with twisted,
33 # so the duplication is OK.
34
35 30 class TestDecoratorsTrial(unittest.TestCase):
36 31
37 32 @dec.skip()
38 33 def test_deliberately_broken(self):
39 34 """A deliberately broken test - we want to skip this one."""
40 35 1/0
41 36
42 37 @dec.skip('Testing the skip decorator')
43 38 def test_deliberately_broken2(self):
44 39 """Another deliberately broken test - we want to skip this one."""
45 40 1/0
46 41
47 42 @dec.skip_linux
48 43 def test_linux(self):
49 44 self.assertNotEquals(sys.platform,'linux2',"This test can't run under linux")
50 45
51 46 @dec.skip_win32
52 47 def test_win32(self):
53 48 self.assertNotEquals(sys.platform,'win32',"This test can't run under windows")
54 49
55 50 @dec.skip_osx
56 51 def test_osx(self):
57 52 self.assertNotEquals(sys.platform,'darwin',"This test can't run under osx")