diff --git a/IPython/kernel/clusterdir.py b/IPython/kernel/clusterdir.py index d5b0a11..4ad8dc0 100644 --- a/IPython/kernel/clusterdir.py +++ b/IPython/kernel/clusterdir.py @@ -228,13 +228,13 @@ class AppWithClusterDirArgParseConfigLoader(ArgParseConfigLoader): """Default command line options for IPython cluster applications.""" def _add_other_arguments(self): - self.parser.add_argument('-ipythondir', '--ipython-dir', + self.parser.add_argument('--ipython-dir', dest='Global.ipythondir',type=str, help='Set to override default location of Global.ipythondir.', default=NoConfigDefault, metavar='Global.ipythondir' ) - self.parser.add_argument('-p','-profile', '--profile', + self.parser.add_argument('-p', '--profile', dest='Global.profile',type=str, help='The string name of the profile to be used. This determines ' 'the name of the cluster dir as: cluster_. The default profile ' @@ -243,25 +243,25 @@ class AppWithClusterDirArgParseConfigLoader(ArgParseConfigLoader): default=NoConfigDefault, metavar='Global.profile' ) - self.parser.add_argument('-log_level', '--log-level', + self.parser.add_argument('--log-level', dest="Global.log_level",type=int, help='Set the log level (0,10,20,30,40,50). Default is 30.', default=NoConfigDefault, metavar="Global.log_level" ) - self.parser.add_argument('-cluster_dir', '--cluster-dir', + self.parser.add_argument('--cluster-dir', dest='Global.cluster_dir',type=str, help='Set the cluster dir. 
This overrides the logic used by the ' '--profile option.', default=NoConfigDefault, metavar='Global.cluster_dir' ) - self.parser.add_argument('-clean_logs', '--clean-logs', + self.parser.add_argument('--clean-logs', dest='Global.clean_logs', action='store_true', help='Delete old log flies before starting.', default=NoConfigDefault ) - self.parser.add_argument('-noclean_logs', '--no-clean-logs', + self.parser.add_argument('--no-clean-logs', dest='Global.clean_logs', action='store_false', help="Don't Delete old log flies before starting.", default=NoConfigDefault diff --git a/IPython/kernel/ipclusterapp.py b/IPython/kernel/ipclusterapp.py index 3cde768..08aed1b 100644 --- a/IPython/kernel/ipclusterapp.py +++ b/IPython/kernel/ipclusterapp.py @@ -50,12 +50,12 @@ class IPClusterCLLoader(ArgParseConfigLoader): def _add_arguments(self): # This has all the common options that all subcommands use parent_parser1 = argparse.ArgumentParser(add_help=False) - parent_parser1.add_argument('-ipythondir', '--ipython-dir', + parent_parser1.add_argument('--ipython-dir', dest='Global.ipythondir',type=str, help='Set to override default location of Global.ipythondir.', default=NoConfigDefault, metavar='Global.ipythondir') - parent_parser1.add_argument('-log_level', '--log-level', + parent_parser1.add_argument('--log-level', dest="Global.log_level",type=int, help='Set the log level (0,10,20,30,40,50). Default is 30.', default=NoConfigDefault, @@ -63,7 +63,7 @@ class IPClusterCLLoader(ArgParseConfigLoader): # This has all the common options that other subcommands use parent_parser2 = argparse.ArgumentParser(add_help=False) - parent_parser2.add_argument('-p','-profile', '--profile', + parent_parser2.add_argument('-p','--profile', dest='Global.profile',type=str, default=NoConfigDefault, help='The string name of the profile to be used. 
This determines ' @@ -72,7 +72,7 @@ class IPClusterCLLoader(ArgParseConfigLoader): 'if the --cluster-dir option is not used.', default=NoConfigDefault, metavar='Global.profile') - parent_parser2.add_argument('-cluster_dir', '--cluster-dir', + parent_parser2.add_argument('--cluster-dir', dest='Global.cluster_dir',type=str, default=NoConfigDefault, help='Set the cluster dir. This overrides the logic used by the ' @@ -124,22 +124,22 @@ class IPClusterCLLoader(ArgParseConfigLoader): help='The number of engines to start.', metavar='Global.n' ) - parser_start.add_argument('-clean_logs', '--clean-logs', + parser_start.add_argument('--clean-logs', dest='Global.clean_logs', action='store_true', help='Delete old log flies before starting.', default=NoConfigDefault ) - parser_start.add_argument('-noclean_logs', '--no-clean-logs', + parser_start.add_argument('--no-clean-logs', dest='Global.clean_logs', action='store_false', help="Don't delete old log flies before starting.", default=NoConfigDefault ) - parser_start.add_argument('--daemon', '-daemon', + parser_start.add_argument('--daemon', dest='Global.daemonize', action='store_true', help='Daemonize the ipcluster program. This implies --log-to-file', default=NoConfigDefault ) - parser_start.add_argument('--nodaemon', '-nodaemon', + parser_start.add_argument('--nodaemon', dest='Global.daemonize', action='store_false', help="Dont't daemonize the ipcluster program.", default=NoConfigDefault @@ -150,9 +150,10 @@ class IPClusterCLLoader(ArgParseConfigLoader): help='Stop a cluster.', parents=[parent_parser1, parent_parser2] ) - parser_start.add_argument('-sig', '--sig', + parser_start.add_argument('--signal-number', dest='Global.stop_signal', type=int, help="The signal number to use in stopping the cluster (default=2).", + metavar="Global.stop_signal", default=NoConfigDefault ) @@ -339,7 +340,9 @@ class IPClusterApp(ApplicationWithClusterDir): 'Cluster is already running with [pid=%s]. ' 'use "ipcluster stop" to stop the cluster.' 
% pid ) - sys.exit(9) + # Here I exit with an unusual exit status that other processes + # can watch for to learn how I exited. + sys.exit(10) # Now log and daemonize self.log.info('Starting ipcluster with [daemon=%r]' % config.Global.daemonize) if config.Global.daemonize: @@ -359,7 +362,9 @@ class IPClusterApp(ApplicationWithClusterDir): self.log.critical( 'Problem reading pid file, cluster is probably not running.' ) - sys.exit(9) + # Here I exit with an unusual exit status that other processes + # can watch for to learn how I exited. + sys.exit(11) sig = config.Global.stop_signal self.log.info( "Stopping cluster [pid=%r] with [signal=%r]" % (pid, sig) diff --git a/IPython/kernel/ipcontrollerapp.py b/IPython/kernel/ipcontrollerapp.py index 0a27178..ca3750f 100644 --- a/IPython/kernel/ipcontrollerapp.py +++ b/IPython/kernel/ipcontrollerapp.py @@ -160,9 +160,13 @@ cl_args = ( 'are deleted before the controller starts. This must be set if ' 'specific ports are specified by --engine-port or --client-port.') ), - (('-ns','--no-security'), dict( + (('--no-secure',), dict( action='store_false', dest='Global.secure', default=NoConfigDefault, help='Turn off SSL encryption for all connections.') + ), + (('--secure',), dict( + action='store_true', dest='Global.secure', default=NoConfigDefault, + help='Turn on SSL encryption for all connections.') ) ) diff --git a/docs/examples/kernel/fractal.py b/docs/examples/kernel/fractal.py new file mode 100644 index 0000000..715d30a --- /dev/null +++ b/docs/examples/kernel/fractal.py @@ -0,0 +1,90 @@ +from numpy import * + +def mandel(n, m, itermax, xmin, xmax, ymin, ymax): + ''' + Fast mandelbrot computation using numpy. + + (n, m) are the output image dimensions + itermax is the maximum number of iterations to do + xmin, xmax, ymin, ymax specify the region of the + set to compute. + ''' + # The point of ix and iy is that they are 2D arrays + # giving the x-coord and y-coord at each point in + # the array. 
The reason for doing this will become + # clear below... + ix, iy = mgrid[0:n, 0:m] + # Now x and y are the x-values and y-values at each + # point in the array, linspace(start, end, n) + # is an array of n linearly spaced points between + # start and end, and we then index this array using + # numpy fancy indexing. If A is an array and I is + # an array of indices, then A[I] has the same shape + # as I and at each place i in I has the value A[i]. + x = linspace(xmin, xmax, n)[ix] + y = linspace(ymin, ymax, m)[iy] + # c is the complex number with the given x, y coords + c = x+complex(0,1)*y + del x, y # save a bit of memory, we only need z + # the output image coloured according to the number + # of iterations it takes to get to the boundary + # abs(z)>2 + img = zeros(c.shape, dtype=int) + # Here is where the improvement over the standard + # algorithm for drawing fractals in numpy comes in. + # We flatten all the arrays ix, iy and c. This + # flattening doesn't use any more memory because + # we are just changing the shape of the array, the + # data in memory stays the same. It also affects + # each array in the same way, so that index i in + # array c has x, y coords ix[i], iy[i]. The way the + # algorithm works is that whenever abs(z)>2 we + # remove the corresponding index from each of the + # arrays ix, iy and c. Since we do the same thing + # to each array, the correspondence between c and + # the x, y coords stored in ix and iy is kept. + ix.shape = n*m + iy.shape = n*m + c.shape = n*m + # we iterate z->z^2+c with z starting at 0, but the + # first iteration makes z=c so we just start there. + # We need to copy c because otherwise the operation + # z->z^2 will send c->c^2. 
+ z = copy(c) + for i in xrange(itermax): + if not len(z): break # all points have escaped + # equivalent to z = z*z+c but quicker and uses + # less memory + multiply(z, z, z) + add(z, c, z) + # these are the points that have escaped + rem = abs(z)>2.0 + # colour them with the iteration number, we + # add one so that points which haven't + # escaped have 0 as their iteration number, + # this is why we keep the arrays ix and iy + # because we need to know which point in img + # to colour + img[ix[rem], iy[rem]] = i+1 + # -rem is the array of points which haven't + # escaped, in numpy -A for a boolean array A + # is the NOT operation. + rem = -rem + # So we select out the points in + # z, ix, iy and c which are still to be + # iterated on in the next step + z = z[rem] + ix, iy = ix[rem], iy[rem] + c = c[rem] + return img + +if __name__=='__main__': + from pylab import * + import time + start = time.time() + I = mandel(400, 400, 100, -2, .5, -1.25, 1.25) + print 'Time taken:', time.time()-start + I[I==0] = 101 + img = imshow(I.T, origin='lower left') + img.write_png('mandel.png', noscale=True) + show() diff --git a/docs/examples/kernel/wordfreq.py b/docs/examples/kernel/wordfreq.py index e76f39f..e12549f 100644 --- a/docs/examples/kernel/wordfreq.py +++ b/docs/examples/kernel/wordfreq.py @@ -1,13 +1,20 @@ """Count the frequencies of words in a string""" +from __future__ import division + +import cmath as math + + def wordfreq(text): """Return a dictionary of words and word counts in a string.""" freqs = {} for word in text.split(): - freqs[word] = freqs.get(word, 0) + 1 + lword = word.lower() + freqs[lword] = freqs.get(lword, 0) + 1 return freqs + def print_wordfreq(freqs, n=10): """Print the n most common words and counts in the freqs dict.""" @@ -17,7 +24,43 @@ def print_wordfreq(freqs, n=10): for (count, word) in items[:n]: print word, count -if __name__ == '__main__': - import gzip - text = gzip.open('HISTORY.gz').read() - freqs = wordfreq(text) \ No newline at 
end of file + +def wordfreq_to_weightsize(worddict, minsize=10, maxsize=50, minalpha=0.4, maxalpha=1.0): + mincount = min(worddict.itervalues()) + maxcount = max(worddict.itervalues()) + weights = {} + for k, v in worddict.iteritems(): + w = (v-mincount)/(maxcount-mincount) + alpha = minalpha + (maxalpha-minalpha)*w + size = minsize + (maxsize-minsize)*w + weights[k] = (alpha, size) + return weights + + +def tagcloud(worddict, n=10, minsize=10, maxsize=50, minalpha=0.4, maxalpha=1.0): + from matplotlib import pyplot as plt + import random + + worddict = wordfreq_to_weightsize(worddict, minsize, maxsize, minalpha, maxalpha) + + fig = plt.figure() + ax = fig.add_subplot(111) + ax.set_position([0.0,0.0,1.0,1.0]) + plt.xticks([]) + plt.yticks([]) + + words = worddict.keys() + alphas = [v[0] for v in worddict.values()] + sizes = [v[1] for v in worddict.values()] + items = zip(alphas, sizes, words) + items.sort(reverse=True) + for alpha, size, word in items[:n]: + xpos = random.normalvariate(0.5, 0.3) + ypos = random.normalvariate(0.5, 0.3) + # xpos = random.uniform(0.0,1.0) + # ypos = random.uniform(0.0,1.0) + ax.text(xpos, ypos, word.lower(), alpha=alpha, fontsize=size) + ax.autoscale_view() + return ax + + \ No newline at end of file