diff --git a/IPython/FakeModule.py b/IPython/FakeModule.py index cc53d99..41029f5 100644 --- a/IPython/FakeModule.py +++ b/IPython/FakeModule.py @@ -15,6 +15,37 @@ sessions. import types +def init_fakemod_dict(fm,adict=None): + """Initialize a FakeModule instance __dict__. + + Kept as a standalone function and not a method so the FakeModule API can + remain basically empty. + + This should be considered for private IPython use, used in managing + namespaces for %run. + + Parameters + ---------- + + fm : FakeModule instance + + adict : dict, optional + """ + + dct = {} + # It seems pydoc (and perhaps others) needs any module instance to + # implement a __nonzero__ method, so we add it if missing: + dct.setdefault('__nonzero__',lambda : True) + dct.setdefault('__file__',__file__) + + if adict is not None: + dct.update(adict) + + # Hard assignment of the object's __dict__. This is nasty but deliberate. + fm.__dict__.clear() + fm.__dict__.update(dct) + + class FakeModule(types.ModuleType): """Simple class with attribute access to fake a module. @@ -29,14 +60,7 @@ class FakeModule(types.ModuleType): # tmp to force __dict__ instance creation, else self.__dict__ fails self.__iptmp = None - - # It seems pydoc (and perhaps others) needs any module instance to - # implement a __nonzero__ method, so we add it if missing: - self.__dict__.setdefault('__nonzero__',lambda : True) - self.__dict__.setdefault('__file__',__file__) - # cleanup our temp trick del self.__iptmp - - if adict is not None: - self.__dict__.update(adict) + # Now, initialize the actual data in the instance dict. + init_fakemod_dict(self,adict) diff --git a/IPython/Magic.py b/IPython/Magic.py index f500698..32bdb6d 100644 --- a/IPython/Magic.py +++ b/IPython/Magic.py @@ -1584,23 +1584,17 @@ Currently the magic system has the following functions:\n""" prog_ns = self.shell.user_ns __name__save = self.shell.user_ns['__name__'] prog_ns['__name__'] = '__main__' - main_mod = FakeModule(prog_ns) + main_mod = self.shell.new_main_mod(prog_ns) else: # Run in a fresh, empty namespace if opts.has_key('n'): name = os.path.splitext(os.path.basename(filename))[0] else: name = '__main__' - main_mod = FakeModule() + + main_mod = self.shell.new_main_mod() prog_ns = main_mod.__dict__ prog_ns['__name__'] = name - - # The shell MUST hold a reference to main_mod so after %run exits, - # the python deletion mechanism doesn't zero it out (leaving - # dangling references). However, we should drop old versions of - # main_mod. There is now a proper API to manage this caching in - # the main shell object, we use that. - self.shell.cache_main_mod(main_mod) # Since '%run foo' emulates 'python foo.py' at the cmd line, we must # set the __file__ global in the script's namespace @@ -1703,9 +1697,14 @@ Currently the magic system has the following functions:\n""" else: # regular execution runner(filename,prog_ns,prog_ns,exit_ignore=exit_ignore) + if opts.has_key('i'): self.shell.user_ns['__name__'] = __name__save else: + # The shell MUST hold a reference to prog_ns so after %run + # exits, the python deletion mechanism doesn't zero it out + # (leaving dangling references). + self.shell.cache_main_mod(prog_ns,filename) # update IPython interactive namespace del prog_ns['__name__'] self.shell.user_ns.update(prog_ns) @@ -1719,6 +1718,7 @@ Currently the magic system has the following functions:\n""" # added. Otherwise it will trap references to objects # contained therein. 
del sys.modules[main_mod_name] + self.shell.reloadhist() return stats @@ -1800,7 +1800,28 @@ Currently the magic system has the following functions:\n""" import timeit import math - units = [u"s", u"ms", u"\xb5s", u"ns"] + # XXX: Unfortunately the unicode 'micro' symbol can cause problems in + # certain terminals. Until we figure out a robust way of + # auto-detecting if the terminal can deal with it, use plain 'us' for + # microseconds. I am really NOT happy about disabling the proper + # 'micro' prefix, but crashing is worse... If anyone knows what the + # right solution for this is, I'm all ears... + # + # Note: using + # + # s = u'\xb5' + # s.encode(sys.getdefaultencoding()) + # + # is not sufficient, as I've seen terminals where that fails but + # print s + # + # succeeds + # + # See bug: https://bugs.launchpad.net/ipython/+bug/348466 + + #units = [u"s", u"ms",u'\xb5',"ns"] + units = [u"s", u"ms",u'us',"ns"] + scaling = [1, 1e3, 1e6, 1e9] opts, stmt = self.parse_options(parameter_s,'n:r:tcp:', @@ -1839,9 +1860,9 @@ Currently the magic system has the following functions:\n""" # determine number so that 0.2 <= total time < 2.0 number = 1 for i in range(1, 10): - number *= 10 if timer.timeit(number) >= 0.2: break + number *= 10 best = min(timer.repeat(repeat, number)) / number diff --git a/IPython/Prompts.py b/IPython/Prompts.py index 89b8d2a..343fb1e 100644 --- a/IPython/Prompts.py +++ b/IPython/Prompts.py @@ -128,7 +128,7 @@ prompt_specials_color = { r'\N': '${self.cache.prompt_count}', # Prompt/history count, with the actual digits replaced by dots. Used # mainly in continuation prompts (prompt_in2) - r'\D': '${"."*len(str(self.cache.prompt_count))}', + r'\D': '${"."*__builtins__.len(__builtins__.str(self.cache.prompt_count))}', # Current working directory r'\w': '${os.getcwd()}', # Current time diff --git a/IPython/Release.py b/IPython/Release.py index d2e3bf5..5a6e465 100644 --- a/IPython/Release.py +++ b/IPython/Release.py @@ -20,10 +20,10 @@ name = 'ipython' # because bdist_rpm does not accept dashes (an RPM) convention, and # bdist_deb does not accept underscores (a Debian convention). -development = False # change this to False to do a release -version_base = '0.9.1' +development = True # change this to False to do a release +version_base = '0.10' branch = 'ipython' -revision = '1143' +revision = '1163' if development: if branch == 'ipython': diff --git a/IPython/iplib.py b/IPython/iplib.py index 612fa2e..d674668 100644 --- a/IPython/iplib.py +++ b/IPython/iplib.py @@ -54,7 +54,7 @@ from pprint import pprint, pformat from IPython import Debugger,OInspect,PyColorize,ultraTB from IPython.ColorANSI import ColorScheme,ColorSchemeTable # too long names from IPython.Extensions import pickleshare -from IPython.FakeModule import FakeModule +from IPython.FakeModule import FakeModule, init_fakemod_dict from IPython.Itpl import Itpl,itpl,printpl,ItplNS,itplns from IPython.Logger import Logger from IPython.Magic import Magic @@ -108,6 +108,197 @@ def softspace(file, newvalue): return oldvalue +def user_setup(ipythondir,rc_suffix,mode='install',interactive=True): + """Install or upgrade the user configuration directory. + + Can be called when running for the first time or to upgrade the user's + .ipython/ directory. + + Parameters + ---------- + ipythondir : path + The directory to be used for installation/upgrade. In 'install' mode, + if this path already exists, the function exits immediately. + + rc_suffix : str + Extension for the config files. 
On *nix platforms it is typically the + empty string, while Windows normally uses '.ini'. + + mode : str, optional + Valid modes are 'install' and 'upgrade'. + + interactive : bool, optional + If False, do not wait for user input on any errors. Normally after + printing its status information, this function waits for the user to + hit Return before proceeding. This is because the default use case is + when first installing the IPython configuration, so we want the user to + acknowledge the initial message, which contains some useful + information. + """ + + # For automatic use, deactivate all i/o + if interactive: + def wait(): + try: + raw_input("Please press to start IPython.") + except EOFError: + print >> Term.cout + print '*'*70 + + def printf(s): + print s + else: + wait = lambda : None + printf = lambda s : None + + # Install mode should be re-entrant: if the install dir already exists, + # bail out cleanly + if mode == 'install' and os.path.isdir(ipythondir): + return + + cwd = os.getcwd() # remember where we started + glb = glob.glob + + printf('*'*70) + if mode == 'install': + printf( +"""Welcome to IPython. I will try to create a personal configuration directory +where you can customize many aspects of IPython's functionality in:\n""") + else: + printf('I am going to upgrade your configuration in:') + + printf(ipythondir) + + rcdirend = os.path.join('IPython','UserConfig') + cfg = lambda d: os.path.join(d,rcdirend) + try: + rcdir = filter(os.path.isdir,map(cfg,sys.path))[0] + printf("Initializing from configuration: %s" % rcdir) + except IndexError: + warning = """ +Installation error. IPython's directory was not found. + +Check the following: + +The ipython/IPython directory should be in a directory belonging to your +PYTHONPATH environment variable (that is, it should be in a directory +belonging to sys.path). You can copy it explicitly there or just link to it. + +IPython will create a minimal default configuration for you. + +""" + warn(warning) + wait() + + if sys.platform =='win32': + inif = 'ipythonrc.ini' + else: + inif = 'ipythonrc' + minimal_setup = {'ipy_user_conf.py' : 'import ipy_defaults', + inif : '# intentionally left blank' } + os.makedirs(ipythondir, mode = 0777) + for f, cont in minimal_setup.items(): + # In 2.5, this can be more cleanly done using 'with' + fobj = file(ipythondir + '/' + f,'w') + fobj.write(cont) + fobj.close() + + return + + if mode == 'install': + try: + shutil.copytree(rcdir,ipythondir) + os.chdir(ipythondir) + rc_files = glb("ipythonrc*") + for rc_file in rc_files: + os.rename(rc_file,rc_file+rc_suffix) + except: + warning = """ + +There was a problem with the installation: +%s +Try to correct it or contact the developers if you think it's a bug. +IPython will proceed with builtin defaults.""" % sys.exc_info()[1] + warn(warning) + wait() + return + + elif mode == 'upgrade': + try: + os.chdir(ipythondir) + except: + printf(""" +Can not upgrade: changing to directory %s failed. 
Details: +%s +""" % (ipythondir,sys.exc_info()[1]) ) + wait() + return + else: + sources = glb(os.path.join(rcdir,'[A-Za-z]*')) + for new_full_path in sources: + new_filename = os.path.basename(new_full_path) + if new_filename.startswith('ipythonrc'): + new_filename = new_filename + rc_suffix + # The config directory should only contain files, skip any + # directories which may be there (like CVS) + if os.path.isdir(new_full_path): + continue + if os.path.exists(new_filename): + old_file = new_filename+'.old' + if os.path.exists(old_file): + os.remove(old_file) + os.rename(new_filename,old_file) + shutil.copy(new_full_path,new_filename) + else: + raise ValueError('unrecognized mode for install: %r' % mode) + + # Fix line-endings to those native to each platform in the config + # directory. + try: + os.chdir(ipythondir) + except: + printf(""" +Problem: changing to directory %s failed. +Details: +%s + +Some configuration files may have incorrect line endings. This should not +cause any problems during execution. """ % (ipythondir,sys.exc_info()[1]) ) + wait() + else: + for fname in glb('ipythonrc*'): + try: + native_line_ends(fname,backup=0) + except IOError: + pass + + if mode == 'install': + printf(""" +Successful installation! + +Please read the sections 'Initial Configuration' and 'Quick Tips' in the +IPython manual (there are both HTML and PDF versions supplied with the +distribution) to make sure that your system environment is properly configured +to take advantage of IPython's features. + +Important note: the configuration system has changed! The old system is +still in place, but its setting may be partly overridden by the settings in +"~/.ipython/ipy_user_conf.py" config file. Please take a look at the file +if some of the new settings bother you. + +""") + else: + printf(""" +Successful upgrade! + +All files in your directory: +%(ipythondir)s +which would have been overwritten by the upgrade were backed up with a .old +extension. If you had made particular customizations in those files you may +want to merge them back into the new files.""" % locals() ) + wait() + os.chdir(cwd) + #**************************************************************************** # Local use exceptions class SpaceInInput(exceptions.Exception): pass @@ -308,13 +499,24 @@ class InteractiveShell(object,Magic): # calling functions defined in the script that use other things from # the script will fail, because the function's closure had references # to the original objects, which are now all None. So we must protect - # these modules from deletion by keeping a cache. To avoid keeping - # stale modules around (we only need the one from the last run), we use - # a dict keyed with the full path to the script, so only the last - # version of the module is held in the cache. The %reset command will - # flush this cache. See the cache_main_mod() and clear_main_mod_cache() - # methods for details on use. - self._user_main_modules = {} + # these modules from deletion by keeping a cache. + # + # To avoid keeping stale modules around (we only need the one from the + # last run), we use a dict keyed with the full path to the script, so + # only the last version of the module is held in the cache. Note, + # however, that we must cache the module *namespace contents* (their + # __dict__). 
Because if we try to cache the actual modules, old ones + # (uncached) could be destroyed while still holding references (such as + # those held by GUI objects that tend to be long-lived)> + # + # The %reset command will flush this cache. See the cache_main_mod() + # and clear_main_mod_cache() methods for details on use. + + # This is the cache used for 'main' namespaces + self._main_ns_cache = {} + # And this is the single instance of FakeModule whose __dict__ we keep + # copying and clearing for reuse on each %run + self._user_main_module = FakeModule() # A table holding all the namespaces IPython deals with, so that # introspection facilities can search easily. @@ -330,7 +532,7 @@ class InteractiveShell(object,Magic): # a simple list. self.ns_refs_table = [ user_ns, user_global_ns, self.user_config_ns, self.alias_table, self.internal_ns, - self._user_main_modules ] + self._main_ns_cache ] # We need to insert into sys.modules something that looks like a # module but which accesses the IPython namespace, for shelve and @@ -1114,156 +1316,11 @@ class InteractiveShell(object,Magic): def user_setup(self,ipythondir,rc_suffix,mode='install'): """Install the user configuration directory. - Can be called when running for the first time or to upgrade the user's - .ipython/ directory with the mode parameter. Valid modes are 'install' - and 'upgrade'.""" - - def wait(): - try: - raw_input("Please press to start IPython.") - except EOFError: - print >> Term.cout - print '*'*70 - - cwd = os.getcwd() # remember where we started - glb = glob.glob - print '*'*70 - if mode == 'install': - print \ -"""Welcome to IPython. I will try to create a personal configuration directory -where you can customize many aspects of IPython's functionality in:\n""" - else: - print 'I am going to upgrade your configuration in:' - - print ipythondir - - rcdirend = os.path.join('IPython','UserConfig') - cfg = lambda d: os.path.join(d,rcdirend) - try: - rcdir = filter(os.path.isdir,map(cfg,sys.path))[0] - print "Initializing from configuration",rcdir - except IndexError: - warning = """ -Installation error. IPython's directory was not found. - -Check the following: - -The ipython/IPython directory should be in a directory belonging to your -PYTHONPATH environment variable (that is, it should be in a directory -belonging to sys.path). You can copy it explicitly there or just link to it. - -IPython will create a minimal default configuration for you. - -""" - warn(warning) - wait() - - if sys.platform =='win32': - inif = 'ipythonrc.ini' - else: - inif = 'ipythonrc' - minimal_setup = {'ipy_user_conf.py' : 'import ipy_defaults', - inif : '# intentionally left blank' } - os.makedirs(ipythondir, mode = 0777) - for f, cont in minimal_setup.items(): - open(ipythondir + '/' + f,'w').write(cont) - - return - - if mode == 'install': - try: - shutil.copytree(rcdir,ipythondir) - os.chdir(ipythondir) - rc_files = glb("ipythonrc*") - for rc_file in rc_files: - os.rename(rc_file,rc_file+rc_suffix) - except: - warning = """ - -There was a problem with the installation: -%s -Try to correct it or contact the developers if you think it's a bug. -IPython will proceed with builtin defaults.""" % sys.exc_info()[1] - warn(warning) - wait() - return - - elif mode == 'upgrade': - try: - os.chdir(ipythondir) - except: - print """ -Can not upgrade: changing to directory %s failed. 
Details: -%s -""" % (ipythondir,sys.exc_info()[1]) - wait() - return - else: - sources = glb(os.path.join(rcdir,'[A-Za-z]*')) - for new_full_path in sources: - new_filename = os.path.basename(new_full_path) - if new_filename.startswith('ipythonrc'): - new_filename = new_filename + rc_suffix - # The config directory should only contain files, skip any - # directories which may be there (like CVS) - if os.path.isdir(new_full_path): - continue - if os.path.exists(new_filename): - old_file = new_filename+'.old' - if os.path.exists(old_file): - os.remove(old_file) - os.rename(new_filename,old_file) - shutil.copy(new_full_path,new_filename) - else: - raise ValueError,'unrecognized mode for install:',`mode` - - # Fix line-endings to those native to each platform in the config - # directory. - try: - os.chdir(ipythondir) - except: - print """ -Problem: changing to directory %s failed. -Details: -%s - -Some configuration files may have incorrect line endings. This should not -cause any problems during execution. """ % (ipythondir,sys.exc_info()[1]) - wait() - else: - for fname in glb('ipythonrc*'): - try: - native_line_ends(fname,backup=0) - except IOError: - pass - - if mode == 'install': - print """ -Successful installation! - -Please read the sections 'Initial Configuration' and 'Quick Tips' in the -IPython manual (there are both HTML and PDF versions supplied with the -distribution) to make sure that your system environment is properly configured -to take advantage of IPython's features. - -Important note: the configuration system has changed! The old system is -still in place, but its setting may be partly overridden by the settings in -"~/.ipython/ipy_user_conf.py" config file. Please take a look at the file -if some of the new settings bother you. - -""" - else: - print """ -Successful upgrade! - -All files in your directory: -%(ipythondir)s -which would have been overwritten by the upgrade were backed up with a .old -extension. If you had made particular customizations in those files you may -want to merge them back into the new files.""" % locals() - wait() - os.chdir(cwd) - # end user_setup() + Note + ---- + DEPRECATED: use the top-level user_setup() function instead. + """ + return user_setup(ipythondir,rc_suffix,mode) def atexit_operations(self): """This will be executed at the time of exit. @@ -1441,35 +1498,53 @@ want to merge them back into the new files.""" % locals() return True return ask_yes_no(prompt,default) - def cache_main_mod(self,mod): - """Cache a main module. + def new_main_mod(self,ns=None): + """Return a new 'main' module object for user code execution. + """ + main_mod = self._user_main_module + init_fakemod_dict(main_mod,ns) + return main_mod + + def cache_main_mod(self,ns,fname): + """Cache a main module's namespace. - When scripts are executed via %run, we must keep a reference to their - __main__ module (a FakeModule instance) around so that Python doesn't - clear it, rendering objects defined therein useless. + When scripts are executed via %run, we must keep a reference to the + namespace of their __main__ module (a FakeModule instance) around so + that Python doesn't clear it, rendering objects defined therein + useless. This method keeps said reference in a private dict, keyed by the absolute path of the module object (which corresponds to the script path). 
This way, for multiple executions of the same script we only - keep one copy of __main__ (the last one), thus preventing memory leaks - from old references while allowing the objects from the last execution - to be accessible. + keep one copy of the namespace (the last one), thus preventing memory + leaks from old references while allowing the objects from the last + execution to be accessible. + + Note: we can not allow the actual FakeModule instances to be deleted, + because of how Python tears down modules (it hard-sets all their + references to None without regard for reference counts). This method + must therefore make a *copy* of the given namespace, to allow the + original module's __dict__ to be cleared and reused. + Parameters ---------- - mod : a module object + ns : a namespace (a dict, typically) + + fname : str + Filename associated with the namespace. Examples -------- In [10]: import IPython - In [11]: _ip.IP.cache_main_mod(IPython) + In [11]: _ip.IP.cache_main_mod(IPython.__dict__,IPython.__file__) - In [12]: IPython.__file__ in _ip.IP._user_main_modules + In [12]: IPython.__file__ in _ip.IP._main_ns_cache Out[12]: True """ - self._user_main_modules[os.path.abspath(mod.__file__) ] = mod + self._main_ns_cache[os.path.abspath(fname)] = ns.copy() def clear_main_mod_cache(self): """Clear the cache of main modules. @@ -1481,17 +1556,17 @@ want to merge them back into the new files.""" % locals() In [15]: import IPython - In [16]: _ip.IP.cache_main_mod(IPython) + In [16]: _ip.IP.cache_main_mod(IPython.__dict__,IPython.__file__) - In [17]: len(_ip.IP._user_main_modules) > 0 + In [17]: len(_ip.IP._main_ns_cache) > 0 Out[17]: True In [18]: _ip.IP.clear_main_mod_cache() - In [19]: len(_ip.IP._user_main_modules) == 0 + In [19]: len(_ip.IP._main_ns_cache) == 0 Out[19]: True """ - self._user_main_modules.clear() + self._main_ns_cache.clear() def _should_recompile(self,e): """Utility routine for edit_syntax_error""" diff --git a/IPython/kernel/multiengineclient.py b/IPython/kernel/multiengineclient.py index 1d27037..2f7ad16 100644 --- a/IPython/kernel/multiengineclient.py +++ b/IPython/kernel/multiengineclient.py @@ -885,7 +885,62 @@ class FullBlockingMultiEngineClient(InteractiveMultiEngineClient): targets, block = self._findTargetsAndBlock(targets, block) return self._blockFromThread(self.smultiengine.run, filename, targets=targets, block=block) + + def benchmark(self, push_size=10000): + """ + Run performance benchmarks for the current IPython cluster. + + This method tests both the latency of sending command and data to the + engines as well as the throughput of sending large objects to the + engines using push. The latency is measured by having one or more + engines execute the command 'pass'. The throughput is measure by + sending an NumPy array of size `push_size` to one or more engines. + + These benchmarks will vary widely on different hardware and networks + and thus can be used to get an idea of the performance characteristics + of a particular configuration of an IPython controller and engines. + + This function is not testable within our current testing framework. 
+ """ + import timeit, __builtin__ + __builtin__._mec_self = self + benchmarks = {} + repeat = 3 + count = 10 + + timer = timeit.Timer('_mec_self.execute("pass",0)') + result = 1000*min(timer.repeat(repeat,count))/count + benchmarks['single_engine_latency'] = (result,'msec') + + timer = timeit.Timer('_mec_self.execute("pass")') + result = 1000*min(timer.repeat(repeat,count))/count + benchmarks['all_engine_latency'] = (result,'msec') + try: + import numpy as np + except: + pass + else: + timer = timeit.Timer( + "_mec_self.push(d)", + "import numpy as np; d = dict(a=np.zeros(%r,dtype='float64'))" % push_size + ) + result = min(timer.repeat(repeat,count))/count + benchmarks['all_engine_push'] = (1e-6*push_size*8/result, 'MB/sec') + + try: + import numpy as np + except: + pass + else: + timer = timeit.Timer( + "_mec_self.push(d,0)", + "import numpy as np; d = dict(a=np.zeros(%r,dtype='float64'))" % push_size + ) + result = min(timer.repeat(repeat,count))/count + benchmarks['single_engine_push'] = (1e-6*push_size*8/result, 'MB/sec') + + return benchmarks components.registerAdapter(FullBlockingMultiEngineClient, diff --git a/IPython/kernel/scripts/ipcluster.py b/IPython/kernel/scripts/ipcluster.py index 640a41a..ca20901 100755 --- a/IPython/kernel/scripts/ipcluster.py +++ b/IPython/kernel/scripts/ipcluster.py @@ -478,15 +478,31 @@ Try running ipcluster with the -xy flags: ipcluster local -xy -n 4""") cont_args.append('-y') return True +def check_reuse(args, cont_args): + if args.r: + cont_args.append('-r') + if args.client_port == 0 or args.engine_port == 0: + log.err(""" +To reuse FURL files, you must also set the client and engine ports using +the --client-port and --engine-port options.""") + reactor.stop() + return False + cont_args.append('--client-port=%i' % args.client_port) + cont_args.append('--engine-port=%i' % args.engine_port) + return True def main_local(args): cont_args = [] cont_args.append('--logfile=%s' % pjoin(args.logdir,'ipcontroller')) - + # Check security settings before proceeding if not check_security(args, cont_args): return - + + # See if we are reusing FURL files + if not check_reuse(args, cont_args): + return + cl = ControllerLauncher(extra_args=cont_args) dstart = cl.start() def start_engines(cont_pid): @@ -513,18 +529,22 @@ def main_local(args): dstart.addErrback(lambda f: f.raiseException()) -def main_mpirun(args): +def main_mpi(args): cont_args = [] cont_args.append('--logfile=%s' % pjoin(args.logdir,'ipcontroller')) - + # Check security settings before proceeding if not check_security(args, cont_args): return - + + # See if we are reusing FURL files + if not check_reuse(args, cont_args): + return + cl = ControllerLauncher(extra_args=cont_args) dstart = cl.start() def start_engines(cont_pid): - raw_args = ['mpirun'] + raw_args = [args.cmd] raw_args.extend(['-n',str(args.n)]) raw_args.append('ipengine') raw_args.append('-l') @@ -554,11 +574,15 @@ def main_mpirun(args): def main_pbs(args): cont_args = [] cont_args.append('--logfile=%s' % pjoin(args.logdir,'ipcontroller')) - + # Check security settings before proceeding if not check_security(args, cont_args): return - + + # See if we are reusing FURL files + if not check_reuse(args, cont_args): + return + cl = ControllerLauncher(extra_args=cont_args) dstart = cl.start() def start_engines(r): @@ -598,13 +622,16 @@ def main_ssh(args): if not check_security(args, cont_args): return + # See if we are reusing FURL files + if not check_reuse(args, cont_args): + return + cl = ControllerLauncher(extra_args=cont_args) dstart 
= cl.start() def start_engines(cont_pid): ssh_set = SSHEngineSet(clusterfile['engines'], sshx=args.sshx) def shutdown(signum, frame): d = ssh_set.kill() - # d.addErrback(log.err) cl.interrupt_then_kill(1.0) reactor.callLater(2.0, reactor.stop) signal.signal(signal.SIGINT,shutdown) @@ -621,6 +648,26 @@ def main_ssh(args): def get_args(): base_parser = argparse.ArgumentParser(add_help=False) base_parser.add_argument( + '-r', + action='store_true', + dest='r', + help='try to reuse FURL files. Use with --client-port and --engine-port' + ) + base_parser.add_argument( + '--client-port', + type=int, + dest='client_port', + help='the port the controller will listen on for client connections', + default=0 + ) + base_parser.add_argument( + '--engine-port', + type=int, + dest='engine_port', + help='the port the controller will listen on for engine connections', + default=0 + ) + base_parser.add_argument( '-x', action='store_true', dest='x', @@ -665,7 +712,7 @@ def get_args(): parser_mpirun = subparsers.add_parser( 'mpirun', - help='run a cluster using mpirun', + help='run a cluster using mpirun (mpiexec also works)', parents=[base_parser] ) parser_mpirun.add_argument( @@ -674,7 +721,20 @@ def get_args(): dest="mpi", # Don't put a default here to allow no MPI support help="how to call MPI_Init (default=mpi4py)" ) - parser_mpirun.set_defaults(func=main_mpirun) + parser_mpirun.set_defaults(func=main_mpi, cmd='mpirun') + + parser_mpiexec = subparsers.add_parser( + 'mpiexec', + help='run a cluster using mpiexec (mpirun also works)', + parents=[base_parser] + ) + parser_mpiexec.add_argument( + "--mpi", + type=str, + dest="mpi", # Don't put a default here to allow no MPI support + help="how to call MPI_Init (default=mpi4py)" + ) + parser_mpiexec.set_defaults(func=main_mpi, cmd='mpiexec') parser_pbs = subparsers.add_parser( 'pbs', diff --git a/IPython/testing/iptest.py b/IPython/testing/iptest.py index 14f8eba..50f9bee 100644 --- a/IPython/testing/iptest.py +++ b/IPython/testing/iptest.py @@ -61,7 +61,7 @@ def main(): # plugin needs to be gone through with a fine # toothed comb to find what is causing the problem. # '--with-ipdoctest', - '--doctest-tests','--doctest-extension=txt', + '--ipdoctest-tests','--ipdoctest-extension=txt', '--detailed-errors', # We add --exe because of setuptools' imbecility (it @@ -81,11 +81,13 @@ def main(): (':' in arg and '.py' in arg): has_tests = True break + # If nothing was specifically requested, test full IPython if not has_tests: argv.append('IPython') - # Construct list of plugins, omitting the existing doctest plugin. + # Construct list of plugins, omitting the existing doctest plugin, which + # ours replaces (and extends). plugins = [IPythonDoctest(EXCLUDE)] for p in nose.plugins.builtin.plugins: plug = p() diff --git a/IPython/testing/plugin/ipdoctest.py b/IPython/testing/plugin/ipdoctest.py index 64e6a2c..617de83 100644 --- a/IPython/testing/plugin/ipdoctest.py +++ b/IPython/testing/plugin/ipdoctest.py @@ -15,7 +15,6 @@ Limitations: won't even have these special _NN variables set at all. """ - #----------------------------------------------------------------------------- # Module imports @@ -123,6 +122,13 @@ class ipnsdict(dict): def start_ipython(): """Start a global IPython shell, which we need for IPython-specific syntax. """ + + # This function should only ever run once! 
+ if hasattr(start_ipython,'already_called'): + return + start_ipython.already_called = True + + # Ok, first time we're called, go ahead import new import IPython @@ -691,6 +697,7 @@ class ExtensionDoctest(doctests.Doctest): to exclude any filename which matches them from inclusion in the test suite (using pattern.search(), NOT pattern.match() ). """ + if exclude_patterns is None: exclude_patterns = [] self.exclude_patterns = map(re.compile,exclude_patterns) @@ -836,15 +843,33 @@ class IPythonDoctest(ExtensionDoctest): optionflags=optionflags, checker=self.checker) - def configure(self, options, config): + def options(self, parser, env=os.environ): + Plugin.options(self, parser, env) + parser.add_option('--ipdoctest-tests', action='store_true', + dest='ipdoctest_tests', + default=env.get('NOSE_IPDOCTEST_TESTS',True), + help="Also look for doctests in test modules. " + "Note that classes, methods and functions should " + "have either doctests or non-doctest tests, " + "not both. [NOSE_IPDOCTEST_TESTS]") + parser.add_option('--ipdoctest-extension', action="append", + dest="ipdoctest_extension", + help="Also look for doctests in files with " + "this extension [NOSE_IPDOCTEST_EXTENSION]") + # Set the default as a list, if given in env; otherwise + # an additional value set on the command line will cause + # an error. + env_setting = env.get('NOSE_IPDOCTEST_EXTENSION') + if env_setting is not None: + parser.set_defaults(ipdoctest_extension=tolist(env_setting)) + def configure(self, options, config): Plugin.configure(self, options, config) - self.doctest_tests = options.doctest_tests - self.extension = tolist(options.doctestExtension) + self.doctest_tests = options.ipdoctest_tests + self.extension = tolist(options.ipdoctest_extension) self.parser = IPDocTestParser() self.finder = DocTestFinder(parser=self.parser) self.checker = IPDoctestOutputChecker() self.globs = None self.extraglobs = None - diff --git a/IPython/testing/plugin/show_refs.py b/IPython/testing/plugin/show_refs.py index 11a441f..cbba10f 100644 --- a/IPython/testing/plugin/show_refs.py +++ b/IPython/testing/plugin/show_refs.py @@ -6,8 +6,9 @@ This is used by a companion test case. import gc class C(object): - def __del__(self): - print 'deleting object...' + def __del__(self): + pass + #print 'deleting object...' # dbg c = C() diff --git a/IPython/testing/plugin/test_refs.py b/IPython/testing/plugin/test_refs.py index ae9ba41..599bdcc 100644 --- a/IPython/testing/plugin/test_refs.py +++ b/IPython/testing/plugin/test_refs.py @@ -39,13 +39,10 @@ def doctest_ivars(): Out[6]: 1 """ -@dec.skip_doctest +#@dec.skip_doctest def doctest_refs(): """DocTest reference holding issues when running scripts. In [32]: run show_refs.py c referrers: [] - - In [33]: map(type,gc.get_referrers(c)) - Out[33]: [] """ diff --git a/IPython/tests/obj_del.py b/IPython/tests/obj_del.py index 089182e..8ea9d18 100644 --- a/IPython/tests/obj_del.py +++ b/IPython/tests/obj_del.py @@ -26,7 +26,7 @@ import sys class A(object): def __del__(self): - print 'object A deleted' + print 'obj_del.py: object A deleted' a = A() diff --git a/IPython/tests/refbug.py b/IPython/tests/refbug.py new file mode 100644 index 0000000..99aca19 --- /dev/null +++ b/IPython/tests/refbug.py @@ -0,0 +1,41 @@ +"""Minimal script to reproduce our nasty reference counting bug. + +The problem is related to https://bugs.launchpad.net/ipython/+bug/269966 + +The original fix for that appeared to work, but John D. Hunter found a +matplotlib example which, when run twice in a row, would break. 
The problem +were references held by open figures to internals of Tkinter. + +This code reproduces the problem that John saw, without matplotlib. + +This script is meant to be called by other parts of the test suite that call it +via %run as if it were executed interactively by the user. As of 2009-04-13, +test_magic.py calls it. +""" + +#----------------------------------------------------------------------------- +# Module imports +#----------------------------------------------------------------------------- +import sys + +from IPython import ipapi + +#----------------------------------------------------------------------------- +# Globals +#----------------------------------------------------------------------------- +ip = ipapi.get() + +if not '_refbug_cache' in ip.user_ns: + ip.user_ns['_refbug_cache'] = [] + + +aglobal = 'Hello' +def f(): + return aglobal + +cache = ip.user_ns['_refbug_cache'] +cache.append(f) + +def call_f(): + for func in cache: + print 'lowercased:',func().lower() diff --git a/IPython/tests/tclass.py b/IPython/tests/tclass.py index 1e8e1dd..5f3bb24 100644 --- a/IPython/tests/tclass.py +++ b/IPython/tests/tclass.py @@ -16,11 +16,12 @@ class C(object): self.name = name def __del__(self): - print 'Deleting object:',self.name + print 'tclass.py: deleting object:',self.name try: name = sys.argv[1] except IndexError: pass else: - c = C(name) + if name.startswith('C'): + c = C(name) diff --git a/IPython/tests/test_fakemodule.py b/IPython/tests/test_fakemodule.py new file mode 100644 index 0000000..6325439 --- /dev/null +++ b/IPython/tests/test_fakemodule.py @@ -0,0 +1,17 @@ +"""Tests for the FakeModule objects. +""" + +import nose.tools as nt + +from IPython.FakeModule import FakeModule, init_fakemod_dict + +# Make a fakemod and check a few properties +def test_mk_fakemod(): + fm = FakeModule() + yield nt.assert_true,fm + yield nt.assert_true,lambda : hasattr(fm,'__file__') + +def test_mk_fakemod_fromdict(): + """Test making a FakeModule object with initial data""" + fm = FakeModule(dict(hello=True)) + nt.assert_true(fm.hello) diff --git a/IPython/tests/test_iplib.py b/IPython/tests/test_iplib.py index 42fa1b6..0924e1b 100644 --- a/IPython/tests/test_iplib.py +++ b/IPython/tests/test_iplib.py @@ -1,17 +1,68 @@ """Tests for the key iplib module, where the main ipython class is defined. """ +#----------------------------------------------------------------------------- +# Module imports +#----------------------------------------------------------------------------- +# stdlib +import os +import shutil +import tempfile + +# third party import nose.tools as nt +# our own packages +from IPython import iplib + +#----------------------------------------------------------------------------- +# Globals +#----------------------------------------------------------------------------- + +# Useful global ipapi object and main IPython one. Unfortunately we have a +# long precedent of carrying the 'ipapi' global object which is injected into +# the system namespace as _ip, but that keeps a pointer to the actual IPython +# InteractiveShell instance, which is named IP. Since in testing we do need +# access to the real thing (we want to probe beyond what ipapi exposes), make +# here a global reference to each. In general, things that are exposed by the +# ipapi instance should be read from there, but we also will often need to use +# the actual IPython one. + +ip = _ip # This is the ipapi instance +IP = ip.IP # This is the actual IPython shell 'raw' object. 
+ +#----------------------------------------------------------------------------- +# Test functions +#----------------------------------------------------------------------------- def test_reset(): """reset must clear most namespaces.""" - ip = _ip.IP - ip.reset() # first, it should run without error + IP.reset() # first, it should run without error # Then, check that most namespaces end up empty - for ns in ip.ns_refs_table: - if ns is ip.user_ns: + for ns in IP.ns_refs_table: + if ns is IP.user_ns: # The user namespace is reset with some data, so we can't check for # it being empty continue nt.assert_equals(len(ns),0) + + +# make sure that user_setup can be run re-entrantly in 'install' mode. +def test_user_setup(): + # use a lambda to pass kwargs to the generator + user_setup = lambda a,k: iplib.user_setup(*a,**k) + kw = dict(mode='install', interactive=False) + + # Call the user setup and verify that the directory exists + yield user_setup, (ip.options.ipythondir,''), kw + yield os.path.isdir, ip.options.ipythondir + + # Now repeat the operation with a non-existent directory. Check both that + # the call succeeds and that the directory is created. + tmpdir = tempfile.mktemp(prefix='ipython-test-') + try: + yield user_setup, (tmpdir,''), kw + yield os.path.isdir, tmpdir + finally: + # In this case, clean up the temp dir once done + shutil.rmtree(tmpdir) diff --git a/IPython/tests/test_magic.py b/IPython/tests/test_magic.py index eeb175c..da5a498 100644 --- a/IPython/tests/test_magic.py +++ b/IPython/tests/test_magic.py @@ -37,7 +37,7 @@ def test_rehashx(): def doctest_run_ns(): """Classes declared %run scripts must be instantiable afterwards. - In [11]: run tclass + In [11]: run tclass foo In [12]: isinstance(f(),foo) Out[12]: True @@ -47,12 +47,10 @@ def doctest_run_ns(): def doctest_run_ns2(): """Classes declared %run scripts must be instantiable afterwards. - In [3]: run tclass.py + In [4]: run tclass C-first_pass - In [4]: run tclass first_pass - - In [5]: run tclass second_pass - Deleting object: first_pass + In [5]: run tclass C-second_pass + tclass.py: deleting object: C-first_pass """ @@ -85,7 +83,7 @@ def test_obj_del(): test_dir = os.path.dirname(__file__) del_file = os.path.join(test_dir,'obj_del.py') out = _ip.IP.getoutput('ipython %s' % del_file) - nt.assert_equals(out,'object A deleted') + nt.assert_equals(out,'obj_del.py: object A deleted') def test_shist(): @@ -133,3 +131,21 @@ def test_fail_dec2(*a,**k): def test_fail_dec3(*a,**k): yield nt.assert_true, False + +def doctest_refbug(): + """Very nasty problem with references held by multiple runs of a script. + See: https://bugs.launchpad.net/ipython/+bug/269966 + + In [1]: _ip.IP.clear_main_mod_cache() + + In [2]: run refbug + + In [3]: call_f() + lowercased: hello + + In [4]: run refbug + + In [5]: call_f() + lowercased: hello + lowercased: hello + """ diff --git a/docs/source/changes.txt b/docs/source/changes.txt index 2408bbf..c0e146e 100644 --- a/docs/source/changes.txt +++ b/docs/source/changes.txt @@ -75,6 +75,9 @@ Bug fixes The block is assigned to pasted_block even if code raises exception. +* Bug #274067 'The code in get_home_dir is broken for py2exe' was + fixed. + Backwards incompatible changes ------------------------------ diff --git a/docs/source/development/overview.txt b/docs/source/development/overview.txt index b4552b8..642fbbd 100644 --- a/docs/source/development/overview.txt +++ b/docs/source/development/overview.txt @@ -345,6 +345,37 @@ nosetests option. 
For example, you can use ``--pdb`` or ``--pdb-failures`` to automatically activate the interactive Pdb debugger on errors or failures. See the nosetests documentation for further details. +.. warning:: + + Note that right now we have a nasty interaction between ipdoctest and + twisted. Until we figure this out, please use the following instructions to + ensure that at least you run all the tests. + +Right now, if you now run:: + + $ iptest [any options] [any submodules] + +it will NOT load ipdoctest but won't cause any Twisted problems. + +Once you're happy that you didn't break Twisted, run:: + + $ iptest --with-ipdoctest [any options] [any submodules] + +This MAY give a Twisted AlreadyCalledError exception at the end, but it will +also correctly load up all of the ipython-specific tests and doctests. + +The above can be made easier with a trivial shell alias:: + + $ alias iptest2='iptest --with-ipdoctest' + +So that you can run:: + + $ iptest ... + # Twisted happy + # iptest2 ... + # ignore possible Twisted error, this checks all the rest. + + A few tips for writing tests ---------------------------- diff --git a/docs/source/parallel/parallel_mpi.txt b/docs/source/parallel/parallel_mpi.txt index d09bf44..4df70f3 100644 --- a/docs/source/parallel/parallel_mpi.txt +++ b/docs/source/parallel/parallel_mpi.txt @@ -32,34 +32,34 @@ Starting the engines with MPI enabled To use code that calls MPI, there are typically two things that MPI requires. 1. The process that wants to call MPI must be started using - :command:`mpirun` or a batch system (like PBS) that has MPI support. + :command:`mpiexec` or a batch system (like PBS) that has MPI support. 2. Once the process starts, it must call :func:`MPI_Init`. There are a couple of ways that you can start the IPython engines and get these things to happen. -Automatic starting using :command:`mpirun` and :command:`ipcluster` +Automatic starting using :command:`mpiexec` and :command:`ipcluster` ------------------------------------------------------------------- -The easiest approach is to use the `mpirun` mode of :command:`ipcluster`, which will first start a controller and then a set of engines using :command:`mpirun`:: +The easiest approach is to use the `mpiexec` mode of :command:`ipcluster`, which will first start a controller and then a set of engines using :command:`mpiexec`:: - $ ipcluster mpirun -n 4 + $ ipcluster mpiexec -n 4 This approach is best as interrupting :command:`ipcluster` will automatically stop and clean up the controller and engines. -Manual starting using :command:`mpirun` +Manual starting using :command:`mpiexec` --------------------------------------- -If you want to start the IPython engines using the :command:`mpirun`, just do:: +If you want to start the IPython engines using the :command:`mpiexec`, just do:: - $ mpirun -n 4 ipengine --mpi=mpi4py + $ mpiexec -n 4 ipengine --mpi=mpi4py This requires that you already have a controller running and that the FURL files for the engines are in place. 
We also have built in support for PyTrilinos [PyTrilinos]_, which can be used (assuming is installed) by starting the engines with:: - mpirun -n 4 ipengine --mpi=pytrilinos + mpiexec -n 4 ipengine --mpi=pytrilinos Automatic starting using PBS and :command:`ipcluster` ----------------------------------------------------- @@ -84,7 +84,7 @@ First, lets define a simply function that uses MPI to calculate the sum of a dis Now, start an IPython cluster in the same directory as :file:`psum.py`:: - $ ipcluster mpirun -n 4 + $ ipcluster mpiexec -n 4 Finally, connect to the cluster and use this function interactively. In this case, we create a random array on each engine and sum up all the random arrays using our :func:`psum` function: diff --git a/docs/source/parallel/parallel_process.txt b/docs/source/parallel/parallel_process.txt index d35ffc9..3884d89 100644 --- a/docs/source/parallel/parallel_process.txt +++ b/docs/source/parallel/parallel_process.txt @@ -85,33 +85,40 @@ To see other command line options for the local mode, do:: $ ipcluster local -h -Using :command:`ipcluster` in mpirun mode ------------------------------------------ +Using :command:`ipcluster` in mpiexec/mpirun mode +------------------------------------------------- -The mpirun mode is useful if you: +The mpiexec/mpirun mode is useful if you: 1. Have MPI installed. -2. Your systems are configured to use the :command:`mpirun` command to start - processes. +2. Your systems are configured to use the :command:`mpiexec` or + :command:`mpirun` commands to start MPI processes. + +.. note:: + + The preferred command to use is :command:`mpiexec`. However, we also + support :command:`mpirun` for backwards compatibility. The underlying + logic used is exactly the same, the only difference being the name of the + command line program that is called. If these are satisfied, you can start an IPython cluster using:: - $ ipcluster mpirun -n 4 + $ ipcluster mpiexec -n 4 This does the following: 1. Starts the IPython controller on current host. -2. Uses :command:`mpirun` to start 4 engines. +2. Uses :command:`mpiexec` to start 4 engines. On newer MPI implementations (such as OpenMPI), this will work even if you don't make any calls to MPI or call :func:`MPI_Init`. However, older MPI implementations actually require each process to call :func:`MPI_Init` upon starting. The easiest way of having this done is to install the mpi4py [mpi4py]_ package and then call ipcluster with the ``--mpi`` option:: - $ ipcluster mpirun -n 4 --mpi=mpi4py + $ ipcluster mpiexec -n 4 --mpi=mpi4py Unfortunately, even this won't work for some MPI implementations. If you are having problems with this, you will likely have to use a custom Python executable that itself calls :func:`MPI_Init` at the appropriate time. Fortunately, mpi4py comes with such a custom Python executable that is easy to install and use. However, this custom Python executable approach will not work with :command:`ipcluster` currently. Additional command line options for this mode can be found by doing:: - $ ipcluster mpirun -h + $ ipcluster mpiexec -h More details on using MPI with IPython can be found :ref:`here `. @@ -301,6 +308,11 @@ This is possible. The only thing you have to do is decide what ports the contro $ ipcontroller -r --client-port=10101 --engine-port=10102 +These options also work with all of the various modes of +:command:`ipcluster`:: + + $ ipcluster local -n 2 -r --client-port=10101 --engine-port=10102 + Then, just copy the furl files over the first time and you are set. 
You can start and stop the controller and engines as many times as you want in the future, just make sure to tell the controller to use the *same* ports.
 
 .. note::
diff --git a/docs/sphinxext/only_directives.py b/docs/sphinxext/only_directives.py
index e4dfd5c..57d70a4 100644
--- a/docs/sphinxext/only_directives.py
+++ b/docs/sphinxext/only_directives.py
@@ -5,9 +5,15 @@
 from docutils.nodes import Body, Element
 from docutils.writers.html4css1 import HTMLTranslator
-from sphinx.latexwriter import LaTeXTranslator
 from docutils.parsers.rst import directives
 
+# The sphinx API has changed, so we try both the old and new import forms
+try:
+    from sphinx.latexwriter import LaTeXTranslator
+except ImportError:
+    from sphinx.writers.latex import LaTeXTranslator
+
+
 class html_only(Body, Element):
     pass
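
For orientation only (not part of the patch): the central mechanism of this changeset is the switch from caching whole FakeModule objects to caching copies of their namespaces, via init_fakemod_dict(), new_main_mod() and cache_main_mod() in IPython/FakeModule.py and IPython/iplib.py above. The sketch below condenses that scheme; ShellSketch, the simplified FakeModule.__init__, and the 'script.py' filename are stand-ins invented for illustration, not names from the patch.

# --- Illustrative sketch (simplified stand-in code, see note above) ------
import os
import types

def init_fakemod_dict(fm, adict=None):
    # Reset the fake module's __dict__ to a minimal, known-good state.
    dct = {'__nonzero__': lambda: True, '__file__': __file__}
    if adict is not None:
        dct.update(adict)
    fm.__dict__.clear()      # reuse the same module object across runs...
    fm.__dict__.update(dct)  # ...but start each run from a fresh namespace

class FakeModule(types.ModuleType):
    def __init__(self, adict=None):
        types.ModuleType.__init__(self, '__main__')
        init_fakemod_dict(self, adict)

class ShellSketch(object):
    # Stand-in for the parts of InteractiveShell touched by the patch.
    def __init__(self):
        self._main_ns_cache = {}               # last namespace per script path
        self._user_main_module = FakeModule()  # single reusable module

    def new_main_mod(self, ns=None):
        # %run always executes inside the same FakeModule instance.
        init_fakemod_dict(self._user_main_module, ns)
        return self._user_main_module

    def cache_main_mod(self, ns, fname):
        # Cache a *copy* of the namespace, keyed by script path, so the
        # shared FakeModule can be cleared and reused on the next %run
        # while objects from the last run stay reachable.
        self._main_ns_cache[os.path.abspath(fname)] = ns.copy()

if __name__ == '__main__':
    shell = ShellSketch()
    mod = shell.new_main_mod()
    mod.__dict__['x'] = 42                           # pretend %run defined 'x'
    shell.cache_main_mod(mod.__dict__, 'script.py')  # hypothetical filename
    assert len(shell._main_ns_cache) == 1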