From dstanek at codespeak.net Fri Jun 3 13:01:11 2005 From: dstanek at codespeak.net (dstanek at codespeak.net) Date: Fri, 3 Jun 2005 13:01:11 +0200 (CEST) Subject: [py-svn] r13033 - py/dist/py/code Message-ID: <20050603110111.478FD27B4C@code1.codespeak.net> Author: dstanek Date: Fri Jun 3 13:01:11 2005 New Revision: 13033 Modified: py/dist/py/code/frame.py Log: Removed duplication in frame.py by removing the Code class and instead using py.code.Code. Modified: py/dist/py/code/frame.py ============================================================================== --- py/dist/py/code/frame.py (original) +++ py/dist/py/code/frame.py Fri Jun 3 13:01:11 2005 @@ -1,42 +1,12 @@ import py -class Code(object): - def __init__(self, f_code): - f_code = getattr(f_code, 'im_func', f_code) - f_code = getattr(f_code, 'func_code', f_code) - self._raw = f_code - try: - self.firstlineno = f_code.co_firstlineno - 1 - except AttributeError: - raise TypeError("not a code object: %r" %(f_code,)) - self.name = f_code.co_name - - def path(self): - try: - return self._raw.co_filename.__path__ - except AttributeError: - try: - return py.path.local(self._raw.co_filename) - except ValueError: - return None - path = property(path, None, None, "path to source of this code object") - - def fullsource(self): - fn = self._raw.co_filename - try: - return fn.__source__ - except AttributeError: - return py.code.Source(self.path.read(mode="rU")) - fullsource = property(fullsource, None, None, - "full source representing this code object") - class Frame(object): """Wrapper around a Python frame holding f_locals and f_globals in which expressions can be evaluated.""" def __init__(self, frame): - self.code = Code(frame.f_code) + self.code = py.code.Code(frame.f_code) self.lineno = frame.f_lineno - 1 self.f_globals = frame.f_globals self.f_locals = frame.f_locals From hpk at codespeak.net Sun Jun 5 17:01:24 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sun, 5 Jun 2005 17:01:24 +0200 (CEST) Subject: [py-svn] r13075 - py/dist/py/documentation Message-ID: <20050605150124.7892627B46@code1.codespeak.net> Author: hpk Date: Sun Jun 5 17:01:24 2005 New Revision: 13075 Modified: py/dist/py/documentation/TODO.txt Log: random todo update Modified: py/dist/py/documentation/TODO.txt ============================================================================== --- py/dist/py/documentation/TODO.txt (original) +++ py/dist/py/documentation/TODO.txt Sun Jun 5 17:01:24 2005 @@ -4,7 +4,7 @@ distutils install ----------------- -* see if things work on Win32 +* see if things work on Win32 (partially done) * do something about c-extensions both on unix-ish and win32 systems @@ -14,20 +14,20 @@ ------- * adjust py.test documentation to reflect new - collector/session architecture + collector/session architecture (mostly done) -* document py.test's conftest.py approach +* document py.test's conftest.py approach (somewhat done) -* put Armin's collect class into py.__builtin__ +* put Armin's collect class into py.__builtin__ (not done) -* try get rid of Collect.tryiter() in favour of +* try get rid of Collect.tryiter() in favour of (not done) using Armin's collect class -* hide py.test.TerminalSession and TkinterSession? +* hide py.test.TerminalSession and TkinterSession? 
(questionable) misc ---- -* get Armin or Christian to fix greenlets on +* get Armin or Christian to fix greenlets on (no clue) recent gcc's (compile fails at least for switch_x86_unix.h) From ggheo at codespeak.net Mon Jun 6 16:02:50 2005 From: ggheo at codespeak.net (ggheo at codespeak.net) Date: Mon, 6 Jun 2005 16:02:50 +0200 (CEST) Subject: [py-svn] r13107 - py/dist/py/misc Message-ID: <20050606140250.A7C6F27B51@code1.codespeak.net> Author: ggheo Date: Mon Jun 6 16:02:50 2005 New Revision: 13107 Modified: py/dist/py/misc/_dist.py Log: Replace %SystemRoot% with actual value when modifying the PATH registry value. Modified: py/dist/py/misc/_dist.py ============================================================================== --- py/dist/py/misc/_dist.py (original) +++ py/dist/py/misc/_dist.py Mon Jun 6 16:02:50 2005 @@ -1,5 +1,5 @@ import py -import sys, os +import sys, os, re from distutils import sysconfig from distutils import core @@ -100,7 +100,9 @@ reg = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) key = r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment" path = get_registry_value(reg, key, "Path") - path += ";" + bindir + path += ";" + bindir + # Replace %SystemRoot% with actual value + path = replace_systemroot(path) print "Setting PATH to:", path set_registry_value(reg, key, "Path", path) #print "Current PATH is:", get_registry_value(reg, key, "Path") @@ -123,6 +125,18 @@ k = _winreg.OpenKey(reg, key, 0, _winreg.KEY_WRITE) _winreg.SetValueEx(k, value_name, 0, _winreg.REG_SZ, value) _winreg.CloseKey(k) + +def replace_systemroot(path): + dirs = path.split(';') + try: + systemroot = os.environ['SYSTEMROOT'] + except KeyError: + pass + else: + dirs = [re.sub('%SystemRoot%', systemroot, dir) + for dir in dirs] + path = ';'.join(dirs) + return path ### end helpers From ggheo at codespeak.net Mon Jun 6 16:13:10 2005 From: ggheo at codespeak.net (ggheo at codespeak.net) Date: Mon, 6 Jun 2005 16:13:10 +0200 (CEST) Subject: [py-svn] r13109 - py/dist/py/execnet Message-ID: <20050606141310.D7CD827B48@code1.codespeak.net> Author: ggheo Date: Mon Jun 6 16:13:10 2005 New Revision: 13109 Modified: py/dist/py/execnet/register.py Log: Added sys.exc_info() to trace if IOError during closing of the channel. Fixed order of arguments in self.trace("child process %s already dead? error:%s"). Modified: py/dist/py/execnet/register.py ============================================================================== --- py/dist/py/execnet/register.py (original) +++ py/dist/py/execnet/register.py Mon Jun 6 16:13:10 2005 @@ -62,9 +62,10 @@ self._pidchannel.waitclose(timeout=0.5) pid = self._pidchannel.receive() except IOError: - self.trace("could not receive child PID") - pid = None - super(PopenCmdGateway, self).exit() + self.trace("IOError: could not receive child PID") + self.trace(sys.exc_info()) + pid = None + super(PopenCmdGateway, self).exit() if pid is not None: self.trace("waiting for pid %s" % pid) try: @@ -75,7 +76,7 @@ raise except OSError, e: self.trace("child process %s already dead? error:%s" % - (str(e), pid)) + (pid, str(e))) class PopenGateway(PopenCmdGateway): # use sysfind/sysexec/subprocess instead of os.popen? 
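For reference, a minimal illustration (standard library only, not part of the r13109 patch) of what the added trace call records: inside an except block, sys.exc_info() returns the (type, value, traceback) triple of the exception currently being handled, so the reason the channel close failed now shows up in the gateway's trace output.

    import sys

    try:
        raise IOError("could not receive child PID")
    except IOError:
        # sys.exc_info() -> (exc_type, exc_value, traceback) for the
        # exception being handled; this is the tuple the gateway now
        # hands to self.trace()
        exc_type, exc_value, tb = sys.exc_info()
        print exc_type.__name__, exc_value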
From hpk at codespeak.net Wed Jun 8 15:39:21 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Wed, 8 Jun 2005 15:39:21 +0200 (CEST) Subject: [py-svn] r13182 - py/dist/py/documentation Message-ID: <20050608133921.50B1227B4E@code1.codespeak.net> Author: hpk Date: Wed Jun 8 15:39:20 2005 New Revision: 13182 Modified: py/dist/py/documentation/future.txt Log: remove link-checking chapter because that is implemented Modified: py/dist/py/documentation/future.txt ============================================================================== --- py/dist/py/documentation/future.txt (original) +++ py/dist/py/documentation/future.txt Wed Jun 8 15:39:20 2005 @@ -327,22 +327,6 @@ .. _`CPython's distutils`: http://www.python.org/dev/doc/devel/lib/module-distutils.html -Extending rest-tests to check links -=================================== - -Currently if you have properly set up your py lib (see `getting started`_) -you can go to the ``doc/`` subtree and execute ``py.test`` in order -to test that the `restructured text`_ documentation does not -produce errors or warnings. There is a ``test_rest.py`` script -with a custom collector that does it. It would be nice if it -parsed the generated html and checked that all links actually -resolve correctly to ensure high quality of documentation. - -However, this is crossconnected with `a more general view on path objects`_ -and `lightweight xml generation`_ as the codification of these -visions would make the link-checker a snap. It could serve -as a nice script in our ``example`` tree. - .. _`getting started`: getting-started.html .. _`restructured text`: http://docutils.sourceforge.net/docs/user/rst/quickref.html .. _`python standard library`: http://www.python.org/doc/2.3.4/lib/lib.html From ggheo at codespeak.net Wed Jun 8 16:05:10 2005 From: ggheo at codespeak.net (ggheo at codespeak.net) Date: Wed, 8 Jun 2005 16:05:10 +0200 (CEST) Subject: [py-svn] r13183 - py/dist/py/misc Message-ID: <20050608140510.4F7EA27B59@code1.codespeak.net> Author: ggheo Date: Wed Jun 8 16:05:10 2005 New Revision: 13183 Modified: py/dist/py/misc/_dist.py Log: Deleted replace_systemroot function and instead set the value type for the Path registry value to REG_EXPAND_SZ, so that things like %SystemRoot% get automatically expanded by the command prompt. 
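To illustrate the effect (a hypothetical read-back, not part of the change below): once the value is written as REG_EXPAND_SZ, querying it returns the string with %SystemRoot% still embedded literally; the expansion is performed later by the command prompt / environment builder when PATH is constructed.

    import _winreg

    # hypothetical read-back; the key and value name match the patch below,
    # but this snippet itself is only a sketch
    reg = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
    key = r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment"
    k = _winreg.OpenKey(reg, key)
    value, value_type = _winreg.QueryValueEx(k, "Path")
    _winreg.CloseKey(k)

    assert value_type == _winreg.REG_EXPAND_SZ
    print value   # still contains the literal %SystemRoot% entries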
Modified: py/dist/py/misc/_dist.py ============================================================================== --- py/dist/py/misc/_dist.py (original) +++ py/dist/py/misc/_dist.py Wed Jun 8 16:05:10 2005 @@ -101,8 +101,6 @@ key = r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment" path = get_registry_value(reg, key, "Path") path += ";" + bindir - # Replace %SystemRoot% with actual value - path = replace_systemroot(path) print "Setting PATH to:", path set_registry_value(reg, key, "Path", path) #print "Current PATH is:", get_registry_value(reg, key, "Path") @@ -122,22 +120,16 @@ return value def set_registry_value(reg, key, value_name, value): - k = _winreg.OpenKey(reg, key, 0, _winreg.KEY_WRITE) - _winreg.SetValueEx(k, value_name, 0, _winreg.REG_SZ, value) + k = _winreg.OpenKey(reg, key, 0, _winreg.KEY_WRITE) + value_type = _winreg.REG_SZ + # if we handle the Path value, then set its type to REG_EXPAND_SZ + # so that things like %SystemRoot% get automatically expanded by the + # command prompt + if value_name == "Path": + value_type = _winreg.REG_EXPAND_SZ + _winreg.SetValueEx(k, value_name, 0, value_type, value) _winreg.CloseKey(k) -def replace_systemroot(path): - dirs = path.split(';') - try: - systemroot = os.environ['SYSTEMROOT'] - except KeyError: - pass - else: - dirs = [re.sub('%SystemRoot%', systemroot, dir) - for dir in dirs] - path = ';'.join(dirs) - return path - ### end helpers def setup(pkg, **kw): From hpk at codespeak.net Thu Jun 9 11:48:09 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Thu, 9 Jun 2005 11:48:09 +0200 (CEST) Subject: [py-svn] r13223 - in py/dist/py/misc: . testing Message-ID: <20050609094809.081FA27B80@code1.codespeak.net> Author: hpk Date: Thu Jun 9 11:48:08 2005 New Revision: 13223 Added: py/dist/py/misc/testing/test_trace.py py/dist/py/misc/trace.py Log: a first stab at implementing a _minimalistic_ tracing functionality as discussed on py-dev. It's already a bit convoluted (although it has only 54 lines of code :-) Added: py/dist/py/misc/testing/test_trace.py ============================================================================== --- (empty file) +++ py/dist/py/misc/testing/test_trace.py Thu Jun 9 11:48:08 2005 @@ -0,0 +1,41 @@ + +import py + +class TestTracing: + def setup_method(self, meth): + self.state = py.trace._getstate_() + def teardown_method(self, meth): + py.trace._setstate_(self.state) + + def test_tracer_repr(self): + t = py.trace.debug + assert repr(t).find('debug') != -1 + + def test_trace_one_keyword(self): + t = py.trace.debug + l = [] + def f(msg): + l.append(msg) + py.trace['debug'] = f + py.trace.debug("hello world") + assert len(l) == 1 + msg = l[0] + assert msg.content().startswith('hello world') + assert msg.prefix() == '[debug] ' + assert str(msg) == "[debug] hello world" + + def test_trace_default_writer(self): + l = [] + def f(msg): + l.append(msg) + + py.trace[...] 
= f + py.trace.debug("hello") + py.trace.warn("world") + assert len(l) == 2 + msg1, msg2 = l + + assert 'debug' in msg1.keywords + assert 'warn' in msg2.keywords + assert msg1.content() == 'hello' + assert msg2.content() == 'world' Added: py/dist/py/misc/trace.py ============================================================================== --- (empty file) +++ py/dist/py/misc/trace.py Thu Jun 9 11:48:08 2005 @@ -0,0 +1,69 @@ +""" +py lib's basic tracing functionality + + EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL (especially the dispatching) + +WARNING: this module is not allowed to contain any 'py' imports, + Instead, it is very self-contained and should not depend on + CPython/stdlib versions, either. One reason for these + restrictions is that this module should be sendable + via py.execnet across the network in an very early phase. +""" + +class Message(object): + def __init__(self, keywords, args): + self.keywords = keywords + self.args = args + + def content(self): + return " ".join(map(str, self.args)) + + def prefix(self): + return "[%s] " % (":".join(self.keywords)) + + def __str__(self): + return self.prefix() + self.content() + +class Tracer(object): + Message = Message # to allow later customization + _traceregistry = {} + + def __init__(self, keywords=()): + self.keywords = keywords + + def __repr__(self): + return "" % ":".join(self.keywords) + + def __getattr__(self, name): + if name[0] == '_': + raise AttributeError, name + return Tracer(self.keywords + (name,)) + + def __setitem__(self, name, func): + assert callable(func) + keywords = self.keywords + (name,) + self._traceregistry[keywords] = func + + def __call__(self, *args): + message = self.Message(self.keywords, args) + try: + func = self._traceregistry[message.keywords] + except KeyError: + # XXX find best match, for now it's a hack/simplistic + try: + func = self._traceregistry[(Ellipsis,)] + except KeyError: + print str(message) + return + func(message) + + # class methods dealing with registry + def _getstate_(cls): + return cls._traceregistry.copy() + _getstate_ = classmethod(_getstate_) + + def _setstate_(cls, state): + cls._traceregistry = state + _setstate_ = classmethod(_setstate_) + +trace = Tracer() From hpk at codespeak.net Thu Jun 9 11:48:26 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Thu, 9 Jun 2005 11:48:26 +0200 (CEST) Subject: [py-svn] r13224 - py/dist/py Message-ID: <20050609094826.C09D827B84@code1.codespeak.net> Author: hpk Date: Thu Jun 9 11:48:26 2005 New Revision: 13224 Modified: py/dist/py/__init__.py Log: forgot the __init__ export Modified: py/dist/py/__init__.py ============================================================================== --- py/dist/py/__init__.py (original) +++ py/dist/py/__init__.py Thu Jun 9 11:48:26 2005 @@ -98,4 +98,7 @@ 'xml.Tag' : ('./xmlobj/xml.py', 'Tag'), 'xml.Namespace' : ('./xmlobj/xml.py', 'Namespace'), 'xml.escape' : ('./xmlobj/misc.py', 'escape'), + + # trace + 'trace' : ('./misc/trace.py', 'trace'), }) From ggheo at codespeak.net Sat Jun 11 01:06:46 2005 From: ggheo at codespeak.net (ggheo at codespeak.net) Date: Sat, 11 Jun 2005 01:06:46 +0200 (CEST) Subject: [py-svn] r13284 - py/dist/py/misc Message-ID: <20050610230646.5CEB827B4E@code1.codespeak.net> Author: ggheo Date: Sat Jun 11 01:06:46 2005 New Revision: 13284 Added: py/dist/py/misc/log.py Log: EXPERIMENTAL! First cut at logging module which represents the 'consumer' side of py.trace. This module uses the standard Python logging module as the back-end. 
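Before the diff, a small usage sketch of how this consumer is meant to plug into the py.trace producer added in r13223 above; it only uses the APIs visible in this commit and in the tests of r13285 below.

    import py

    py.trace[...] = py.log.stdout       # default consumer: log via stdout
    py.trace['error'] = py.log.stderr   # 'error' messages go to stderr instead
    py.trace.debug("hello world")       # -> "[debug] hello world" on stdout
    py.trace.error("something broke")   # -> "[error] something broke" on stderr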
Added: py/dist/py/misc/log.py ============================================================================== --- (empty file) +++ py/dist/py/misc/log.py Sat Jun 11 01:06:46 2005 @@ -0,0 +1,67 @@ +import py +import sys, logging + +class Logger(object): + _logregistry = {} + + def __init__(self): + self.formatter = logging.Formatter('%(message)s') + + def __getattr__(self, name): + if name == "stdout": + logger_stdout = logging.getLogger('py.log.stdout') + stdout_handler = logging.StreamHandler(sys.stdout) + stdout_handler.setFormatter(self.formatter) + logger_stdout.addHandler(stdout_handler) + logger_stdout.setLevel(logging.DEBUG) + def stdout_tracer(message): + self.log_message(logger_stdout, message) + return stdout_tracer + if name == "stderr": + logger_stderr = logging.getLogger('py.log.stderr') + stderr_handler = logging.StreamHandler(sys.stderr) + stderr_handler.setFormatter(self.formatter) + logger_stderr.addHandler(stderr_handler) + logger_stderr.setLevel(logging.DEBUG) + def stderr_tracer(message): + self.log_message(logger_stderr, message) + return stderr_tracer + elif name == "file": + filename = self._logregistry.get('file', 'py_log.out') + file_handler = logging.FileHandler(filename) + file_handler.setFormatter(self.formatter) + logger_file = logging.getLogger('py.log.file') + logger_file.addHandler(file_handler) + logger_file.setLevel(logging.DEBUG) + def file_tracer(message): + self.log_message(logger_file, message) + return file_tracer + else: + raise AttributeError, name + + def __setitem__(self, name, value): + self._logregistry[name] = value + + def log_message(self, logger, message): + for keyword in message.keywords: + if keyword.startswith('debug'): + logger.debug(message) + if keyword.startswith('info'): + logger.info(message) + if keyword.startswith('warn'): + logger.warn(message) + if keyword.startswith('err'): + logger.error(message) + if keyword.startswith('crit'): + logger.critical(message) + + # class methods dealing with registry + def _getstate_(cls): + return cls._logregistry.copy() + _getstate_ = classmethod(_getstate_) + + def _setstate_(cls, state): + cls._logregistry = state + _setstate_ = classmethod(_setstate_) + +log = Logger() From ggheo at codespeak.net Sat Jun 11 01:07:11 2005 From: ggheo at codespeak.net (ggheo at codespeak.net) Date: Sat, 11 Jun 2005 01:07:11 +0200 (CEST) Subject: [py-svn] r13285 - py/dist/py/misc/testing Message-ID: <20050610230711.7BA1F27B4E@code1.codespeak.net> Author: ggheo Date: Sat Jun 11 01:07:11 2005 New Revision: 13285 Added: py/dist/py/misc/testing/test_log.py Log: Unit tests/usage examples for the log module. Added: py/dist/py/misc/testing/test_log.py ============================================================================== --- (empty file) +++ py/dist/py/misc/testing/test_log.py Sat Jun 11 01:07:11 2005 @@ -0,0 +1,86 @@ + +import py +import sys + +class TestLogging: + def setup_method(self, meth): + self.state = py.log._getstate_() + def teardown_method(self, meth): + py.log._setstate_(self.state) + + def test_log_stdout(self): + # We redirect stdout so that we can verify that + # the log messages have been printed to it + redirect = 'py_stdout.out' + sys.saved = sys.stdout + sys.stdout = open(redirect, 'w') + + # Start of the 'consumer' code + py.trace[...] 
= py.log.stdout + py.trace.debug("hello world") + py.trace.info("hello world") + py.trace.warn("hello world") + py.trace.error("hello world") + py.trace.critical("hello world") + # End of the 'consumer' code + + sys.stdout = sys.saved + lines = open(redirect).readlines() + assert lines == ['[debug] hello world\n', '[info] hello world\n', + '[warn] hello world\n', '[error] hello world\n', + '[critical] hello world\n'] + + def test_log_stderr(self): + # We redirect stderr so that we can verify that + # the log messages have been printed to it + redirect = 'py_stderr.out' + sys.saved = sys.stderr + sys.stderr = open(redirect, 'w') + + # Start of the 'consumer' code + py.trace[...] = py.log.stderr + py.trace.debug("hello world") + py.trace.info("hello world") + py.trace.warn("hello world") + py.trace.error("hello world") + py.trace.critical("hello world") + # End of the 'consumer' code + + sys.stderr = sys.saved + lines = open(redirect).readlines() + assert lines == ['[debug] hello world\n', '[info] hello world\n', + '[warn] hello world\n', '[error] hello world\n', + '[critical] hello world\n'] + + def test_default_log_file(self): + # Start of the 'consumer' code + py.trace[...] = py.log.file + py.trace.debug("hello world") + py.trace.info("hello world") + py.trace.warn("hello world") + py.trace.error("hello world") + py.trace.critical("hello world") + # End of the 'consumer' code + + lines = open('py_log.out').readlines() + last_5_lines = lines[-5:] + assert last_5_lines == ['[debug] hello world\n', '[info] hello world\n', + '[warn] hello world\n', '[error] hello world\n', + '[critical] hello world\n'] + + def test_custom_log_file(self): + # Start of the 'consumer' code + py.log['file'] = 'py_log2.out' + py.trace[...] = py.log.file + py.trace.debug("hello world") + py.trace.info("hello world") + py.trace.warn("hello world") + py.trace.error("hello world") + py.trace.critical("hello world") + # End of the 'consumer' code + + lines = open('py_log2.out').readlines() + last_5_lines = lines[-5:] + assert last_5_lines == ['[debug] hello world\n', '[info] hello world\n', + '[warn] hello world\n', '[error] hello world\n', + '[critical] hello world\n'] From ggheo at codespeak.net Sat Jun 11 01:14:07 2005 From: ggheo at codespeak.net (ggheo at codespeak.net) Date: Sat, 11 Jun 2005 01:14:07 +0200 (CEST) Subject: [py-svn] r13286 - py/dist/py Message-ID: <20050610231407.7892E27B4E@code1.codespeak.net> Author: ggheo Date: Sat Jun 11 01:14:07 2005 New Revision: 13286 Modified: py/dist/py/__init__.py Log: Added log module to initpkg. Modified: py/dist/py/__init__.py ============================================================================== --- py/dist/py/__init__.py (original) +++ py/dist/py/__init__.py Sat Jun 11 01:14:07 2005 @@ -101,4 +101,7 @@ # trace 'trace' : ('./misc/trace.py', 'trace'), + + # log + 'log' : ('./misc/log.py', 'log'), }) From dstanek at codespeak.net Sat Jun 11 14:52:43 2005 From: dstanek at codespeak.net (dstanek at codespeak.net) Date: Sat, 11 Jun 2005 14:52:43 +0200 (CEST) Subject: [py-svn] r13294 - py/branch/dist-doctest Message-ID: <20050611125243.A689327B4A@code1.codespeak.net> Author: dstanek Date: Sat Jun 11 14:52:41 2005 New Revision: 13294 Added: py/branch/dist-doctest/ - copied from r13293, py/dist/ Log: An experimental branch for adding doctest support. 
From hpk at codespeak.net Sat Jun 11 20:47:04 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sat, 11 Jun 2005 20:47:04 +0200 (CEST) Subject: [py-svn] r13302 - in py/dist/py: documentation misc Message-ID: <20050611184704.A67CF27B4C@code1.codespeak.net> Author: hpk Date: Sat Jun 11 20:47:03 2005 New Revision: 13302 Added: py/dist/py/documentation/confrest.py py/dist/py/documentation/contact.txt py/dist/py/documentation/home.txt Modified: py/dist/py/documentation/conftest.py py/dist/py/documentation/index.txt py/dist/py/documentation/style.css py/dist/py/misc/rest.py Log: new py lib website going online Added: py/dist/py/documentation/confrest.py ============================================================================== --- (empty file) +++ py/dist/py/documentation/confrest.py Sat Jun 11 20:47:03 2005 @@ -0,0 +1,80 @@ +import py +from py.__.misc.rest import convert_rest_html, strip_html_header + +mydir = py.magic.autopath().dirpath() +html = py.xml.html + +def process(txtpath): + encoding = 'latin1' + content = unicode(txtpath.read(), encoding) + stylesheet = 'style.css' + if not txtpath.dirpath(stylesheet).check(): + stylesheet = None + + content = convert_rest_html(content, txtpath, stylesheet=stylesheet, encoding=encoding) + content = strip_html_header(content) + + page = PyPage("py lib [%s] " % txtpath.purebasename, stylesheeturl=stylesheet) + page.contentspace.append(content) + htmlpath = txtpath.new(ext='.html') + htmlpath.write(page.unicode().encode(encoding)) + +class Page(object): + doctype = ('\n') + + def __init__(self, title, stylesheeturl=None, type="text/html", encoding="ISO-8859-1"): + self.title = title + self.stylesheeturl = stylesheeturl + self.type = type + self.encoding = encoding + + self.body = html.body() + self.head = html.head() + self._root = html.html(self.head, self.body) + self.fill() + + def fill(self): + content_type = "%s;charset=%s" %(self.type, self.encoding) + self.head.append(html.title(self.title)) + self.head.append(html.meta(name="Content-Type", value=content_type)) + if self.stylesheeturl: + self.head.append( + html.link(href=self.stylesheeturl, + media="screen", rel="stylesheet", + type="text/css")) + + def unicode(self, doctype=True): + page = self._root.unicode() + if doctype: + return self.doctype + page + else: + return page + +class PyPage(Page): + def fill(self): + super(PyPage, self).fill() + # base layout + self.body.append( + html.div(html.img(alt="py lib", id='pyimg', height=75, width=154, + src="http://codespeak.net/img/codespeak1b.png"))) + + self.menubar = html.div( + html.a("home", href="home.html", class_="menu"), " ", + html.a("doc", href="index.html", class_="menu"), " ", + html.a("contact", href="contact.html", class_="menu"), " ", + html.a("getting-started", href="getting-started.html", class_="menu"), " ", + html.a("issue", href="https://codespeak.net/issue/py-dev/", class_="menu"), + id="menubar", + ) + + self.metaspace = html.div( + html.div(self.title, class_="project_title"), + self.menubar, + id='metaspace') + + self.body.append(self.metaspace) + + self.contentspace = html.div(id="contentspace", class_="content") + self.body.append(self.contentspace) + Modified: py/dist/py/documentation/conftest.py ============================================================================== --- py/dist/py/documentation/conftest.py (original) +++ py/dist/py/documentation/conftest.py Sat Jun 11 20:47:03 2005 @@ -19,9 +19,15 @@ def restcheck(path): checkdocutils() import docutils.utils + try: - # this helper will raise errors 
instead of warnings - rest.process(path) + confrest = path.localpath.dirpath('confrest.py') + if confrest.check(file=1): + confrest = confrest.pyimport() + confrest.process(path) + else: + # defer to default processor + rest.process(path) except KeyboardInterrupt: raise except docutils.utils.SystemMessage: Added: py/dist/py/documentation/contact.txt ============================================================================== --- (empty file) +++ py/dist/py/documentation/contact.txt Sat Jun 11 20:47:03 2005 @@ -0,0 +1,15 @@ +py lib contact and communication +=================================== + +* `development mailing list`_ for conceptual and coding discussions (low to medium traffic). + +* `subversion commit mailing list`_ all updates to the trunk/branch source and documentation tree. + +* `development bug/feature tracker`_ for filing bugs and feature requests. + +* IRC Channel #pylib on irc.freenode.net + +.. _`subversion commit mailing list`: http://codespeak.net/mailman/listinfo/py-svn +.. _`development mailing list`: http://codespeak.net/mailman/listinfo/py-dev +.. _`development bug/feature tracker`: https://codespeak.net/issue/py-dev/ + Added: py/dist/py/documentation/home.txt ============================================================================== --- (empty file) +++ py/dist/py/documentation/home.txt Sat Jun 11 20:47:03 2005 @@ -0,0 +1,12 @@ + + The py lib aims at supporting a decent development process + addressing important deployment, versioning, testing and + documentation issues - seen primarily from the perspective + of a FOSS (Free and Open Source) developer. + +Heading towards a first 0.8.0 py.test/py lib release! +====================================================== + +``py.test`` and the py lib are heading towards their first release. +The main missing feature is proper testing and integration with win32 +platforms and improved documentation. *holger (06/11/2005)* Modified: py/dist/py/documentation/index.txt ============================================================================== --- py/dist/py/documentation/index.txt (original) +++ py/dist/py/documentation/index.txt Sat Jun 11 20:47:03 2005 @@ -1,39 +1,19 @@ -*the py lib* -============ +py.test and the py lib - documentation +---------------------------------------- -*The py lib wants to support a decent and convenient -development process addressing important deployment, -versioning, testing and documentation issues - seen -primarily from the perspective of a FOSS (Free and -Open Source) developer. While the primary focus -is on developer activities this does not preclude -usage of the py lib for applications.* +`py.test`_ introduces to the new'n'easy **py.test** utility -Important Links ---------------- +`py.execnet`_ a (probably uniquely) innovative way to distribute programs across the net - `getting started`_ quick start for using py.test and the py lib. +`py.magic.greenlet`_: Lightweight in-process concurrent programming (aka Stackless) - `py-dev at codespeak net`_ the development mailing list +`py.xml`_ a fast'n'easy way to generate xml/html documents (including CSS-styling) - **#pylib on irc.freenode.net** (upcoming) development IRC channel +`miscellaneous features`_ describes some more py lib features - `why what how py?`_, describing motivation and background of the py lib +`future`_ handles development visions and plans for the near future. 
-Usage Documentation -------------------- - - `py.test`_ introduces to the new'n'easy **py.test** utility - - `py.execnet`_ a (probably uniquely) innovative way to distribute programs across the net - - `py.magic.greenlet`_: Lightweight in-process concurrent programming (aka Stackless) - - `py.xml`_ a fast'n'easy way to generate xml/html documents (including CSS-styling) - - `miscellaneous features`_ describes some more py lib features - - `future`_ handles development visions and plans for the near future. +`why what how py?`_, describing motivation and background of the py lib Note that some parts of these texts refer to future development and do not reflect the current state. **Welcome to documentation and Modified: py/dist/py/documentation/style.css ============================================================================== --- py/dist/py/documentation/style.css (original) +++ py/dist/py/documentation/style.css Sat Jun 11 20:47:03 2005 @@ -1,26 +1,25 @@ -body { - background: url(http://codespeak.net/img/codespeak1b.png) no-repeat; - font: 120% Arial, Verdana, Helvetica, sans-serif; - border: 0; - margin: 0.5em 0em 0.5em 0.5em; - padding: 0 0 0 145px; +body,body.editor,body.body { + font: 110% "Times New Roman", Arial, Verdana, Helvetica, serif; + background: White; + color: Black; } -a { - text-decoration: underline; - background-color: transparent; +a, a.reference { + text-decoration: none; +} +a[href]:hover { text-decoration: underline; } + +img { + border: none; + vertical-align: middle; } -p { - /*margin: 0.5em 0em 1em 0em;*/ +p, div.text { text-align: left; line-height: 1.5em; margin: 0.5em 0em 0em 0em; } -p a { - text-decoration: underline; -} p a:active { @@ -28,6 +27,18 @@ background-color: transparent; } +p img { + border: 0; + margin: 0; +} + +img.inlinephoto { + padding: 0; + padding-right: 1em; + padding-top: 0.7em; + float: left; +} + hr { clear: both; height: 1px; @@ -39,18 +50,32 @@ ul { line-height: 1.5em; /*list-style-image: url("bullet.gif"); */ - margin-left: 1em; + margin-left: 1.5em; + padding:0; } ol { line-height: 1.5em; - margin-left: 0em; + margin-left: 1.5em; + padding:0; } ul a, ol a { text-decoration: underline; } +dl { +} + +dt { + font-weight: bold; +} + +dd { + line-height: 1.5em; + margin-bottom: 1em; +} + blockquote { font-family: Times, "Times New Roman", serif; font-style: italic; @@ -58,17 +83,996 @@ } code { - font-size: 120%; color: Black; /*background-color: #dee7ec;*/ background-color: #cccccc; } pre { + padding: 1em; + border: 1px solid #8cacbb; + color: Black; + background-color: #dee7ec; + background-color: #cccccc; + overflow: auto; +} + + +.netscape4 { + display: none; +} + +/* main page styles */ + +/*a[href]:hover { color: black; text-decoration: underline; } +a[href]:link { color: black; text-decoration: underline; } +a[href] { color: black; text-decoration: underline; } +*/ + +span.menu_selected { + color: black; + font: 140% Verdana, Helvetica, Arial, sans-serif; + text-decoration: none; + padding-right: 0.3em; + background-color: #cccccc; +} + + +a.menu { + /*color: #3ba6ec; */ + font: 140% Verdana, Helvetica, Arial, sans-serif; + text-decoration: none; + padding-right: 0.3em; +} + +a.menu[href]:visited, a.menu[href]:link{ + /*color: #3ba6ec; */ + font: 140% Verdana, Helvetica, Arial, sans-serif; + text-decoration: none; +} + +a.menu[href]:hover { + /*color: black;*/ +} + +div.project_title{ + /*border-spacing: 20px;*/ + font: 160% Verdana, Helvetica, Arial, sans-serif; + color: #3ba6ec; + vertical-align: middle; + padding-bottom: 
0.3em; +} + +a.wikicurrent { + font: 100% Verdana, Helvetica, Arial, sans-serif; + color: #3ba6ec; + vertical-align: middle; +} + + +table.body { + border: 0; + /*padding: 0; + border-spacing: 0px; + border-collapse: separate; + */ +} + +td.page-header-left { + padding: 5px; + /*border-bottom: 1px solid #444444;*/ +} + +td.page-header-top { + padding: 0; + + /*border-bottom: 1px solid #444444;*/ +} + +td.sidebar { + padding: 1 0 0 1; +} + +td.sidebar p.classblock { + padding: 0 5 0 5; + margin: 1 1 1 1; + border: 1px solid #444444; + background-color: #eeeeee; +} + +td.sidebar p.userblock { + padding: 0 5 0 5; + margin: 1 1 1 1; + border: 1px solid #444444; + background-color: #eeeeff; +} + +td.content { + padding: 1 5 1 5; + vertical-align: top; + width: 100%; +} + +p.ok-message { + background-color: #22bb22; + padding: 5 5 5 5; + color: white; + font-weight: bold; +} +p.error-message { + background-color: #bb2222; + padding: 5 5 5 5; + color: white; + font-weight: bold; +} + +p:first-child { + margin: 0 ; + padding: 0; +} + +/* style for forms */ +table.form { + padding: 2; + border-spacing: 0px; + border-collapse: separate; +} + +table.form th { + color: #333388; + text-align: right; + vertical-align: top; + font-weight: normal; +} +table.form th.header { + font-weight: bold; + background-color: #eeeeff; + text-align: left; +} + +table.form th.required { + font-weight: bold; +} + +table.form td { + color: #333333; + empty-cells: show; + vertical-align: top; +} + +table.form td.optional { + font-weight: bold; + font-style: italic; +} + +table.form td.html { + color: #777777; +} + +/* style for lists */ +table.list { + border-spacing: 0px; + border-collapse: separate; + vertical-align: top; + padding-top: 0; + width: 100%; +} + +table.list th { + padding: 0 4 0 4; + color: #404070; + background-color: #eeeeff; + border-right: 1px solid #404070; + border-top: 1px solid #404070; + border-bottom: 1px solid #404070; + vertical-align: top; + empty-cells: show; +} +table.list th a[href]:hover { color: #404070 } +table.list th a[href]:link { color: #404070 } +table.list th a[href] { color: #404070 } +table.list th.group { + background-color: #f4f4ff; + text-align: center; + font-size: 120%; +} + +table.list td { + padding: 0 4 0 4; + border: 0 2 0 2; + border-right: 1px solid #404070; + color: #404070; + background-color: white; + vertical-align: top; + empty-cells: show; +} + +table.list tr.normal td { + background-color: white; + white-space: nowrap; +} + +table.list tr.alt td { + background-color: #efefef; + white-space: nowrap; +} + +table.list td:first-child { + border-left: 1px solid #404070; + border-right: 1px solid #404070; +} + +table.list th:first-child { + border-left: 1px solid #404070; + border-right: 1px solid #404070; +} + +table.list tr.navigation th { + text-align: right; +} +table.list tr.navigation th:first-child { + border-right: none; + text-align: left; +} + + +/* style for message displays */ +table.messages { + border-spacing: 0px; + border-collapse: separate; + width: 100%; +} + +table.messages th.header{ + padding-top: 10px; + border-bottom: 1px solid gray; + font-weight: bold; + background-color: white; + color: #707040; +} + +table.messages th { + font-weight: bold; + color: black; + text-align: left; + border-bottom: 1px solid #afafaf; +} + +table.messages td { + font-family: monospace; + background-color: #efefef; + border-bottom: 1px solid #afafaf; + color: black; + empty-cells: show; + border-right: 1px solid #afafaf; + vertical-align: top; + padding: 2 5 2 5; +} 
+ +table.messages td:first-child { + border-left: 1px solid #afafaf; + border-right: 1px solid #afafaf; +} + +/* style for file displays */ +table.files { + border-spacing: 0px; + border-collapse: separate; + width: 100%; +} + +table.files th.header{ + padding-top: 10px; + border-bottom: 1px solid gray; + font-weight: bold; + background-color: white; + color: #707040; +} + +table.files th { + border-bottom: 1px solid #afafaf; + font-weight: bold; + text-align: left; +} + +table.files td { + font-family: monospace; + empty-cells: show; +} + +/* style for history displays */ +table.history { + border-spacing: 0px; + border-collapse: separate; + width: 100%; +} + +table.history th.header{ + padding-top: 10px; + border-bottom: 1px solid gray; + font-weight: bold; + background-color: white; + color: #707040; + font-size: 100%; +} + +table.history th { + border-bottom: 1px solid #afafaf; + font-weight: bold; + text-align: left; + font-size: 90%; +} + +table.history td { + font-size: 90%; + vertical-align: top; + empty-cells: show; +} + + +/* style for class list */ +table.classlist { + border-spacing: 0px; + border-collapse: separate; + width: 100%; +} + +table.classlist th.header{ + padding-top: 10px; + border-bottom: 1px solid gray; + font-weight: bold; + background-color: white; + color: #707040; +} + +table.classlist th { + font-weight: bold; + text-align: left; +} + + +/* style for class help display */ +table.classhelp { + border-spacing: 0px; + border-collapse: separate; + width: 100%; +} + +table.classhelp th { + font-weight: bold; + text-align: left; + color: #707040; +} + +table.classhelp td { + padding: 2 2 2 2; + border: 1px solid black; + text-align: left; + vertical-align: top; + empty-cells: show; +} + + +/* style for "other" displays */ +table.otherinfo { + border-spacing: 0px; + border-collapse: separate; + width: 100%; +} + +table.otherinfo th.header{ + padding-top: 10px; + border-bottom: 1px solid gray; + font-weight: bold; + background-color: white; + color: #707040; +} + +table.otherinfo th { + border-bottom: 1px solid #afafaf; + font-weight: bold; + text-align: left; +} + +input { + border: 1px solid #8cacbb; + color: Black; + background-color: white; + vertical-align: middle; + margin-bottom: 1px; /* IE bug fix */ + padding: 0.1em; +} + +select { + border: 1px solid #8cacbb; + color: Black; + background-color: white; + vertical-align: middle; + margin-bottom: 1px; /* IE bug fix */ + padding: 0.1em; +} + + +a.nonexistent { + color: #FF2222; +} +a.nonexistent:visited { + color: #FF2222; +} +a.external { + color: #AA6600; +} + +/* +dl,ul,ol { + margin-top: 1pt; +} +tt,pre { + font-family: Lucida Console,Courier New,Courier,monotype; + font-size: 12pt; +} +pre.code { + margin-top: 8pt; + margin-bottom: 8pt; + background-color: #FFFFEE; + white-space:pre; + border-style:solid; + border-width:1pt; + border-color:#999999; + color:#111111; + padding:5px; + width:100%; +} +*/ +div.diffold { + background-color: #FFFF80; + border-style:none; + border-width:thin; + width:100%; +} +div.diffnew { + background-color: #80FF80; + border-style:none; + border-width:thin; + width:100%; +} +div.message { + margin-top: 6pt; + background-color: #E8FFE8; + border-style:solid; + border-width:1pt; + border-color:#999999; + color:#440000; + padding:5px; + width:100%; +} +strong.highlight { + background-color: #FFBBBB; +/* as usual, NetScape fucks up with innocent CSS + border-color: #FFAAAA; + border-style: solid; + border-width: 1pt; +*/ +} + +table.navibar { + background-color: #C8C8C8; + 
border-spacing: 3px; +} +td.navibar { + background-color: #E8E8E8; + vertical-align: top; + text-align: right; + padding: 0px; +} + +div.pagename { + font-size: 140%; + color: blue; + text-align: center; + font-weight: bold; + background-color: white; + padding: 0 ; +} + +a.wikiaction, input.wikiaction { + color: black; + text-decoration: None; + text-align: center; + color: black; + /*border: 1px solid #3ba6ec; */ + margin: 4px; + padding: 5; + padding-bottom: 0; + white-space: nowrap; +} + +a.wikiaction[href]:hover { + color: black; + text-decoration: none; + /*background-color: #dddddd; */ +} + +span.wikiuserpref { + padding-top: 1em; font-size: 120%; +} + +div.wikitrail { + vertical-align: bottom; + /*font-size: -1;*/ + padding-top: 1em; + display: none; +} + +div.wikiaction { + vertical-align: middle; + /*border-bottom: 1px solid #8cacbb;*/ + padding-bottom:1em; + text-align: left; + width: 100%; +} + +div.wikieditmenu { + text-align: right; +} + +form.wikiedit { + border: 1px solid #8cacbb; + background-color: #f0f0f0; + background-color: #fabf00; + padding: 1em; + padding-right: 0em; +} + +div.legenditem { + padding-top: 0.5em; + padding-left: 0.3em; +} + +span.wikitoken { + background-color: #eeeeee; +} + + +div#contentspace h1:first-child, div.heading:first-child { + padding-top: 0; + margin-top: 0; +} +div#contentspace h2:first-child { + padding-top: 0; + margin-top: 0; +} + +/* heading and paragraph text */ + +div.heading, h1 { + font-family: Verdana, Helvetica, Arial, sans-serif; + background-color: #58b3ef; + background-color: #FFFFFF; + /*color: #4893cf;*/ + color: black; + padding-top: 1.0em; + padding-bottom:0.2em; + text-align: left; + margin-top: 0em; + /*margin-bottom:8pt;*/ + font-weight: bold; + font-size: 115%; + border-bottom: 1px solid #8CACBB; +} + + +h1, h2, h3, h4, h5, h6 { + color: Black; + clear: left; + font: 100% Verdana, Helvetica, Arial, sans-serif; + margin: 0; + padding-left: 0em; + padding-top: 1em; + padding-bottom: 0.2em; + /*border-bottom: 1px solid #8CACBB;*/ +} +/* h1,h2 { padding-top: 0; }*/ + + +h1 { font-size: 145%; } +h2 { font-size: 135%; } +h3 { font-size: 125%; } +h4 { font-size: 120%; } +h5 { font-size: 110%; } +h6 { font-size: 80%; } + +h1 a { text-decoration: None;} + +div.exception { + background-color: #bb2222; + padding: 5 5 5 5; + color: white; + font-weight: bold; +} +pre.exception { + font-size: 110%; padding: 1em; border: 1px solid #8cacbb; color: Black; background-color: #dee7ec; background-color: #cccccc; } + +/* defines for navgiation bar (documentation) */ + + +div.direntry { + padding-top: 0.3em; + padding-bottom: 0.3em; + margin-right: 1em; + font-weight: bold; + background-color: #dee7ec; + font-size: 110%; +} + +div.fileentry { + font-family: Verdana, Helvetica, Arial, sans-serif; + padding-bottom: 0.3em; + white-space: nowrap; + line-height: 150%; +} + +a.fileentry { + white-space: nowrap; +} + + +span.left { + text-align: left; +} +span.right { + text-align: right; +} + +div.navbar { + /*margin: 0;*/ + font-size: 80% /*smaller*/; + font-weight: bold; + text-align: left; + /* position: fixed; */ + top: 100pt; + left: 0pt; /* auto; */ + width: 120pt; + /* right: auto; + right: 0pt; 2em; */ +} + + +div.history a { + /* font-size: 70%; */ +} + +div.wikiactiontitle { + font-weight: bold; +} + +/* REST defines */ + +div.document { + margin: 0; +} + +h1.title { + margin: 0; + margin-bottom: 0.5em; +} + +td.toplist { + vertical-align: top; +} + +img#pyimg { + position: absolute; + top: 4px; + left: 4px; +} + +div#navspace { + 
position: absolute; + top: 130px; + left: 11px; + font-size: 100%; + width: 150px; + overflow: hidden; /* scroll; */ +} + +div#metaspace { + position: absolute; + top: 10px; + left: 170px; +} + +div#errorline { + position: relative; + top: 5px; + float: right; +} + +div#contentspace { + position: absolute; + /* font: 120% "Times New Roman", serif;*/ + font: 110% Verdana, Helvetica, Arial, sans-serif; + top: 130px; + left: 170px; + margin-right: 5px; +} + +div#menubar { +/* width: 400px; */ + float: left; +} + +/* for the documentation page */ +div#docinfoline { + position: relative; + top: 5px; + left: 0px; + + /*background-color: #dee7ec; */ + padding: 5pt; + padding-bottom: 1em; + color: black; + /*border-width: 1pt; + border-style: solid;*/ + +} + +div#docnavlist { + /*background-color: #dee7ec; */ + padding: 5pt; + padding-bottom: 2em; + color: black; + border-width: 1pt; + /*border-style: solid;*/ +} + + +/* text markup */ + +div.listtitle { + color: Black; + clear: left; + font: 120% Verdana, Helvetica, Arial, sans-serif; + margin: 0; + padding-left: 0em; + padding-top: 0em; + padding-bottom: 0.2em; + margin-right: 0.5em; + border-bottom: 1px solid #8CACBB; +} + +div.actionbox h3 { + padding-top: 0; + padding-right: 0.5em; + padding-left: 0.5em; + background-color: #fabf00; + text-align: center; + border: 1px solid black; /* 8cacbb; */ +} + +div.actionbox a { + display: block; + padding-bottom: 0.5em; + padding-top: 0.5em; + margin-left: 0.5em; +} + +div.actionbox a.history { + display: block; + padding-bottom: 0.5em; + padding-top: 0.5em; + margin-left: 0.5em; + font-size: 90%; +} + +div.actionbox { + margin-bottom: 2em; + padding-bottom: 1em; + overflow: hidden; /* scroll; */ +} + +/* taken from docutils (oh dear, a bit senseless) */ +ol.simple, ul.simple { + margin-bottom: 1em } + +ol.arabic { + list-style: decimal } + +ol.loweralpha { + list-style: lower-alpha } + +ol.upperalpha { + list-style: upper-alpha } + +ol.lowerroman { + list-style: lower-roman } + +ol.upperroman { + list-style: upper-roman } + + +/* +:Author: David Goodger +:Contact: goodger at users.sourceforge.net +:date: $Date: 2003/01/22 22:26:48 $ +:version: $Revision: 1.29 $ +:copyright: This stylesheet has been placed in the public domain. + +Default cascading style sheet for the HTML output of Docutils. 
+*/ +/* +.first { + margin-top: 0 } + +.last { + margin-bottom: 0 } + +a.toc-backref { + text-decoration: none ; + color: black } + +dd { + margin-bottom: 0.5em } + +div.abstract { + margin: 2em 5em } + +div.abstract p.topic-title { + font-weight: bold ; + text-align: center } + +div.attention, div.caution, div.danger, div.error, div.hint, +div.important, div.note, div.tip, div.warning { + margin: 2em ; + border: medium outset ; + padding: 1em } + +div.attention p.admonition-title, div.caution p.admonition-title, +div.danger p.admonition-title, div.error p.admonition-title, +div.warning p.admonition-title { + color: red ; + font-weight: bold ; + font-family: sans-serif } + +div.hint p.admonition-title, div.important p.admonition-title, +div.note p.admonition-title, div.tip p.admonition-title { + font-weight: bold ; + font-family: sans-serif } + +div.dedication { + margin: 2em 5em ; + text-align: center ; + font-style: italic } + +div.dedication p.topic-title { + font-weight: bold ; + font-style: normal } + +div.figure { + margin-left: 2em } + +div.footer, div.header { + font-size: smaller } + +div.system-messages { + margin: 5em } + +div.system-messages h1 { + color: red } + +div.system-message { + border: medium outset ; + padding: 1em } + +div.system-message p.system-message-title { + color: red ; + font-weight: bold } + +div.topic { + margin: 2em } + +h1.title { + text-align: center } + +h2.subtitle { + text-align: center } + +hr { + width: 75% } + +p.caption { + font-style: italic } + +p.credits { + font-style: italic ; + font-size: smaller } + +p.label { + white-space: nowrap } + +p.topic-title { + font-weight: bold } + +pre.address { + margin-bottom: 0 ; + margin-top: 0 ; + font-family: serif ; + font-size: 100% } + +pre.line-block { + font-family: serif ; + font-size: 100% } + +pre.literal-block, pre.doctest-block { + margin-left: 2em ; + margin-right: 2em ; + background-color: #eeeeee } + +span.classifier { + font-family: sans-serif ; + font-style: oblique } + +span.classifier-delimiter { + font-family: sans-serif ; + font-weight: bold } + +span.interpreted { + font-family: sans-serif } + +span.option { + white-space: nowrap } + +span.option-argument { + font-style: italic } + +span.pre { + white-space: pre } + +span.problematic { + color: red } + +table { + margin-top: 0.5em ; + margin-bottom: 0.5em } + +table.citation { + border-left: solid thin gray ; + padding-left: 0.5ex } + +table.docinfo { + margin: 2em 4em } + +table.footnote { + border-left: solid thin black ; + padding-left: 0.5ex } + +td, th { + padding-left: 0.5em ; + padding-right: 0.5em ; + vertical-align: top } + +th.docinfo-name, th.field-name { + font-weight: bold ; + text-align: left ; + white-space: nowrap } + +h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt { + font-size: 100% } + +tt { + background-color: #eeeeee } + +ul.auto-toc { + list-style-type: none } +*/ + +div.section { + margin-top: 1.0em ; +} Modified: py/dist/py/misc/rest.py ============================================================================== --- py/dist/py/misc/rest.py (original) +++ py/dist/py/misc/rest.py Sat Jun 11 20:47:03 2005 @@ -1,5 +1,6 @@ import sys, os, traceback +import re if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()): def log(msg): @@ -45,3 +46,15 @@ # info = txtpath.info() # svninfopath.dump(info) +rex1 = re.compile(ur'.*(.*).*', re.MULTILINE | re.DOTALL) +rex2 = re.compile(ur'.*
<div class="document">(.*)</div>
.*', re.MULTILINE | re.DOTALL) + +def strip_html_header(string): + """ return the content of the body-tag """ + uni = unicode(string, 'utf8') + for rex in rex1,rex2: + match = rex.search(uni) + if not match: + break + uni = match.group(1) + return uni From hpk at codespeak.net Sat Jun 11 21:00:04 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sat, 11 Jun 2005 21:00:04 +0200 (CEST) Subject: [py-svn] r13303 - py/dist/py/documentation Message-ID: <20050611190004.7D10827B4C@code1.codespeak.net> Author: hpk Date: Sat Jun 11 21:00:03 2005 New Revision: 13303 Modified: py/dist/py/documentation/index.txt Log: warn against win32 for py.execnet and be slightly more humble Modified: py/dist/py/documentation/index.txt ============================================================================== --- py/dist/py/documentation/index.txt (original) +++ py/dist/py/documentation/index.txt Sat Jun 11 21:00:03 2005 @@ -3,9 +3,9 @@ `py.test`_ introduces to the new'n'easy **py.test** utility -`py.execnet`_ a (probably uniquely) innovative way to distribute programs across the net +`py.execnet`_ an innovative way to distribute programs across the net (not stable on win32 yet) -`py.magic.greenlet`_: Lightweight in-process concurrent programming (aka Stackless) +`py.magic.greenlet`_: Lightweight in-process concurrent programming (aka Stackless) `py.xml`_ a fast'n'easy way to generate xml/html documents (including CSS-styling) From hpk at codespeak.net Sat Jun 11 21:31:45 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sat, 11 Jun 2005 21:31:45 +0200 (CEST) Subject: [py-svn] r13304 - py/dist/py/documentation Message-ID: <20050611193145.A88B127B4C@code1.codespeak.net> Author: hpk Date: Sat Jun 11 21:31:45 2005 New Revision: 13304 Modified: py/dist/py/documentation/confrest.py py/dist/py/documentation/style.css Log: better layout, add last-modification information Modified: py/dist/py/documentation/confrest.py ============================================================================== --- py/dist/py/documentation/confrest.py (original) +++ py/dist/py/documentation/confrest.py Sat Jun 11 21:31:45 2005 @@ -1,5 +1,6 @@ import py from py.__.misc.rest import convert_rest_html, strip_html_header +from py.__.misc.difftime import worded_diff_time mydir = py.magic.autopath().dirpath() html = py.xml.html @@ -15,6 +16,15 @@ content = strip_html_header(content) page = PyPage("py lib [%s] " % txtpath.purebasename, stylesheeturl=stylesheet) + + svninfo = txtpath.info() + modified = " modified %s by %s" % (worded_diff_time(svninfo.mtime), + getrealname(svninfo.last_author)) + + page.contentspace.append( + html.div(html.div(modified, style="float: right; font-style: italic;"), + id = 'docinfoline')) + page.contentspace.append(content) htmlpath = txtpath.new(ext='.html') htmlpath.write(page.unicode().encode(encoding)) @@ -58,6 +68,8 @@ self.body.append( html.div(html.img(alt="py lib", id='pyimg', height=75, width=154, src="http://codespeak.net/img/codespeak1b.png"))) + #self.body.append( + # html.div("py lib rev %d" % 1)) self.menubar = html.div( html.a("home", href="home.html", class_="menu"), " ", @@ -75,6 +87,17 @@ self.body.append(self.metaspace) - self.contentspace = html.div(id="contentspace", class_="content") + self.contentspace = html.div(id="contentspace") self.body.append(self.contentspace) +def getrealname(username): + try: + import uconf + except ImportError: + return username + try: + user = uconf.system.User(username) + except KeyboardInterrupt: + raise + return user.realname or username + 
Modified: py/dist/py/documentation/style.css ============================================================================== --- py/dist/py/documentation/style.css (original) +++ py/dist/py/documentation/style.css Sat Jun 11 21:31:45 2005 @@ -776,7 +776,7 @@ div#navspace { position: absolute; - top: 130px; + top: 100px; left: 11px; font-size: 100%; width: 150px; @@ -799,7 +799,7 @@ position: absolute; /* font: 120% "Times New Roman", serif;*/ font: 110% Verdana, Helvetica, Arial, sans-serif; - top: 130px; + top: 100px; left: 170px; margin-right: 5px; } From hpk at codespeak.net Sat Jun 11 21:36:43 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sat, 11 Jun 2005 21:36:43 +0200 (CEST) Subject: [py-svn] r13305 - py/dist/py/misc Message-ID: <20050611193643.1D17A27B4C@code1.codespeak.net> Author: hpk Date: Sat Jun 11 21:36:42 2005 New Revision: 13305 Added: py/dist/py/misc/difftime.py Log: ups, forgot a module Added: py/dist/py/misc/difftime.py ============================================================================== --- (empty file) +++ py/dist/py/misc/difftime.py Sat Jun 11 21:36:42 2005 @@ -0,0 +1,25 @@ +import py + +_time_desc = { + 1 : 'second', 60 : 'minute', 3600 : 'hour', 86400 : 'day', + 2628000 : 'month', 31536000 : 'year', } + +def worded_diff_time(ctime): + difftime = py.std.time.time() - ctime + keys = _time_desc.keys() + keys.sort() + for i, key in py.builtin.enumerate(keys): + if key >=difftime: + break + l = [] + keylist = keys[:i] + + keylist.reverse() + for key in keylist[:1]: + div = int(difftime / key) + if div==0: + break + difftime -= div * key + plural = div > 1 and 's' or '' + l.append('%d %s%s' %(div, _time_desc[key], plural)) + return ", ".join(l) + " ago " From hpk at codespeak.net Sat Jun 11 21:45:03 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sat, 11 Jun 2005 21:45:03 +0200 (CEST) Subject: [py-svn] r13306 - py/dist/py/documentation Message-ID: <20050611194503.A213927B4C@code1.codespeak.net> Author: hpk Date: Sat Jun 11 21:45:03 2005 New Revision: 13306 Modified: py/dist/py/documentation/confrest.py Log: let the codespeak logo point to the codespeak home page Modified: py/dist/py/documentation/confrest.py ============================================================================== --- py/dist/py/documentation/confrest.py (original) +++ py/dist/py/documentation/confrest.py Sat Jun 11 21:45:03 2005 @@ -66,8 +66,10 @@ super(PyPage, self).fill() # base layout self.body.append( - html.div(html.img(alt="py lib", id='pyimg', height=75, width=154, - src="http://codespeak.net/img/codespeak1b.png"))) + html.div(html.a(html.img(alt="py lib", id='pyimg', height=75, width=154, + src="http://codespeak.net/img/codespeak1b.png"), + href="http://codespeak.net", + ))) #self.body.append( # html.div("py lib rev %d" % 1)) From ggheo at codespeak.net Sun Jun 12 22:11:21 2005 From: ggheo at codespeak.net (ggheo at codespeak.net) Date: Sun, 12 Jun 2005 22:11:21 +0200 (CEST) Subject: [py-svn] r13321 - in py/dist/py/misc: . testing Message-ID: <20050612201121.AADAC27B4F@code1.codespeak.net> Author: ggheo Date: Sun Jun 12 22:11:21 2005 New Revision: 13321 Added: py/dist/py/misc/log_support.py Removed: py/dist/py/misc/testing/test_trace.py py/dist/py/misc/trace.py Modified: py/dist/py/misc/log.py py/dist/py/misc/testing/test_log.py Log: [issue3] Split py.log functionality into a 'producer' API (misc/log.py) and a 'consumer' API (misc/log_support.py). There is only one 'root-level' namespace called py.log (see __init__.py file in top-level py directory). 
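Before the diffs, a short sketch of the resulting API, mirroring the updated tests in test_log.py further down: the producer side (py.log.debug/info/warn/error/critical plus py.log.set_logger) carries no output policy of its own, while the consumer side supplies StdoutLogger/StderrLogger/FileLogger callables that are registered per keyword.

    import py

    py.log.set_logger("default", py.log.StdoutLogger())  # catch-all consumer
    py.log.set_logger("error", py.log.StderrLogger())    # route 'error' to stderr
    py.log.debug("hello world")      # -> "[debug] hello world" on stdout
    py.log.error("something broke")  # -> "[error] something broke" on stderr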
Modified: py/dist/py/misc/log.py ============================================================================== --- py/dist/py/misc/log.py (original) +++ py/dist/py/misc/log.py Sun Jun 12 22:11:21 2005 @@ -1,67 +1,101 @@ -import py -import sys, logging - -class Logger(object): - _logregistry = {} - - def __init__(self): - self.formatter = logging.Formatter('%(message)s') - - def __getattr__(self, name): - if name == "stdout": - logger_stdout = logging.getLogger('py.log.stdout') - stdout_handler = logging.StreamHandler(sys.stdout) - stdout_handler.setFormatter(self.formatter) - logger_stdout.addHandler(stdout_handler) - logger_stdout.setLevel(logging.DEBUG) - def stdout_tracer(message): - self.log_message(logger_stdout, message) - return stdout_tracer - if name == "stderr": - logger_stderr = logging.getLogger('py.log.stderr') - stderr_handler = logging.StreamHandler(sys.stderr) - stderr_handler.setFormatter(self.formatter) - logger_stderr.addHandler(stderr_handler) - logger_stderr.setLevel(logging.DEBUG) - def stderr_tracer(message): - self.log_message(logger_stderr, message) - return stderr_tracer - elif name == "file": - filename = self._logregistry.get('file', 'py_log.out') - file_handler = logging.FileHandler(filename) - file_handler.setFormatter(self.formatter) - logger_file = logging.getLogger('py.log.file') - logger_file.addHandler(file_handler) - logger_file.setLevel(logging.DEBUG) - def file_tracer(message): - self.log_message(logger_file, message) - return file_tracer - else: - raise AttributeError, name - - def __setitem__(self, name, value): - self._logregistry[name] = value - - def log_message(self, logger, message): - for keyword in message.keywords: - if keyword.startswith('debug'): - logger.debug(message) - if keyword.startswith('info'): - logger.info(message) - if keyword.startswith('warn'): - logger.warn(message) - if keyword.startswith('err'): - logger.error(message) - if keyword.startswith('crit'): - logger.critical(message) - +""" +py lib's basic logging/tracing functionality + + EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL (especially the dispatching) + +WARNING: this module is not allowed to contain any 'py' imports, + Instead, it is very self-contained and should not depend on + CPython/stdlib versions, either. One reason for these + restrictions is that this module should be sendable + via py.execnet across the network in an very early phase. 
+""" + +class Message(object): + def __init__(self, keywords, args): + self.keywords = keywords + self.args = args + + def content(self): + return " ".join(map(str, self.args)) + + def prefix(self): + return "[%s] " % (":".join(self.keywords)) + + def __str__(self): + return self.prefix() + self.content() + +class LogProducer(object): + """Log "producer" API which sends messages to be logged + to a 'consumer' object, which then prints them to stdout, + stderr, files, etc.""" + + Message = Message # to allow later customization + _registry = {} + + def __init__(self, keywords=()): + self.keywords = keywords + + def __repr__(self): + return "" % ":".join(self.keywords) + + def debug(self, *args): + producer = LogProducer(self.keywords + ('debug',)) + producer(*args) + + def info(self, *args): + producer = LogProducer(self.keywords + ('info',)) + producer(*args) + + def warn(self, *args): + producer = LogProducer(self.keywords + ('warn',)) + producer(*args) + + def error(self, *args): + producer = LogProducer(self.keywords + ('error',)) + producer(*args) + + def critical(self, *args): + producer = LogProducer(self.keywords + ('critical',)) + producer(*args) + + def set_logger(self, name, func): + assert callable(func) + keywords = self.keywords + (name,) + self._registry[keywords] = func + + def __getattr__(self, name): + if name[0] == '_': + raise AttributeError, name + return LogProducer(self.keywords + (name,)) + + def __call__(self, *args): + message = self.Message(self.keywords, args) + try: + func = self._registry[message.keywords] + except KeyError: + # XXX find best match, for now it's a hack/simplistic + try: + func = self._registry[("default",)] + except KeyError: + print str(message) + return + func(message) + # class methods dealing with registry - def _getstate_(cls): - return cls._logregistry.copy() + def _getstate_(cls): + return cls._registry.copy() _getstate_ = classmethod(_getstate_) - def _setstate_(cls, state): - cls._logregistry = state + def _setstate_(cls, state): + cls._registry = state _setstate_ = classmethod(_setstate_) - -log = Logger() + +producer = LogProducer() +debug = producer.debug +info = producer.info +warn = producer.warn +error = producer.error +critical = producer.critical +set_logger = producer.set_logger +_getstate_ = producer._getstate_ +_setstate_ = producer._setstate_ \ No newline at end of file Added: py/dist/py/misc/log_support.py ============================================================================== --- (empty file) +++ py/dist/py/misc/log_support.py Sun Jun 12 22:11:21 2005 @@ -0,0 +1,63 @@ +import py +import os, sys, logging + +class LogConsumer(object): + """Log "consumer" API which receives messages from + a 'producer' object and displays them using various + logging mechanisms (stdout, stderr, files, syslog, etc.)""" + + def __init__(self): + self.formatter = logging.Formatter('%(message)s') + + def FileLogger(self, filename=None, mode='a'): + if filename is None: + filename = 'log.out' + else: + filename = str(filename) + file_handler = logging.FileHandler(filename, mode) + file_handler.setFormatter(self.formatter) + logger_name = "py.log.file.%s" % os.path.basename(filename) + logger_file = logging.getLogger(logger_name) + logger_file.addHandler(file_handler) + logger_file.setLevel(logging.DEBUG) + def file_tracer(message): + self.log_message(logger_file, message) + return file_tracer + + def StdoutLogger(self): + logger_stdout = logging.getLogger('py.log.stdout') + stdout_handler = logging.StreamHandler(sys.stdout) + 
stdout_handler.setFormatter(self.formatter) + logger_stdout.addHandler(stdout_handler) + logger_stdout.setLevel(logging.DEBUG) + def stdout_tracer(message): + self.log_message(logger_stdout, message) + return stdout_tracer + + def StderrLogger(self): + logger_stderr = logging.getLogger('py.log.stderr') + stderr_handler = logging.StreamHandler(sys.stderr) + stderr_handler.setFormatter(self.formatter) + logger_stderr.addHandler(stderr_handler) + logger_stderr.setLevel(logging.DEBUG) + def stderr_tracer(message): + self.log_message(logger_stderr, message) + return stderr_tracer + + def log_message(self, logger, message): + for keyword in message.keywords: + if keyword.startswith('debug'): + logger.debug(message) + if keyword.startswith('info'): + logger.info(message) + if keyword.startswith('warn'): + logger.warn(message) + if keyword.startswith('err'): + logger.error(message) + if keyword.startswith('crit'): + logger.critical(message) + +consumer = LogConsumer() +FileLogger = consumer.FileLogger +StdoutLogger = consumer.StdoutLogger +StderrLogger = consumer.StderrLogger Modified: py/dist/py/misc/testing/test_log.py ============================================================================== --- py/dist/py/misc/testing/test_log.py (original) +++ py/dist/py/misc/testing/test_log.py Sun Jun 12 22:11:21 2005 @@ -1,86 +1,137 @@ - -import py +import py import sys -class TestLogging: +class TestLogProducer: def setup_method(self, meth): - self.state = py.log._getstate_() + self.state = py.log.getstate() def teardown_method(self, meth): - py.log._setstate_(self.state) + py.log.setstate(self.state) + + def test_producer_repr(self): + d = py.log.debug + assert repr(d).find('debug') != -1 + + def test_produce_one_keyword(self): + l = [] + def f(msg): + l.append(msg) + py.log.set_logger('debug', f) + py.log.debug("hello world") + assert len(l) == 1 + msg = l[0] + assert msg.content().startswith('hello world') + assert msg.prefix() == '[debug] ' + assert str(msg) == "[debug] hello world" + + def test_default_logger(self): + l = [] + def f(msg): + l.append(msg) + + py.log.set_logger("default", f) + py.log.debug("hello") + py.log.warn("world") + py.log.info("I") + py.log.error("am") + py.log.critical("Sam") + assert len(l) == 5 + msg1, msg2, msg3, msg4, msg5 = l + + assert 'debug' in msg1.keywords + assert 'warn' in msg2.keywords + assert 'info' in msg3.keywords + assert 'error' in msg4.keywords + assert 'critical' in msg5.keywords + + assert msg1.content() == 'hello' + assert msg2.content() == 'world' + assert msg3.content() == 'I' + assert msg4.content() == 'am' + assert msg5.content() == 'Sam' + +class TestLogConsumer: + + def test_log_stdout(self): + # We redirect stdout so that we can verify that + # the log messages have been printed to it + dir = py.test.ensuretemp("logtest") + p = dir.join('py_stdout.out') + redirect = str(p) + sys.saved = sys.stdout + sys.stdout = open(redirect, 'w') + + # Start of the 'consumer' code + py.log.set_logger("default", py.log.StdoutLogger()) + py.log.debug("hello world #1") + py.log.info("hello world #2") + py.log.warn("hello world #3") + py.log.error("hello world #4") + py.log.critical("hello world #5") + # End of the 'consumer' code + + sys.stdout = sys.saved + lines = open(redirect).readlines() + assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', + '[warn] hello world #3\n', '[error] hello world #4\n', + '[critical] hello world #5\n'] + + def test_log_stderr(self): + # We redirect stderr so that we can verify that + # the log messages have 
been printed to it + dir = py.test.ensuretemp("logtest") + p = dir.join('py_stderr.out') + redirect = str(p) + sys.saved = sys.stderr + sys.stderr = open(redirect, 'w') + + # Start of the 'consumer' code + py.log.set_logger("default", py.log.StderrLogger()) + py.log.debug("hello world #1") + py.log.info("hello world #2") + py.log.warn("hello world #3") + py.log.error("hello world #4") + py.log.critical("hello world #5") + # End of the 'consumer' code + + sys.stderr = sys.saved + lines = open(redirect).readlines() + assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', + '[warn] hello world #3\n', '[error] hello world #4\n', + '[critical] hello world #5\n'] + + def test_default_log_file(self): + # Start of the 'consumer' code + py.log.set_logger("default", py.log.FileLogger()) + py.log.debug("hello world #1") + py.log.info("hello world #2") + py.log.warn("hello world #3") + py.log.error("hello world #4") + py.log.critical("hello world #5") + # End of the 'consumer' code + + lines = open('log.out').readlines() + last_5_lines = lines[-5:] + assert last_5_lines == ['[debug] hello world #1\n', '[info] hello world #2\n', + '[warn] hello world #3\n', '[error] hello world #4\n', + '[critical] hello world #5\n'] + + def test_custom_log_file(self): + dir = py.test.ensuretemp("logtest") + p = dir.join('log2.out') + custom_log = str(p) + + # Start of the 'consumer' code + py.log.set_logger("default", py.log.FileLogger(custom_log)) + py.log.debug("hello world #1") + py.log.info("hello world #2") + py.log.warn("hello world #3") + py.log.error("hello world #4") + py.log.critical("hello world #5") + # End of the 'consumer' code - def test_log_stdout(self): - # We redirect stdout so that we can verify that - # the log messages have been printed to it - redirect = 'py_stdout.out' - sys.saved = sys.stdout - sys.stdout = open(redirect, 'w') - - # Start of the 'consumer' code - py.trace[...] = py.log.stdout - py.trace.debug("hello world") - py.trace.info("hello world") - py.trace.warn("hello world") - py.trace.error("hello world") - py.trace.critical("hello world") - # End of the 'consumer' code - - sys.stdout = sys.saved - lines = open(redirect).readlines() - assert lines == ['[debug] hello world\n', '[info] hello world\n', - '[warn] hello world\n', '[error] hello world\n', - '[critical] hello world\n'] - - def test_log_stderr(self): - # We redirect stderr so that we can verify that - # the log messages have been printed to it - redirect = 'py_stderr.out' - sys.saved = sys.stderr - sys.stderr = open(redirect, 'w') - - # Start of the 'consumer' code - py.trace[...] = py.log.stderr - py.trace.debug("hello world") - py.trace.info("hello world") - py.trace.warn("hello world") - py.trace.error("hello world") - py.trace.critical("hello world") - # End of the 'consumer' code - - sys.stderr = sys.saved - lines = open(redirect).readlines() - assert lines == ['[debug] hello world\n', '[info] hello world\n', - '[warn] hello world\n', '[error] hello world\n', - '[critical] hello world\n'] - - def test_default_log_file(self): - # Start of the 'consumer' code - py.trace[...] 
= py.log.file - py.trace.debug("hello world") - py.trace.info("hello world") - py.trace.warn("hello world") - py.trace.error("hello world") - py.trace.critical("hello world") - # End of the 'consumer' code - - lines = open('py_log.out').readlines() - last_5_lines = lines[-5:] - assert last_5_lines == ['[debug] hello world\n', '[info] hello world\n', - '[warn] hello world\n', '[error] hello world\n', - '[critical] hello world\n'] - - def test_custom_log_file(self): - # Start of the 'consumer' code - py.log['file'] = 'py_log2.out' - py.trace[...] = py.log.file - py.trace.debug("hello world") - py.trace.info("hello world") - py.trace.warn("hello world") - py.trace.error("hello world") - py.trace.critical("hello world") - # End of the 'consumer' code - - lines = open('py_log2.out').readlines() - last_5_lines = lines[-5:] - assert last_5_lines == ['[debug] hello world\n', '[info] hello world\n', - '[warn] hello world\n', '[error] hello world\n', - '[critical] hello world\n'] + lines = open(custom_log).readlines() + last_5_lines = lines[-5:] + assert last_5_lines == ['[debug] hello world #1\n', '[info] hello world #2\n', + '[warn] hello world #3\n', '[error] hello world #4\n', + '[critical] hello world #5\n'] + \ No newline at end of file Deleted: /py/dist/py/misc/testing/test_trace.py ============================================================================== --- /py/dist/py/misc/testing/test_trace.py Sun Jun 12 22:11:21 2005 +++ (empty file) @@ -1,41 +0,0 @@ - -import py - -class TestTracing: - def setup_method(self, meth): - self.state = py.trace._getstate_() - def teardown_method(self, meth): - py.trace._setstate_(self.state) - - def test_tracer_repr(self): - t = py.trace.debug - assert repr(t).find('debug') != -1 - - def test_trace_one_keyword(self): - t = py.trace.debug - l = [] - def f(msg): - l.append(msg) - py.trace['debug'] = f - py.trace.debug("hello world") - assert len(l) == 1 - msg = l[0] - assert msg.content().startswith('hello world') - assert msg.prefix() == '[debug] ' - assert str(msg) == "[debug] hello world" - - def test_trace_default_writer(self): - l = [] - def f(msg): - l.append(msg) - - py.trace[...] = f - py.trace.debug("hello") - py.trace.warn("world") - assert len(l) == 2 - msg1, msg2 = l - - assert 'debug' in msg1.keywords - assert 'warn' in msg2.keywords - assert msg1.content() == 'hello' - assert msg2.content() == 'world' Deleted: /py/dist/py/misc/trace.py ============================================================================== --- /py/dist/py/misc/trace.py Sun Jun 12 22:11:21 2005 +++ (empty file) @@ -1,69 +0,0 @@ -""" -py lib's basic tracing functionality - - EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL (especially the dispatching) - -WARNING: this module is not allowed to contain any 'py' imports, - Instead, it is very self-contained and should not depend on - CPython/stdlib versions, either. One reason for these - restrictions is that this module should be sendable - via py.execnet across the network in an very early phase. 
-""" - -class Message(object): - def __init__(self, keywords, args): - self.keywords = keywords - self.args = args - - def content(self): - return " ".join(map(str, self.args)) - - def prefix(self): - return "[%s] " % (":".join(self.keywords)) - - def __str__(self): - return self.prefix() + self.content() - -class Tracer(object): - Message = Message # to allow later customization - _traceregistry = {} - - def __init__(self, keywords=()): - self.keywords = keywords - - def __repr__(self): - return "" % ":".join(self.keywords) - - def __getattr__(self, name): - if name[0] == '_': - raise AttributeError, name - return Tracer(self.keywords + (name,)) - - def __setitem__(self, name, func): - assert callable(func) - keywords = self.keywords + (name,) - self._traceregistry[keywords] = func - - def __call__(self, *args): - message = self.Message(self.keywords, args) - try: - func = self._traceregistry[message.keywords] - except KeyError: - # XXX find best match, for now it's a hack/simplistic - try: - func = self._traceregistry[(Ellipsis,)] - except KeyError: - print str(message) - return - func(message) - - # class methods dealing with registry - def _getstate_(cls): - return cls._traceregistry.copy() - _getstate_ = classmethod(_getstate_) - - def _setstate_(cls, state): - cls._traceregistry = state - _setstate_ = classmethod(_setstate_) - -trace = Tracer() From hpk at codespeak.net Mon Jun 13 01:07:15 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Mon, 13 Jun 2005 01:07:15 +0200 (CEST) Subject: [py-svn] r13324 - py/dist/py Message-ID: <20050612230715.1C0D027B50@code1.codespeak.net> Author: hpk Date: Mon Jun 13 01:07:14 2005 New Revision: 13324 Modified: py/dist/py/__init__.py Log: disabling __init__.py's log until i have looked into it a bit more Modified: py/dist/py/__init__.py ============================================================================== --- py/dist/py/__init__.py (original) +++ py/dist/py/__init__.py Mon Jun 13 01:07:14 2005 @@ -102,6 +102,6 @@ # trace 'trace' : ('./misc/trace.py', 'trace'), - # log - 'log' : ('./misc/log.py', 'log'), + ## log + #'log' : ('./misc/log.py', 'log'), }) From ggheo at codespeak.net Mon Jun 13 02:29:17 2005 From: ggheo at codespeak.net (ggheo at codespeak.net) Date: Mon, 13 Jun 2005 02:29:17 +0200 (CEST) Subject: [py-svn] r13329 - py/dist/py Message-ID: <20050613002917.0884427B50@code1.codespeak.net> Author: ggheo Date: Mon Jun 13 02:29:16 2005 New Revision: 13329 Modified: py/dist/py/__init__.py Log: Forgot to commit the file when I committed the last log-related changes. Holger -- I noticed that you disabled the log entry, but if I don't commit this then my test_log.py stuff won't work. 
Modified: py/dist/py/__init__.py ============================================================================== --- py/dist/py/__init__.py (original) +++ py/dist/py/__init__.py Mon Jun 13 02:29:16 2005 @@ -99,9 +99,16 @@ 'xml.Namespace' : ('./xmlobj/xml.py', 'Namespace'), 'xml.escape' : ('./xmlobj/misc.py', 'escape'), - # trace - 'trace' : ('./misc/trace.py', 'trace'), - - ## log - #'log' : ('./misc/log.py', 'log'), + # logging API ('producers' and 'consumers') + 'log.debug' : ('./misc/log.py', 'debug'), + 'log.info' :('./misc/log.py', 'info'), + 'log.warn' :('./misc/log.py', 'warn'), + 'log.error' :('./misc/log.py', 'error'), + 'log.critical' :('./misc/log.py', 'critical'), + 'log.set_logger' :('./misc/log.py', 'set_logger'), + 'log.getstate' :('./misc/log.py', '_getstate_'), + 'log.setstate' :('./misc/log.py', '_setstate_'), + 'log.FileLogger' :('./misc/log_support.py', 'FileLogger'), + 'log.StdoutLogger' :('./misc/log_support.py', 'StdoutLogger'), + 'log.StderrLogger' :('./misc/log_support.py', 'StderrLogger'), }) From dstanek at codespeak.net Mon Jun 13 13:13:36 2005 From: dstanek at codespeak.net (dstanek at codespeak.net) Date: Mon, 13 Jun 2005 13:13:36 +0200 (CEST) Subject: [py-svn] r13338 - py/dist/py/compat Message-ID: <20050613111336.0659B27B50@code1.codespeak.net> Author: dstanek Date: Mon Jun 13 13:13:34 2005 New Revision: 13338 Added: py/dist/py/compat/ Log: Added a compat directory for borrowed Python modules. From dstanek at codespeak.net Mon Jun 13 13:14:31 2005 From: dstanek at codespeak.net (dstanek at codespeak.net) Date: Mon, 13 Jun 2005 13:14:31 +0200 (CEST) Subject: [py-svn] r13339 - py/dist/py/compat Message-ID: <20050613111431.5E04427B50@code1.codespeak.net> Author: dstanek Date: Mon Jun 13 13:14:27 2005 New Revision: 13339 Added: py/dist/py/compat/doctest.py - copied unchanged from r13338, vendor/cpython/Python-r241/dist/src/Lib/doctest.py Log: Added the standard Python doctest module from Python-r241. From dstanek at codespeak.net Mon Jun 13 13:15:17 2005 From: dstanek at codespeak.net (dstanek at codespeak.net) Date: Mon, 13 Jun 2005 13:15:17 +0200 (CEST) Subject: [py-svn] r13340 - py/dist/py/compat Message-ID: <20050613111517.6D5F427B50@code1.codespeak.net> Author: dstanek Date: Mon Jun 13 13:15:14 2005 New Revision: 13340 Added: py/dist/py/compat/optparse.py - copied unchanged from r13339, vendor/cpython/Python-r241/dist/src/Lib/optparse.py Log: Added the standard Python optparse module from Python-r241. From dstanek at codespeak.net Mon Jun 13 13:15:59 2005 From: dstanek at codespeak.net (dstanek at codespeak.net) Date: Mon, 13 Jun 2005 13:15:59 +0200 (CEST) Subject: [py-svn] r13341 - py/dist/py/compat Message-ID: <20050613111559.E1D8427B50@code1.codespeak.net> Author: dstanek Date: Mon Jun 13 13:15:56 2005 New Revision: 13341 Added: py/dist/py/compat/textwrap.py - copied unchanged from r13340, vendor/cpython/Python-r241/dist/src/Lib/textwrap.py Log: Added the standard Python textwrap module from Python-r241. From hpk at codespeak.net Mon Jun 13 13:36:17 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Mon, 13 Jun 2005 13:36:17 +0200 (CEST) Subject: [py-svn] r13342 - in py/dist/py/misc: . 
testing Message-ID: <20050613113617.6A6A427B50@code1.codespeak.net> Author: hpk Date: Mon Jun 13 13:36:17 2005 New Revision: 13342 Modified: py/dist/py/misc/log.py py/dist/py/misc/log_support.py py/dist/py/misc/testing/test_log.py Log: - quick streamlining of py/misc/log.py (no tests broke :-) - don't create a log.out file in the current dir but in a proper tempdir Modified: py/dist/py/misc/log.py ============================================================================== --- py/dist/py/misc/log.py (original) +++ py/dist/py/misc/log.py Mon Jun 13 13:36:17 2005 @@ -38,26 +38,6 @@ def __repr__(self): return "" % ":".join(self.keywords) - def debug(self, *args): - producer = LogProducer(self.keywords + ('debug',)) - producer(*args) - - def info(self, *args): - producer = LogProducer(self.keywords + ('info',)) - producer(*args) - - def warn(self, *args): - producer = LogProducer(self.keywords + ('warn',)) - producer(*args) - - def error(self, *args): - producer = LogProducer(self.keywords + ('error',)) - producer(*args) - - def critical(self, *args): - producer = LogProducer(self.keywords + ('critical',)) - producer(*args) - def set_logger(self, name, func): assert callable(func) keywords = self.keywords + (name,) @@ -98,4 +78,4 @@ critical = producer.critical set_logger = producer.set_logger _getstate_ = producer._getstate_ -_setstate_ = producer._setstate_ \ No newline at end of file +_setstate_ = producer._setstate_ Modified: py/dist/py/misc/log_support.py ============================================================================== --- py/dist/py/misc/log_support.py (original) +++ py/dist/py/misc/log_support.py Mon Jun 13 13:36:17 2005 @@ -9,11 +9,8 @@ def __init__(self): self.formatter = logging.Formatter('%(message)s') - def FileLogger(self, filename=None, mode='a'): - if filename is None: - filename = 'log.out' - else: - filename = str(filename) + def FileLogger(self, filename, mode='a'): + filename = str(filename) file_handler = logging.FileHandler(filename, mode) file_handler.setFormatter(self.formatter) logger_name = "py.log.file.%s" % os.path.basename(filename) Modified: py/dist/py/misc/testing/test_log.py ============================================================================== --- py/dist/py/misc/testing/test_log.py (original) +++ py/dist/py/misc/testing/test_log.py Mon Jun 13 13:36:17 2005 @@ -1,6 +1,9 @@ import py import sys +def setup_module(mod): + mod.tempdir = py.test.ensuretemp("py.log-test") + class TestLogProducer: def setup_method(self, meth): self.state = py.log.getstate() @@ -101,7 +104,8 @@ def test_default_log_file(self): # Start of the 'consumer' code - py.log.set_logger("default", py.log.FileLogger()) + logfilefn = tempdir.join('log.out') + py.log.set_logger("default", py.log.FileLogger(logfilefn)) py.log.debug("hello world #1") py.log.info("hello world #2") py.log.warn("hello world #3") @@ -109,16 +113,14 @@ py.log.critical("hello world #5") # End of the 'consumer' code - lines = open('log.out').readlines() + lines = logfilefn.readlines() last_5_lines = lines[-5:] assert last_5_lines == ['[debug] hello world #1\n', '[info] hello world #2\n', '[warn] hello world #3\n', '[error] hello world #4\n', '[critical] hello world #5\n'] def test_custom_log_file(self): - dir = py.test.ensuretemp("logtest") - p = dir.join('log2.out') - custom_log = str(p) + custom_log = tempdir.join('log2.out') # Start of the 'consumer' code py.log.set_logger("default", py.log.FileLogger(custom_log)) @@ -129,9 +131,9 @@ py.log.critical("hello world #5") # End of the 'consumer' code - 
lines = open(custom_log).readlines() + lines = custom_log.readlines() last_5_lines = lines[-5:] assert last_5_lines == ['[debug] hello world #1\n', '[info] hello world #2\n', '[warn] hello world #3\n', '[error] hello world #4\n', '[critical] hello world #5\n'] - \ No newline at end of file + From hpk at codespeak.net Mon Jun 13 16:37:43 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Mon, 13 Jun 2005 16:37:43 +0200 (CEST) Subject: [py-svn] r13350 - py/dist/py/compat Message-ID: <20050613143743.C179927B7C@code1.codespeak.net> Author: hpk Date: Mon Jun 13 16:37:43 2005 New Revision: 13350 Added: py/dist/py/compat/__init__.py Log: add missing __init__.py file Added: py/dist/py/compat/__init__.py ============================================================================== --- (empty file) +++ py/dist/py/compat/__init__.py Mon Jun 13 16:37:43 2005 @@ -0,0 +1 @@ +# From ggheo at codespeak.net Mon Jun 13 19:56:47 2005 From: ggheo at codespeak.net (ggheo at codespeak.net) Date: Mon, 13 Jun 2005 19:56:47 +0200 (CEST) Subject: [py-svn] r13360 - in py/dist/py/misc: . testing Message-ID: <20050613175647.14C5B27B84@code1.codespeak.net> Author: ggheo Date: Mon Jun 13 19:56:46 2005 New Revision: 13360 Modified: py/dist/py/misc/log_support.py py/dist/py/misc/testing/test_log.py Log: Changed logging back-end logic: only add new logging handlers when a new file is specified (otherwise the same file can end up belonging to multiple handlers and messages can get duplicated). Added more tests to test_log.py. Modified: py/dist/py/misc/log_support.py ============================================================================== --- py/dist/py/misc/log_support.py (original) +++ py/dist/py/misc/log_support.py Mon Jun 13 19:56:46 2005 @@ -6,37 +6,45 @@ a 'producer' object and displays them using various logging mechanisms (stdout, stderr, files, syslog, etc.)""" + _handlers = {} + def __init__(self): self.formatter = logging.Formatter('%(message)s') def FileLogger(self, filename, mode='a'): - filename = str(filename) - file_handler = logging.FileHandler(filename, mode) - file_handler.setFormatter(self.formatter) - logger_name = "py.log.file.%s" % os.path.basename(filename) + filename = str(filename) + logger_name = "py.log.file.%s" % filename logger_file = logging.getLogger(logger_name) - logger_file.addHandler(file_handler) logger_file.setLevel(logging.DEBUG) + if not self._handlers.has_key(filename): + file_handler = logging.FileHandler(filename, mode) + file_handler.setFormatter(self.formatter) + logger_file.addHandler(file_handler) + self._handlers[filename] = file_handler def file_tracer(message): self.log_message(logger_file, message) return file_tracer def StdoutLogger(self): logger_stdout = logging.getLogger('py.log.stdout') - stdout_handler = logging.StreamHandler(sys.stdout) - stdout_handler.setFormatter(self.formatter) - logger_stdout.addHandler(stdout_handler) logger_stdout.setLevel(logging.DEBUG) + if not self._handlers.has_key('stdout'): + stdout_handler = logging.StreamHandler(sys.stdout) + stdout_handler.setFormatter(self.formatter) + logger_stdout.addHandler(stdout_handler) + self._handlers['stdout'] = stdout_handler def stdout_tracer(message): self.log_message(logger_stdout, message) return stdout_tracer def StderrLogger(self): logger_stderr = logging.getLogger('py.log.stderr') - stderr_handler = logging.StreamHandler(sys.stderr) - stderr_handler.setFormatter(self.formatter) - logger_stderr.addHandler(stderr_handler) logger_stderr.setLevel(logging.DEBUG) + if not 
self._handlers.has_key('stderr'): + stderr_handler = logging.StreamHandler(sys.stderr) + stderr_handler.setFormatter(self.formatter) + logger_stderr.addHandler(stderr_handler) + self._handlers['stderr'] = stderr_handler def stderr_tracer(message): self.log_message(logger_stderr, message) return stderr_tracer Modified: py/dist/py/misc/testing/test_log.py ============================================================================== --- py/dist/py/misc/testing/test_log.py (original) +++ py/dist/py/misc/testing/test_log.py Mon Jun 13 19:56:46 2005 @@ -57,8 +57,7 @@ def test_log_stdout(self): # We redirect stdout so that we can verify that # the log messages have been printed to it - dir = py.test.ensuretemp("logtest") - p = dir.join('py_stdout.out') + p = tempdir.join('py_stdout.out') redirect = str(p) sys.saved = sys.stdout sys.stdout = open(redirect, 'w') @@ -81,8 +80,7 @@ def test_log_stderr(self): # We redirect stderr so that we can verify that # the log messages have been printed to it - dir = py.test.ensuretemp("logtest") - p = dir.join('py_stderr.out') + p = tempdir.join('py_stderr.out') redirect = str(p) sys.saved = sys.stderr sys.stderr = open(redirect, 'w') @@ -114,8 +112,7 @@ # End of the 'consumer' code lines = logfilefn.readlines() - last_5_lines = lines[-5:] - assert last_5_lines == ['[debug] hello world #1\n', '[info] hello world #2\n', + assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', '[warn] hello world #3\n', '[error] hello world #4\n', '[critical] hello world #5\n'] @@ -132,8 +129,91 @@ # End of the 'consumer' code lines = custom_log.readlines() - last_5_lines = lines[-5:] - assert last_5_lines == ['[debug] hello world #1\n', '[info] hello world #2\n', + assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', + '[warn] hello world #3\n', '[error] hello world #4\n', + '[critical] hello world #5\n'] + + def test_log_file_append_mode(self): + logfilefn = tempdir.join('log_append.out') + + # The append mode is on by default, so we don't need to specify it for FileLogger + py.log.set_logger("default", py.log.FileLogger(logfilefn)) + py.log.debug("hello world #1") + py.log.info("hello world #2") + py.log.warn("hello world #3") + py.log.error("hello world #4") + py.log.critical("hello world #5") + + lines = logfilefn.readlines() + assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', '[warn] hello world #3\n', '[error] hello world #4\n', '[critical] hello world #5\n'] + # We log 5 more lines that should be appended to the log + py.log.set_logger("default", py.log.FileLogger(logfilefn)) + py.log.debug("hello world #6") + py.log.info("hello world #7") + py.log.warn("hello world #8") + py.log.error("hello world #9") + py.log.critical("hello world #10") + + lines = logfilefn.readlines() + assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', + '[warn] hello world #3\n', '[error] hello world #4\n', + '[critical] hello world #5\n', + '[debug] hello world #6\n', '[info] hello world #7\n', + '[warn] hello world #8\n', '[error] hello world #9\n', + '[critical] hello world #10\n'] + + + def test_log_file_write_mode(self): + logfilefn = tempdir.join('log_write.out') + logfilefn.write("This line should be zapped when we start logging\n") + + # We specify mode='w' for the FileLogger + py.log.set_logger("default", py.log.FileLogger(logfilefn, mode='w')) + py.log.debug("hello world #1") + py.log.info("hello world #2") + py.log.warn("hello world #3") + py.log.error("hello world #4") + py.log.critical("hello world #5") + + 
lines = logfilefn.readlines() + assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', + '[warn] hello world #3\n', '[error] hello world #4\n', + '[critical] hello world #5\n'] + + + def test_keyword_based_log_files(self): + logfiledebug = tempdir.join('log_debug.out') + logfileinfo = tempdir.join('log_info.out') + logfilewarn = tempdir.join('log_warn.out') + logfileerror = tempdir.join('log_error.out') + logfilecritical = tempdir.join('log_critical.out') + + py.log.set_logger("debug", py.log.FileLogger(logfiledebug)) + py.log.set_logger("info", py.log.FileLogger(logfileinfo)) + py.log.set_logger("warn", py.log.FileLogger(logfilewarn)) + py.log.set_logger("error", py.log.FileLogger(logfileerror)) + py.log.set_logger("critical", py.log.FileLogger(logfilecritical)) + + py.log.debug("hello world #1") + py.log.info("hello world #2") + py.log.warn("hello world #3") + py.log.error("hello world #4") + py.log.critical("hello world #5") + + lines = logfiledebug.readlines() + assert lines == ['[debug] hello world #1\n'] + + lines = logfileinfo.readlines() + assert lines == ['[info] hello world #2\n'] + + lines = logfilewarn.readlines() + assert lines == ['[warn] hello world #3\n'] + + lines = logfileerror.readlines() + assert lines == ['[error] hello world #4\n'] + + lines = logfilecritical.readlines() + assert lines == ['[critical] hello world #5\n'] From ggheo at codespeak.net Tue Jun 14 04:22:14 2005 From: ggheo at codespeak.net (ggheo at codespeak.net) Date: Tue, 14 Jun 2005 04:22:14 +0200 (CEST) Subject: [py-svn] r13370 - in py/dist/py: . misc misc/testing Message-ID: <20050614022214.0342E27B72@code1.codespeak.net> Author: ggheo Date: Tue Jun 14 04:22:14 2005 New Revision: 13370 Modified: py/dist/py/__init__.py py/dist/py/misc/log.py py/dist/py/misc/log_support.py py/dist/py/misc/testing/test_log.py Log: Added more loggers (some not tested yet); refactored log_support.py; added more tests to test_log.py. 
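The duplication problem described in r13360 above comes from the fact that stdlib loggers are process-global: logging.getLogger() with the same name always returns the same Logger object, so attaching a fresh handler on every FileLogger/StdoutLogger call would make each message appear once per handler. The guard that commit added, and which the refactored _logger_func below keeps, reduces to a sketch like the following (names are illustrative, not the py lib's own):

    import logging, sys

    _handlers = {}    # one handler per destination, as in log_support.py

    def make_stdout_logger():
        logger = logging.getLogger('py.log.stdout')   # global: same object every call
        logger.setLevel(logging.DEBUG)
        if 'stdout' not in _handlers:                 # only attach a handler once
            handler = logging.StreamHandler(sys.stdout)
            handler.setFormatter(logging.Formatter('%(message)s'))
            logger.addHandler(handler)
            _handlers['stdout'] = handler
        return logger

    make_stdout_logger().info("printed once")
    make_stdout_logger().info("still printed once, not duplicated")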
Modified: py/dist/py/__init__.py ============================================================================== --- py/dist/py/__init__.py (original) +++ py/dist/py/__init__.py Tue Jun 14 04:22:14 2005 @@ -111,4 +111,7 @@ 'log.FileLogger' :('./misc/log_support.py', 'FileLogger'), 'log.StdoutLogger' :('./misc/log_support.py', 'StdoutLogger'), 'log.StderrLogger' :('./misc/log_support.py', 'StderrLogger'), + 'log.EmailLogger' :('./misc/log_support.py', 'EmailLogger'), + 'log.SyslogLogger' :('./misc/log_support.py', 'SyslogLogger'), + 'log.WinEventLogger' :('./misc/log_support.py', 'WinEventLogger'), }) Modified: py/dist/py/misc/log.py ============================================================================== --- py/dist/py/misc/log.py (original) +++ py/dist/py/misc/log.py Tue Jun 14 04:22:14 2005 @@ -40,9 +40,14 @@ def set_logger(self, name, func): assert callable(func) - keywords = self.keywords + (name,) - self._registry[keywords] = func - + keywords = self.keywords + (name,) + self._registry[keywords] = func + # if default logger is set, also reset the other ones + if keywords == ('default',): + for k in [('debug',), ('info',), ('warn',), + ('error',), ('critical',)]: + self._registry[k] = func + def __getattr__(self, name): if name[0] == '_': raise AttributeError, name Modified: py/dist/py/misc/log_support.py ============================================================================== --- py/dist/py/misc/log_support.py (original) +++ py/dist/py/misc/log_support.py Tue Jun 14 04:22:14 2005 @@ -1,5 +1,6 @@ import py import os, sys, logging +import logging.handlers class LogConsumer(object): """Log "consumer" API which receives messages from @@ -14,41 +15,73 @@ def FileLogger(self, filename, mode='a'): filename = str(filename) logger_name = "py.log.file.%s" % filename - logger_file = logging.getLogger(logger_name) - logger_file.setLevel(logging.DEBUG) - if not self._handlers.has_key(filename): - file_handler = logging.FileHandler(filename, mode) - file_handler.setFormatter(self.formatter) - logger_file.addHandler(file_handler) - self._handlers[filename] = file_handler - def file_tracer(message): - self.log_message(logger_file, message) - return file_tracer - - def StdoutLogger(self): - logger_stdout = logging.getLogger('py.log.stdout') - logger_stdout.setLevel(logging.DEBUG) - if not self._handlers.has_key('stdout'): - stdout_handler = logging.StreamHandler(sys.stdout) - stdout_handler.setFormatter(self.formatter) - logger_stdout.addHandler(stdout_handler) - self._handlers['stdout'] = stdout_handler - def stdout_tracer(message): - self.log_message(logger_stdout, message) - return stdout_tracer + handler_type = logging.FileHandler + handler_args = {'filename': filename, + 'mode': mode, + } + return self._logger_func(logger_name, handler_type, **handler_args) + + def StdoutLogger(self): + # Add str(sys.stdout) to logger name because sys.stdout might be redirected + # to a file, and in this case we need to distinguish between files + logger_name = 'py.log.stdout.%s' % str(sys.stdout) + handler_type = logging.StreamHandler + handler_args = {'strm': sys.stdout, + } + return self._logger_func(logger_name, handler_type, **handler_args) def StderrLogger(self): - logger_stderr = logging.getLogger('py.log.stderr') - logger_stderr.setLevel(logging.DEBUG) - if not self._handlers.has_key('stderr'): - stderr_handler = logging.StreamHandler(sys.stderr) - stderr_handler.setFormatter(self.formatter) - logger_stderr.addHandler(stderr_handler) - self._handlers['stderr'] = stderr_handler - def 
stderr_tracer(message): - self.log_message(logger_stderr, message) - return stderr_tracer - + # Add str(sys.stderr) to logger name because sys.stderr might be redirected + # to a file, and in this case we need to distinguish between files + logger_name = 'py.log.stderr.%s' % str(sys.stderr) + handler_type = logging.StreamHandler + handler_args = {'strm': sys.stderr, + } + return self._logger_func(logger_name, handler_type, **handler_args) + + def SyslogLogger(self, address=('localhost', 514), facility=1): + logger_name = 'py.log.syslog' + handler_type = logging.handlers.SysLogHandler + handler_args = {'address': address, + 'facility': facility, + } + return self._logger_func(logger_name, handler_type, **handler_args) + + def WinEventLogger(self, appname='pylib', logtype='Application'): + logger_name = 'py.log.winevent' + handler_type = logging.handlers.NTEventLogHandler + handler_args = {'appname': appname, + 'logtype': logtype, + } + return self._logger_func(logger_name, handler_type, **handler_args) + + def EmailLogger(self, mailhost, fromaddr, toaddrs, subject): + logger_name = 'py.log.email' + handler_type = logging.handlers.SMTPHandler + handler_args = {'mailhost': mailhost, + 'fromaddr': fromaddr, + 'toaddrs': toaddrs, + 'subject': subject, + } + return self._logger_func(logger_name, handler_type, **handler_args) + + def _logger_func(self, logger_name, handler_type, **handler_args): + logger = logging.getLogger(logger_name) + #print "got logger " + str(logger) + "for name " + logger_name + logger.setLevel(logging.DEBUG) + + # Add handler to logger only if it hasn't been already set for + # the same logger name + if not self._handlers.has_key(logger_name): + #print "adding handler for logger " + logger_name + handler = handler_type(**handler_args) + handler.setFormatter(self.formatter) + logger.addHandler(handler) + self._handlers[logger_name] = handler + def message_processing_func(message): + self.log_message(logger, message) + return message_processing_func + def log_message(self, logger, message): for keyword in message.keywords: if keyword.startswith('debug'): @@ -66,3 +99,6 @@ FileLogger = consumer.FileLogger StdoutLogger = consumer.StdoutLogger StderrLogger = consumer.StderrLogger +SyslogLogger = consumer.SyslogLogger +WinEventLogger = consumer.WinEventLogger +EmailLogger = consumer.EmailLogger \ No newline at end of file Modified: py/dist/py/misc/testing/test_log.py ============================================================================== --- py/dist/py/misc/testing/test_log.py (original) +++ py/dist/py/misc/testing/test_log.py Tue Jun 14 04:22:14 2005 @@ -57,7 +57,7 @@ def test_log_stdout(self): # We redirect stdout so that we can verify that # the log messages have been printed to it - p = tempdir.join('py_stdout.out') + p = tempdir.join('log_stdout.out') redirect = str(p) sys.saved = sys.stdout sys.stdout = open(redirect, 'w') @@ -80,7 +80,7 @@ def test_log_stderr(self): # We redirect stderr so that we can verify that # the log messages have been printed to it - p = tempdir.join('py_stderr.out') + p = tempdir.join('log_stderr.out') redirect = str(p) sys.saved = sys.stderr sys.stderr = open(redirect, 'w') @@ -99,25 +99,9 @@ assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', '[warn] hello world #3\n', '[error] hello world #4\n', '[critical] hello world #5\n'] - - def test_default_log_file(self): - # Start of the 'consumer' code - logfilefn = tempdir.join('log.out') - py.log.set_logger("default", py.log.FileLogger(logfilefn)) - py.log.debug("hello 
world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - # End of the 'consumer' code - - lines = logfilefn.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - def test_custom_log_file(self): - custom_log = tempdir.join('log2.out') + def test_log_file(self): + custom_log = tempdir.join('log.out') # Start of the 'consumer' code py.log.set_logger("default", py.log.FileLogger(custom_log)) @@ -217,3 +201,194 @@ lines = logfilecritical.readlines() assert lines == ['[critical] hello world #5\n'] + + def test_reassign_default_logger(self): + logfiledefault1 = tempdir.join('default_log1.out') + + # We set a file logger as the default logger + py.log.set_logger("default", py.log.FileLogger(logfiledefault1)) + py.log.debug("hello world #1") + py.log.info("hello world #2") + py.log.warn("hello world #3") + py.log.error("hello world #4") + py.log.critical("hello world #5") + + lines = logfiledefault1.readlines() + assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', + '[warn] hello world #3\n', '[error] hello world #4\n', + '[critical] hello world #5\n'] + + # We set a different file logger as the default logger and verify + # that the new one receives messages and the old one does not receive them anymore + logfiledefault2 = tempdir.join('default_log2.out') + + py.log.set_logger("default", py.log.FileLogger(logfiledefault2)) + py.log.debug("hello world #6") + py.log.info("hello world #7") + py.log.warn("hello world #8") + py.log.error("hello world #9") + py.log.critical("hello world #10") + + lines = logfiledefault1.readlines() + assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', + '[warn] hello world #3\n', '[error] hello world #4\n', + '[critical] hello world #5\n'] + + lines = logfiledefault2.readlines() + assert lines == ['[debug] hello world #6\n', '[info] hello world #7\n', + '[warn] hello world #8\n', '[error] hello world #9\n', + '[critical] hello world #10\n'] + + # We set stderr as the default logger and verify that messages go to stderr + # and not to the previous 2 file loggers + p = tempdir.join('log_stderr_default.out') + redirect = str(p) + saved = sys.stderr + sys.stderr = open(redirect, 'w') + + py.log.set_logger("default", py.log.StderrLogger()) + py.log.debug("hello world #11") + py.log.info("hello world #12") + py.log.warn("hello world #13") + py.log.error("hello world #14") + py.log.critical("hello world #15") + + sys.stderr = saved + lines = open(redirect).readlines() + assert lines == ['[debug] hello world #11\n', '[info] hello world #12\n', + '[warn] hello world #13\n', '[error] hello world #14\n', + '[critical] hello world #15\n'] + + lines = logfiledefault1.readlines() + assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', + '[warn] hello world #3\n', '[error] hello world #4\n', + '[critical] hello world #5\n'] + + lines = logfiledefault2.readlines() + assert lines == ['[debug] hello world #6\n', '[info] hello world #7\n', + '[warn] hello world #8\n', '[error] hello world #9\n', + '[critical] hello world #10\n'] + + def test_reassign_debug_logger(self): + logfiledefault = tempdir.join('default.out') + logfiledebug1 = tempdir.join('debug_log1.out') + + # We set a file logger as the default logger in non-append mode + py.log.set_logger("default", py.log.FileLogger(logfiledefault, mode='w')) + + # We set a 
file logger as the debug logger + py.log.set_logger("debug", py.log.FileLogger(logfiledebug1)) + py.log.debug("hello world #1") + py.log.info("hello world #2") + py.log.warn("hello world #3") + py.log.error("hello world #4") + py.log.critical("hello world #5") + + # The debug message should have gone to the debug file logger + lines = logfiledebug1.readlines() + assert lines == ['[debug] hello world #1\n'] + + # All other messages should have gone to the default file logger + lines = logfiledefault.readlines() + assert lines == ['[info] hello world #2\n', + '[warn] hello world #3\n', '[error] hello world #4\n', + '[critical] hello world #5\n'] + + # We set a different file logger as the debug logger and verify + # that the new one receives messages and the old one does not receive them anymore + logfiledebug2 = tempdir.join('debug_log2.out') + + py.log.set_logger("debug", py.log.FileLogger(logfiledebug2)) + py.log.debug("hello world #6") + py.log.info("hello world #7") + py.log.warn("hello world #8") + py.log.error("hello world #9") + py.log.critical("hello world #10") + + # The debug message should have gone to the new debug file logger + lines = logfiledebug2.readlines() + assert lines == ['[debug] hello world #6\n'] + + # All other messages should have gone to the default file logger + lines = logfiledefault.readlines() + assert lines == ['[info] hello world #2\n', + '[warn] hello world #3\n', '[error] hello world #4\n', + '[critical] hello world #5\n', + '[info] hello world #7\n', + '[warn] hello world #8\n', '[error] hello world #9\n', + '[critical] hello world #10\n'] + + # The old debug file logger should be unchanged + lines = logfiledebug1.readlines() + assert lines == ['[debug] hello world #1\n'] + + # We set stdout as the debug logger and verify that messages go to stdout + # and not to the previous 2 file loggers + p = tempdir.join('log_stdout_debug.out') + redirect = str(p) + saved = sys.stdout + sys.stdout = open(redirect, 'w') + + py.log.set_logger("debug", py.log.StdoutLogger()) + py.log.debug("hello world #11") + py.log.info("hello world #12") + py.log.warn("hello world #13") + py.log.error("hello world #14") + py.log.critical("hello world #15") + + sys.stdout = saved + # The debug message should have gone to stdout + lines = open(redirect).readlines() + assert lines == ['[debug] hello world #11\n'] + + # All other messages should have gone to the default file logger + lines = logfiledefault.readlines() + assert lines == ['[info] hello world #2\n', + '[warn] hello world #3\n', '[error] hello world #4\n', + '[critical] hello world #5\n', + '[info] hello world #7\n', + '[warn] hello world #8\n', '[error] hello world #9\n', + '[critical] hello world #10\n', + '[info] hello world #12\n', + '[warn] hello world #13\n', '[error] hello world #14\n', + '[critical] hello world #15\n'] + + # The 2 old debug file logger should be unchanged + lines = logfiledebug1.readlines() + assert lines == ['[debug] hello world #1\n'] + + lines = logfiledebug2.readlines() + assert lines == ['[debug] hello world #6\n'] + + # disabled for now; the syslog log file can usually be read only by root + # I manually inspected /var/log/messages and the entries were there + def no_test_log_syslog(self): + py.log.set_logger("default", py.log.SyslogLogger()) + py.log.debug("hello world #1") + py.log.info("hello world #2") + py.log.warn("hello world #3") + py.log.error("hello world #4") + py.log.critical("hello world #5") + + # disabled for now until I figure out how to read entries in the + # Event Logs on 
Windows + # I manually inspected the Application Log and the entries were there + def no_test_log_winevent(self): + py.log.set_logger("default", py.log.WinEventLogger()) + py.log.debug("hello world #1") + py.log.info("hello world #2") + py.log.warn("hello world #3") + py.log.error("hello world #4") + py.log.critical("hello world #5") + + # disabled for now until I figure out how to properly pass the parameters + def no_test_log_email(self): + py.log.set_logger("default", py.log.EmailLogger(mailhost="gheorghiu.net", + fromaddr="grig", + toaddrs="grig", + subject = "py.log email")) + py.log.debug("hello world #1") + py.log.info("hello world #2") + py.log.warn("hello world #3") + py.log.error("hello world #4") + py.log.critical("hello world #5") From dstanek at codespeak.net Tue Jun 14 11:14:55 2005 From: dstanek at codespeak.net (dstanek at codespeak.net) Date: Tue, 14 Jun 2005 11:14:55 +0200 (CEST) Subject: [py-svn] r13373 - in py/branch/dist-doctest/py: . compat compat/testing misc/testing Message-ID: <20050614091455.1380727B84@code1.codespeak.net> Author: dstanek Date: Tue Jun 14 11:14:48 2005 New Revision: 13373 Added: py/branch/dist-doctest/py/compat/ py/branch/dist-doctest/py/compat/__init__.py (contents, props changed) py/branch/dist-doctest/py/compat/conftest.py (contents, props changed) py/branch/dist-doctest/py/compat/doctest.py (contents, props changed) py/branch/dist-doctest/py/compat/optparse.py (contents, props changed) py/branch/dist-doctest/py/compat/testing/ py/branch/dist-doctest/py/compat/testing/test_doctest.py (contents, props changed) py/branch/dist-doctest/py/compat/testing/test_doctest.txt py/branch/dist-doctest/py/compat/testing/test_doctest2.py (contents, props changed) py/branch/dist-doctest/py/compat/testing/test_doctest2.txt py/branch/dist-doctest/py/compat/textwrap.py (contents, props changed) Modified: py/branch/dist-doctest/py/__init__.py py/branch/dist-doctest/py/initpkg.py py/branch/dist-doctest/py/misc/testing/test_initpkg.py Log: Added support for Python compatibility modules. 
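The compat package introduced in r13338-r13341 vendors doctest, optparse and textwrap from Python 2.4.1 so that the py lib can rely on one known-good version of those modules regardless of which interpreter runs it. The export-table entries for them are still commented out in the branch diff below, so the snippet here is only a plausible usage sketch under the assumption that the vendored modules remain importable as an ordinary subpackage through py/compat/__init__.py:

    try:
        from py.compat import textwrap   # vendored Python 2.4.1 copy
    except ImportError:
        import textwrap                  # fall back to the interpreter's stdlib copy

    print textwrap.fill("prefer the bundled copy so behaviour "
                        "does not vary across Python versions", width=40)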
Modified: py/branch/dist-doctest/py/__init__.py ============================================================================== --- py/branch/dist-doctest/py/__init__.py (original) +++ py/branch/dist-doctest/py/__init__.py Tue Jun 14 11:14:48 2005 @@ -99,9 +99,20 @@ 'xml.Namespace' : ('./xmlobj/xml.py', 'Namespace'), 'xml.escape' : ('./xmlobj/misc.py', 'escape'), - # trace - 'trace' : ('./misc/trace.py', 'trace'), + # logging API ('producers' and 'consumers') + 'log.debug' : ('./misc/log.py', 'debug'), + 'log.info' :('./misc/log.py', 'info'), + 'log.warn' :('./misc/log.py', 'warn'), + 'log.error' :('./misc/log.py', 'error'), + 'log.critical' :('./misc/log.py', 'critical'), + 'log.set_logger' :('./misc/log.py', 'set_logger'), + 'log.getstate' :('./misc/log.py', '_getstate_'), + 'log.setstate' :('./misc/log.py', '_setstate_'), + 'log.FileLogger' :('./misc/log_support.py', 'FileLogger'), + 'log.StdoutLogger' :('./misc/log_support.py', 'StdoutLogger'), + 'log.StderrLogger' :('./misc/log_support.py', 'StderrLogger'), - # log - 'log' : ('./misc/log.py', 'log'), + #'compat.doctest' :('./compat/doctest.py', None), + #'compat.optparse' :('./compat/optparse.py', None), + #'compat.textwrap' :('./compat/textwrap.py', None), }) Added: py/branch/dist-doctest/py/compat/__init__.py ============================================================================== --- (empty file) +++ py/branch/dist-doctest/py/compat/__init__.py Tue Jun 14 11:14:48 2005 @@ -0,0 +1 @@ +# Added: py/branch/dist-doctest/py/compat/conftest.py ============================================================================== --- (empty file) +++ py/branch/dist-doctest/py/compat/conftest.py Tue Jun 14 11:14:48 2005 @@ -0,0 +1,5 @@ +import py + +class Directory(py.test.collect.Directory): + def run(self): + py.test.skip("compat tests currently need to be run manually") Added: py/branch/dist-doctest/py/compat/doctest.py ============================================================================== --- (empty file) +++ py/branch/dist-doctest/py/compat/doctest.py Tue Jun 14 11:14:48 2005 @@ -0,0 +1,2665 @@ +# Module doctest. +# Released to the public domain 16-Jan-2001, by Tim Peters (tim at python.org). +# Major enhancements and refactoring by: +# Jim Fulton +# Edward Loper + +# Provided as-is; use at your own risk; no warranty; no promises; enjoy! + +r"""Module doctest -- a framework for running examples in docstrings. + +In simplest use, end each module M to be tested with: + +def _test(): + import doctest + doctest.testmod() + +if __name__ == "__main__": + _test() + +Then running the module as a script will cause the examples in the +docstrings to get executed and verified: + +python M.py + +This won't display anything unless an example fails, in which case the +failing example(s) and the cause(s) of the failure(s) are printed to stdout +(why not stderr? because stderr is a lame hack <0.2 wink>), and the final +line of output is "Test failed.". + +Run it with the -v switch instead: + +python M.py -v + +and a detailed report of all examples tried is printed to stdout, along +with assorted summaries at the end. + +You can force verbose mode by passing "verbose=True" to testmod, or prohibit +it by passing "verbose=False". In either of those cases, sys.argv is not +examined by testmod. + +There are a variety of other ways to run doctests, including integration +with the unittest framework, and support for running non-Python text +files containing doctests. There are also many ways to override parts +of doctest's default behaviors. 
See the Library Reference Manual for +details. +""" + +__docformat__ = 'reStructuredText en' + +__all__ = [ + # 0, Option Flags + 'register_optionflag', + 'DONT_ACCEPT_TRUE_FOR_1', + 'DONT_ACCEPT_BLANKLINE', + 'NORMALIZE_WHITESPACE', + 'ELLIPSIS', + 'IGNORE_EXCEPTION_DETAIL', + 'COMPARISON_FLAGS', + 'REPORT_UDIFF', + 'REPORT_CDIFF', + 'REPORT_NDIFF', + 'REPORT_ONLY_FIRST_FAILURE', + 'REPORTING_FLAGS', + # 1. Utility Functions + 'is_private', + # 2. Example & DocTest + 'Example', + 'DocTest', + # 3. Doctest Parser + 'DocTestParser', + # 4. Doctest Finder + 'DocTestFinder', + # 5. Doctest Runner + 'DocTestRunner', + 'OutputChecker', + 'DocTestFailure', + 'UnexpectedException', + 'DebugRunner', + # 6. Test Functions + 'testmod', + 'testfile', + 'run_docstring_examples', + # 7. Tester + 'Tester', + # 8. Unittest Support + 'DocTestSuite', + 'DocFileSuite', + 'set_unittest_reportflags', + # 9. Debugging Support + 'script_from_examples', + 'testsource', + 'debug_src', + 'debug', +] + +import __future__ + +import sys, traceback, inspect, linecache, os, re, types +import unittest, difflib, pdb, tempfile +import warnings +from StringIO import StringIO + +# Don't whine about the deprecated is_private function in this +# module's tests. +warnings.filterwarnings("ignore", "is_private", DeprecationWarning, + __name__, 0) + +# There are 4 basic classes: +# - Example: a pair, plus an intra-docstring line number. +# - DocTest: a collection of examples, parsed from a docstring, plus +# info about where the docstring came from (name, filename, lineno). +# - DocTestFinder: extracts DocTests from a given object's docstring and +# its contained objects' docstrings. +# - DocTestRunner: runs DocTest cases, and accumulates statistics. +# +# So the basic picture is: +# +# list of: +# +------+ +---------+ +-------+ +# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results| +# +------+ +---------+ +-------+ +# | Example | +# | ... | +# | Example | +# +---------+ + +# Option constants. + +OPTIONFLAGS_BY_NAME = {} +def register_optionflag(name): + flag = 1 << len(OPTIONFLAGS_BY_NAME) + OPTIONFLAGS_BY_NAME[name] = flag + return flag + +DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1') +DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE') +NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE') +ELLIPSIS = register_optionflag('ELLIPSIS') +IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL') + +COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 | + DONT_ACCEPT_BLANKLINE | + NORMALIZE_WHITESPACE | + ELLIPSIS | + IGNORE_EXCEPTION_DETAIL) + +REPORT_UDIFF = register_optionflag('REPORT_UDIFF') +REPORT_CDIFF = register_optionflag('REPORT_CDIFF') +REPORT_NDIFF = register_optionflag('REPORT_NDIFF') +REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE') + +REPORTING_FLAGS = (REPORT_UDIFF | + REPORT_CDIFF | + REPORT_NDIFF | + REPORT_ONLY_FIRST_FAILURE) + +# Special string markers for use in `want` strings: +BLANKLINE_MARKER = '' +ELLIPSIS_MARKER = '...' + +###################################################################### +## Table of Contents +###################################################################### +# 1. Utility Functions +# 2. Example & DocTest -- store test cases +# 3. DocTest Parser -- extracts examples from strings +# 4. DocTest Finder -- extracts test cases from objects +# 5. DocTest Runner -- runs test cases +# 6. Test Functions -- convenient wrappers for testing +# 7. 
Tester Class -- for backwards compatibility +# 8. Unittest Support +# 9. Debugging Support +# 10. Example Usage + +###################################################################### +## 1. Utility Functions +###################################################################### + +def is_private(prefix, base): + """prefix, base -> true iff name prefix + "." + base is "private". + + Prefix may be an empty string, and base does not contain a period. + Prefix is ignored (although functions you write conforming to this + protocol may make use of it). + Return true iff base begins with an (at least one) underscore, but + does not both begin and end with (at least) two underscores. + + >>> is_private("a.b", "my_func") + False + >>> is_private("____", "_my_func") + True + >>> is_private("someclass", "__init__") + False + >>> is_private("sometypo", "__init_") + True + >>> is_private("x.y.z", "_") + True + >>> is_private("_x.y.z", "__") + False + >>> is_private("", "") # senseless but consistent + False + """ + warnings.warn("is_private is deprecated; it wasn't useful; " + "examine DocTestFinder.find() lists instead", + DeprecationWarning, stacklevel=2) + return base[:1] == "_" and not base[:2] == "__" == base[-2:] + +def _extract_future_flags(globs): + """ + Return the compiler-flags associated with the future features that + have been imported into the given namespace (globs). + """ + flags = 0 + for fname in __future__.all_feature_names: + feature = globs.get(fname, None) + if feature is getattr(__future__, fname): + flags |= feature.compiler_flag + return flags + +def _normalize_module(module, depth=2): + """ + Return the module specified by `module`. In particular: + - If `module` is a module, then return module. + - If `module` is a string, then import and return the + module with that name. + - If `module` is None, then return the calling module. + The calling module is assumed to be the module of + the stack frame at the given depth in the call stack. + """ + if inspect.ismodule(module): + return module + elif isinstance(module, (str, unicode)): + return __import__(module, globals(), locals(), ["*"]) + elif module is None: + return sys.modules[sys._getframe(depth).f_globals['__name__']] + else: + raise TypeError("Expected a module, string, or None") + +def _indent(s, indent=4): + """ + Add the given number of space characters to the beginning every + non-blank line in `s`, and return the result. + """ + # This regexp matches the start of non-blank lines: + return re.sub('(?m)^(?!$)', indent*' ', s) + +def _exception_traceback(exc_info): + """ + Return a string containing a traceback message for the given + exc_info tuple (as returned by sys.exc_info()). + """ + # Get a traceback message. + excout = StringIO() + exc_type, exc_val, exc_tb = exc_info + traceback.print_exception(exc_type, exc_val, exc_tb, file=excout) + return excout.getvalue() + +# Override some StringIO methods. +class _SpoofOut(StringIO): + def getvalue(self): + result = StringIO.getvalue(self) + # If anything at all was written, make sure there's a trailing + # newline. There's no way for the expected output to indicate + # that a trailing newline is missing. + if result and not result.endswith("\n"): + result += "\n" + # Prevent softspace from screwing up the next test case, in + # case they used print with a trailing comma in an example. 
+ if hasattr(self, "softspace"): + del self.softspace + return result + + def truncate(self, size=None): + StringIO.truncate(self, size) + if hasattr(self, "softspace"): + del self.softspace + +# Worst-case linear-time ellipsis matching. +def _ellipsis_match(want, got): + """ + Essentially the only subtle case: + >>> _ellipsis_match('aa...aa', 'aaa') + False + """ + if ELLIPSIS_MARKER not in want: + return want == got + + # Find "the real" strings. + ws = want.split(ELLIPSIS_MARKER) + assert len(ws) >= 2 + + # Deal with exact matches possibly needed at one or both ends. + startpos, endpos = 0, len(got) + w = ws[0] + if w: # starts with exact match + if got.startswith(w): + startpos = len(w) + del ws[0] + else: + return False + w = ws[-1] + if w: # ends with exact match + if got.endswith(w): + endpos -= len(w) + del ws[-1] + else: + return False + + if startpos > endpos: + # Exact end matches required more characters than we have, as in + # _ellipsis_match('aa...aa', 'aaa') + return False + + # For the rest, we only need to find the leftmost non-overlapping + # match for each piece. If there's no overall match that way alone, + # there's no overall match period. + for w in ws: + # w may be '' at times, if there are consecutive ellipses, or + # due to an ellipsis at the start or end of `want`. That's OK. + # Search for an empty string succeeds, and doesn't change startpos. + startpos = got.find(w, startpos, endpos) + if startpos < 0: + return False + startpos += len(w) + + return True + +def _comment_line(line): + "Return a commented form of the given line" + line = line.rstrip() + if line: + return '# '+line + else: + return '#' + +class _OutputRedirectingPdb(pdb.Pdb): + """ + A specialized version of the python debugger that redirects stdout + to a given stream when interacting with the user. Stdout is *not* + redirected when traced code is executed. + """ + def __init__(self, out): + self.__out = out + pdb.Pdb.__init__(self) + + def trace_dispatch(self, *args): + # Redirect stdout to the given stream. + save_stdout = sys.stdout + sys.stdout = self.__out + # Call Pdb's trace dispatch method. + try: + return pdb.Pdb.trace_dispatch(self, *args) + finally: + sys.stdout = save_stdout + +# [XX] Normalize with respect to os.path.pardir? +def _module_relative_path(module, path): + if not inspect.ismodule(module): + raise TypeError, 'Expected a module: %r' % module + if path.startswith('/'): + raise ValueError, 'Module-relative files may not have absolute paths' + + # Find the base directory for the path. + if hasattr(module, '__file__'): + # A normal module/package + basedir = os.path.split(module.__file__)[0] + elif module.__name__ == '__main__': + # An interactive session. + if len(sys.argv)>0 and sys.argv[0] != '': + basedir = os.path.split(sys.argv[0])[0] + else: + basedir = os.curdir + else: + # A module w/o __file__ (this includes builtins) + raise ValueError("Can't resolve paths relative to the module " + + module + " (it has no __file__)") + + # Combine the base directory and the path. + return os.path.join(basedir, *(path.split('/'))) + +###################################################################### +## 2. Example & DocTest +###################################################################### +## - An "example" is a pair, where "source" is a +## fragment of source code, and "want" is the expected output for +## "source." The Example class also includes information about +## where the example was extracted from. 
+## +## - A "doctest" is a collection of examples, typically extracted from +## a string (such as an object's docstring). The DocTest class also +## includes information about where the string was extracted from. + +class Example: + """ + A single doctest example, consisting of source code and expected + output. `Example` defines the following attributes: + + - source: A single Python statement, always ending with a newline. + The constructor adds a newline if needed. + + - want: The expected output from running the source code (either + from stdout, or a traceback in case of exception). `want` ends + with a newline unless it's empty, in which case it's an empty + string. The constructor adds a newline if needed. + + - exc_msg: The exception message generated by the example, if + the example is expected to generate an exception; or `None` if + it is not expected to generate an exception. This exception + message is compared against the return value of + `traceback.format_exception_only()`. `exc_msg` ends with a + newline unless it's `None`. The constructor adds a newline + if needed. + + - lineno: The line number within the DocTest string containing + this Example where the Example begins. This line number is + zero-based, with respect to the beginning of the DocTest. + + - indent: The example's indentation in the DocTest string. + I.e., the number of space characters that preceed the + example's first prompt. + + - options: A dictionary mapping from option flags to True or + False, which is used to override default options for this + example. Any option flags not contained in this dictionary + are left at their default value (as specified by the + DocTestRunner's optionflags). By default, no options are set. + """ + def __init__(self, source, want, exc_msg=None, lineno=0, indent=0, + options=None): + # Normalize inputs. + if not source.endswith('\n'): + source += '\n' + if want and not want.endswith('\n'): + want += '\n' + if exc_msg is not None and not exc_msg.endswith('\n'): + exc_msg += '\n' + # Store properties. + self.source = source + self.want = want + self.lineno = lineno + self.indent = indent + if options is None: options = {} + self.options = options + self.exc_msg = exc_msg + +class DocTest: + """ + A collection of doctest examples that should be run in a single + namespace. Each `DocTest` defines the following attributes: + + - examples: the list of examples. + + - globs: The namespace (aka globals) that the examples should + be run in. + + - name: A name identifying the DocTest (typically, the name of + the object whose docstring this DocTest was extracted from). + + - filename: The name of the file that this DocTest was extracted + from, or `None` if the filename is unknown. + + - lineno: The line number within filename where this DocTest + begins, or `None` if the line number is unavailable. This + line number is zero-based, with respect to the beginning of + the file. + + - docstring: The string that the examples were extracted from, + or `None` if the string is unavailable. + """ + def __init__(self, examples, globs, name, filename, lineno, docstring): + """ + Create a new DocTest containing the given examples. The + DocTest's globals are initialized with a copy of `globs`. 
+        """
+        assert not isinstance(examples, basestring), \
+               "DocTest no longer accepts str; use DocTestParser instead"
+        self.examples = examples
+        self.docstring = docstring
+        self.globs = globs.copy()
+        self.name = name
+        self.filename = filename
+        self.lineno = lineno
+
+    def __repr__(self):
+        if len(self.examples) == 0:
+            examples = 'no examples'
+        elif len(self.examples) == 1:
+            examples = '1 example'
+        else:
+            examples = '%d examples' % len(self.examples)
+        return ('<DocTest %s from %s:%s (%s)>' %
+                (self.name, self.filename, self.lineno, examples))
+
+
+    # This lets us sort tests by name:
+    def __cmp__(self, other):
+        if not isinstance(other, DocTest):
+            return -1
+        return cmp((self.name, self.filename, self.lineno, id(self)),
+                   (other.name, other.filename, other.lineno, id(other)))
+
+######################################################################
+## 3. DocTestParser
+######################################################################
+
+class DocTestParser:
+    """
+    A class used to parse strings containing doctest examples.
+    """
+    # This regular expression is used to find doctest examples in a
+    # string.  It defines three groups: `source` is the source code
+    # (including leading indentation and prompts); `indent` is the
+    # indentation of the first (PS1) line of the source code; and
+    # `want` is the expected output (including leading indentation).
+    _EXAMPLE_RE = re.compile(r'''
+        # Source consists of a PS1 line followed by zero or more PS2 lines.
+        (?P<source>
+            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
+            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
+        \n?
+        # Want consists of any non-blank lines that do not start with PS1.
+        (?P<want> (?:(?![ ]*$)    # Not a blank line
+                     (?![ ]*>>>)  # Not a line starting with PS1
+                     .*$\n?       # But any other line
+                  )*)
+        ''', re.MULTILINE | re.VERBOSE)
+
+    # A regular expression for handling `want` strings that contain
+    # expected exceptions.  It divides `want` into three pieces:
+    #    - the traceback header line (`hdr`)
+    #    - the traceback stack (`stack`)
+    #    - the exception message (`msg`), as generated by
+    #      traceback.format_exception_only()
+    # `msg` may have multiple lines.  We assume/require that the
+    # exception message is the first non-indented line starting with a word
+    # character following the traceback header line.
+    _EXCEPTION_RE = re.compile(r"""
+        # Grab the traceback header.  Different versions of Python have
+        # said different things on the first traceback line.
+        ^(?P<hdr> Traceback\ \(
+            (?: most\ recent\ call\ last
+            |   innermost\ last
+            ) \) :
+        )
+        \s* $                # toss trailing whitespace on the header.
+        (?P<stack> .*?)      # don't blink: absorb stuff until...
+        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
+        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+
+    # A callable returning a true value iff its argument is a blank line
+    # or contains a single comment.
+    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
+
+    def parse(self, string, name='<string>'):
+        """
+        Divide the given string into examples and intervening text,
+        and return them as a list of alternating Examples and strings.
+        Line numbers for the Examples are 0-based.  The optional
+        argument `name` is a name identifying this string, and is only
+        used for error messages.
+        """
+        string = string.expandtabs()
+        # If all lines begin with the same indentation, then strip it.
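# [editor's note -- illustrative sketch, not part of the commit above]
# How DocTestParser.parse() splits a string into Example objects and the
# surrounding text, assuming the module is importable as `doctest`:
from doctest import DocTestParser, Example
_pieces = DocTestParser().parse("Some text.\n>>> 2 + 2\n4\n", name='demo')
_examples = [p for p in _pieces if isinstance(p, Example)]
assert len(_examples) == 1
assert _examples[0].source == '2 + 2\n' and _examples[0].want == '4\n'
# [end editor's note]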
+ min_indent = self._min_indent(string) + if min_indent > 0: + string = '\n'.join([l[min_indent:] for l in string.split('\n')]) + + output = [] + charno, lineno = 0, 0 + # Find all doctest examples in the string: + for m in self._EXAMPLE_RE.finditer(string): + # Add the pre-example text to `output`. + output.append(string[charno:m.start()]) + # Update lineno (lines before this example) + lineno += string.count('\n', charno, m.start()) + # Extract info from the regexp match. + (source, options, want, exc_msg) = \ + self._parse_example(m, name, lineno) + # Create an Example, and add it to the list. + if not self._IS_BLANK_OR_COMMENT(source): + output.append( Example(source, want, exc_msg, + lineno=lineno, + indent=min_indent+len(m.group('indent')), + options=options) ) + # Update lineno (lines inside this example) + lineno += string.count('\n', m.start(), m.end()) + # Update charno. + charno = m.end() + # Add any remaining post-example text to `output`. + output.append(string[charno:]) + return output + + def get_doctest(self, string, globs, name, filename, lineno): + """ + Extract all doctest examples from the given string, and + collect them into a `DocTest` object. + + `globs`, `name`, `filename`, and `lineno` are attributes for + the new `DocTest` object. See the documentation for `DocTest` + for more information. + """ + return DocTest(self.get_examples(string, name), globs, + name, filename, lineno, string) + + def get_examples(self, string, name=''): + """ + Extract all doctest examples from the given string, and return + them as a list of `Example` objects. Line numbers are + 0-based, because it's most common in doctests that nothing + interesting appears on the same line as opening triple-quote, + and so the first interesting line is called \"line 1\" then. + + The optional argument `name` is a name identifying this + string, and is only used for error messages. + """ + return [x for x in self.parse(string, name) + if isinstance(x, Example)] + + def _parse_example(self, m, name, lineno): + """ + Given a regular expression match from `_EXAMPLE_RE` (`m`), + return a pair `(source, want)`, where `source` is the matched + example's source code (with prompts and indentation stripped); + and `want` is the example's expected output (with indentation + stripped). + + `name` is the string's name, and `lineno` is the line number + where the example starts; both are used for error messages. + """ + # Get the example's indentation level. + indent = len(m.group('indent')) + + # Divide source into lines; check that they're properly + # indented; and then strip their indentation & prompts. + source_lines = m.group('source').split('\n') + self._check_prompt_blank(source_lines, indent, name, lineno) + self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno) + source = '\n'.join([sl[indent+4:] for sl in source_lines]) + + # Divide want into lines; check that it's properly indented; and + # then strip the indentation. Spaces before the last newline should + # be preserved, so plain rstrip() isn't good enough. + want = m.group('want') + want_lines = want.split('\n') + if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]): + del want_lines[-1] # forget final newline & spaces after it + self._check_prefix(want_lines, ' '*indent, name, + lineno + len(source_lines)) + want = '\n'.join([wl[indent:] for wl in want_lines]) + + # If `want` contains a traceback message, then extract it. 
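# [editor's note -- illustrative sketch, not part of the commit above]
# get_doctest() bundles the parsed examples into a DocTest object together
# with a (copied) globals dict and location information:
from doctest import DocTestParser
_test = DocTestParser().get_doctest(">>> 1 + 1\n2\n", {'x': 1},
                                    'demo', 'demo.txt', 0)
assert _test.name == 'demo' and _test.filename == 'demo.txt'
assert len(_test.examples) == 1 and _test.globs == {'x': 1}
# [end editor's note]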
+ m = self._EXCEPTION_RE.match(want) + if m: + exc_msg = m.group('msg') + else: + exc_msg = None + + # Extract options from the source. + options = self._find_options(source, name, lineno) + + return source, options, want, exc_msg + + # This regular expression looks for option directives in the + # source code of an example. Option directives are comments + # starting with "doctest:". Warning: this may give false + # positives for string-literals that contain the string + # "#doctest:". Eliminating these false positives would require + # actually parsing the string; but we limit them by ignoring any + # line containing "#doctest:" that is *followed* by a quote mark. + _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$', + re.MULTILINE) + + def _find_options(self, source, name, lineno): + """ + Return a dictionary containing option overrides extracted from + option directives in the given source string. + + `name` is the string's name, and `lineno` is the line number + where the example starts; both are used for error messages. + """ + options = {} + # (note: with the current regexp, this will match at most once:) + for m in self._OPTION_DIRECTIVE_RE.finditer(source): + option_strings = m.group(1).replace(',', ' ').split() + for option in option_strings: + if (option[0] not in '+-' or + option[1:] not in OPTIONFLAGS_BY_NAME): + raise ValueError('line %r of the doctest for %s ' + 'has an invalid option: %r' % + (lineno+1, name, option)) + flag = OPTIONFLAGS_BY_NAME[option[1:]] + options[flag] = (option[0] == '+') + if options and self._IS_BLANK_OR_COMMENT(source): + raise ValueError('line %r of the doctest for %s has an option ' + 'directive on a line with no example: %r' % + (lineno, name, source)) + return options + + # This regular expression finds the indentation of every non-blank + # line in a string. + _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE) + + def _min_indent(self, s): + "Return the minimum indentation of any non-blank line in `s`" + indents = [len(indent) for indent in self._INDENT_RE.findall(s)] + if len(indents) > 0: + return min(indents) + else: + return 0 + + def _check_prompt_blank(self, lines, indent, name, lineno): + """ + Given the lines of a source string (including prompts and + leading indentation), check to make sure that every prompt is + followed by a space character. If any line is not followed by + a space character, then raise ValueError. + """ + for i, line in enumerate(lines): + if len(line) >= indent+4 and line[indent+3] != ' ': + raise ValueError('line %r of the docstring for %s ' + 'lacks blank after %s: %r' % + (lineno+i+1, name, + line[indent:indent+3], line)) + + def _check_prefix(self, lines, prefix, name, lineno): + """ + Check that every line in the given list starts with the given + prefix; if any line does not, then raise a ValueError. + """ + for i, line in enumerate(lines): + if line and not line.startswith(prefix): + raise ValueError('line %r of the docstring for %s has ' + 'inconsistent leading whitespace: %r' % + (lineno+i+1, name, line)) + + +###################################################################### +## 4. DocTest Finder +###################################################################### + +class DocTestFinder: + """ + A class used to extract the DocTests that are relevant to a given + object, from its docstring and the docstrings of its contained + objects. 
Doctests can currently be extracted from the following + object types: modules, functions, classes, methods, staticmethods, + classmethods, and properties. + """ + + def __init__(self, verbose=False, parser=DocTestParser(), + recurse=True, _namefilter=None, exclude_empty=True): + """ + Create a new doctest finder. + + The optional argument `parser` specifies a class or + function that should be used to create new DocTest objects (or + objects that implement the same interface as DocTest). The + signature for this factory function should match the signature + of the DocTest constructor. + + If the optional argument `recurse` is false, then `find` will + only examine the given object, and not any contained objects. + + If the optional argument `exclude_empty` is false, then `find` + will include tests for objects with empty docstrings. + """ + self._parser = parser + self._verbose = verbose + self._recurse = recurse + self._exclude_empty = exclude_empty + # _namefilter is undocumented, and exists only for temporary backward- + # compatibility support of testmod's deprecated isprivate mess. + self._namefilter = _namefilter + + def find(self, obj, name=None, module=None, globs=None, + extraglobs=None): + """ + Return a list of the DocTests that are defined by the given + object's docstring, or by any of its contained objects' + docstrings. + + The optional parameter `module` is the module that contains + the given object. If the module is not specified or is None, then + the test finder will attempt to automatically determine the + correct module. The object's module is used: + + - As a default namespace, if `globs` is not specified. + - To prevent the DocTestFinder from extracting DocTests + from objects that are imported from other modules. + - To find the name of the file containing the object. + - To help find the line number of the object within its + file. + + Contained objects whose module does not match `module` are ignored. + + If `module` is False, no attempt to find the module will be made. + This is obscure, of use mostly in tests: if `module` is False, or + is None but cannot be found automatically, then all objects are + considered to belong to the (non-existent) module, so all contained + objects will (recursively) be searched for doctests. + + The globals for each DocTest is formed by combining `globs` + and `extraglobs` (bindings in `extraglobs` override bindings + in `globs`). A new copy of the globals dictionary is created + for each DocTest. If `globs` is not specified, then it + defaults to the module's `__dict__`, if specified, or {} + otherwise. If `extraglobs` is not specified, then it defaults + to {}. + + """ + # If name was not specified, then extract it from the object. + if name is None: + name = getattr(obj, '__name__', None) + if name is None: + raise ValueError("DocTestFinder.find: name must be given " + "when obj.__name__ doesn't exist: %r" % + (type(obj),)) + + # Find the module that contains the given object (if obj is + # a module, then module=obj.). Note: this may fail, in which + # case module will be None. + if module is False: + module = None + elif module is None: + module = inspect.getmodule(obj) + + # Read the module's source code. This is used by + # DocTestFinder._find_lineno to find the line number for a + # given object's docstring. 
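# [editor's note -- illustrative sketch, not part of the commit above]
# Using DocTestFinder.find() on a synthetic module object; `new.module` is
# used only to keep the sketch self-contained:
import new
from doctest import DocTestFinder
_mod = new.module('demo_mod')
_mod.__doc__ = ">>> 1 + 1\n2\n"
_found = DocTestFinder().find(_mod)
assert len(_found) == 1 and _found[0].name == 'demo_mod'
# [end editor's note]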
+ try: + file = inspect.getsourcefile(obj) or inspect.getfile(obj) + source_lines = linecache.getlines(file) + if not source_lines: + source_lines = None + except TypeError: + source_lines = None + + # Initialize globals, and merge in extraglobs. + if globs is None: + if module is None: + globs = {} + else: + globs = module.__dict__.copy() + else: + globs = globs.copy() + if extraglobs is not None: + globs.update(extraglobs) + + # Recursively expore `obj`, extracting DocTests. + tests = [] + self._find(tests, obj, name, module, source_lines, globs, {}) + return tests + + def _filter(self, obj, prefix, base): + """ + Return true if the given object should not be examined. + """ + return (self._namefilter is not None and + self._namefilter(prefix, base)) + + def _from_module(self, module, object): + """ + Return true if the given object is defined in the given + module. + """ + if module is None: + return True + elif inspect.isfunction(object): + return module.__dict__ is object.func_globals + elif inspect.isclass(object): + return module.__name__ == object.__module__ + elif inspect.getmodule(object) is not None: + return module is inspect.getmodule(object) + elif hasattr(object, '__module__'): + return module.__name__ == object.__module__ + elif isinstance(object, property): + return True # [XX] no way not be sure. + else: + raise ValueError("object must be a class or function") + + def _find(self, tests, obj, name, module, source_lines, globs, seen): + """ + Find tests for the given object and any contained objects, and + add them to `tests`. + """ + if self._verbose: + print 'Finding tests in %s' % name + + # If we've already processed this object, then ignore it. + if id(obj) in seen: + return + seen[id(obj)] = 1 + + # Find a test for this object, and add it to the list of tests. + test = self._get_test(obj, name, module, globs, source_lines) + if test is not None: + tests.append(test) + + # Look for tests in a module's contained objects. + if inspect.ismodule(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + # Check if this contained object should be ignored. + if self._filter(val, name, valname): + continue + valname = '%s.%s' % (name, valname) + # Recurse to functions & classes. + if ((inspect.isfunction(val) or inspect.isclass(val)) and + self._from_module(module, val)): + self._find(tests, val, valname, module, source_lines, + globs, seen) + + # Look for tests in a module's __test__ dictionary. + if inspect.ismodule(obj) and self._recurse: + for valname, val in getattr(obj, '__test__', {}).items(): + if not isinstance(valname, basestring): + raise ValueError("DocTestFinder.find: __test__ keys " + "must be strings: %r" % + (type(valname),)) + if not (inspect.isfunction(val) or inspect.isclass(val) or + inspect.ismethod(val) or inspect.ismodule(val) or + isinstance(val, basestring)): + raise ValueError("DocTestFinder.find: __test__ values " + "must be strings, functions, methods, " + "classes, or modules: %r" % + (type(val),)) + valname = '%s.__test__.%s' % (name, valname) + self._find(tests, val, valname, module, source_lines, + globs, seen) + + # Look for tests in a class's contained objects. + if inspect.isclass(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + # Check if this contained object should be ignored. + if self._filter(val, name, valname): + continue + # Special handling for staticmethod/classmethod. 
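# [editor's note -- illustrative sketch, not part of the commit above]
# The module-level __test__ dictionary convention handled in _find() above:
# string values are treated as doctests in their own right.
import new
from doctest import DocTestFinder
_mod = new.module('demo_mod')
_mod.__test__ = {'addition': ">>> 2 + 3\n5\n"}
_names = [t.name for t in DocTestFinder().find(_mod)]
assert _names == ['demo_mod.__test__.addition']
# [end editor's note]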
+ if isinstance(val, staticmethod): + val = getattr(obj, valname) + if isinstance(val, classmethod): + val = getattr(obj, valname).im_func + + # Recurse to methods, properties, and nested classes. + if ((inspect.isfunction(val) or inspect.isclass(val) or + isinstance(val, property)) and + self._from_module(module, val)): + valname = '%s.%s' % (name, valname) + self._find(tests, val, valname, module, source_lines, + globs, seen) + + def _get_test(self, obj, name, module, globs, source_lines): + """ + Return a DocTest for the given object, if it defines a docstring; + otherwise, return None. + """ + # Extract the object's docstring. If it doesn't have one, + # then return None (no test for this object). + if isinstance(obj, basestring): + docstring = obj + else: + try: + if obj.__doc__ is None: + docstring = '' + else: + docstring = obj.__doc__ + if not isinstance(docstring, basestring): + docstring = str(docstring) + except (TypeError, AttributeError): + docstring = '' + + # Find the docstring's location in the file. + lineno = self._find_lineno(obj, source_lines) + + # Don't bother if the docstring is empty. + if self._exclude_empty and not docstring: + return None + + # Return a DocTest for this object. + if module is None: + filename = None + else: + filename = getattr(module, '__file__', module.__name__) + if filename[-4:] in (".pyc", ".pyo"): + filename = filename[:-1] + return self._parser.get_doctest(docstring, globs, name, + filename, lineno) + + def _find_lineno(self, obj, source_lines): + """ + Return a line number of the given object's docstring. Note: + this method assumes that the object has a docstring. + """ + lineno = None + + # Find the line number for modules. + if inspect.ismodule(obj): + lineno = 0 + + # Find the line number for classes. + # Note: this could be fooled if a class is defined multiple + # times in a single file. + if inspect.isclass(obj): + if source_lines is None: + return None + pat = re.compile(r'^\s*class\s*%s\b' % + getattr(obj, '__name__', '-')) + for i, line in enumerate(source_lines): + if pat.match(line): + lineno = i + break + + # Find the line number for functions & methods. + if inspect.ismethod(obj): obj = obj.im_func + if inspect.isfunction(obj): obj = obj.func_code + if inspect.istraceback(obj): obj = obj.tb_frame + if inspect.isframe(obj): obj = obj.f_code + if inspect.iscode(obj): + lineno = getattr(obj, 'co_firstlineno', None)-1 + + # Find the line number where the docstring starts. Assume + # that it's the first line that begins with a quote mark. + # Note: this could be fooled by a multiline function + # signature, where a continuation line begins with a quote + # mark. + if lineno is not None: + if source_lines is None: + return lineno+1 + pat = re.compile('(^|.*:)\s*\w*("|\')') + for lineno in range(lineno, len(source_lines)): + if pat.match(source_lines[lineno]): + return lineno + + # We couldn't find the line number. + return None + +###################################################################### +## 5. DocTest Runner +###################################################################### + +class DocTestRunner: + """ + A class used to run DocTest test cases, and accumulate statistics. + The `run` method is used to process a single DocTest case. It + returns a tuple `(f, t)`, where `t` is the number of test cases + tried, and `f` is the number of test cases that failed. + + >>> tests = DocTestFinder().find(_TestClass) + >>> runner = DocTestRunner(verbose=False) + >>> for test in tests: + ... 
print runner.run(test) + (0, 2) + (0, 1) + (0, 2) + (0, 2) + + The `summarize` method prints a summary of all the test cases that + have been run by the runner, and returns an aggregated `(f, t)` + tuple: + + >>> runner.summarize(verbose=1) + 4 items passed all tests: + 2 tests in _TestClass + 2 tests in _TestClass.__init__ + 2 tests in _TestClass.get + 1 tests in _TestClass.square + 7 tests in 4 items. + 7 passed and 0 failed. + Test passed. + (0, 7) + + The aggregated number of tried examples and failed examples is + also available via the `tries` and `failures` attributes: + + >>> runner.tries + 7 + >>> runner.failures + 0 + + The comparison between expected outputs and actual outputs is done + by an `OutputChecker`. This comparison may be customized with a + number of option flags; see the documentation for `testmod` for + more information. If the option flags are insufficient, then the + comparison may also be customized by passing a subclass of + `OutputChecker` to the constructor. + + The test runner's display output can be controlled in two ways. + First, an output function (`out) can be passed to + `TestRunner.run`; this function will be called with strings that + should be displayed. It defaults to `sys.stdout.write`. If + capturing the output is not sufficient, then the display output + can be also customized by subclassing DocTestRunner, and + overriding the methods `report_start`, `report_success`, + `report_unexpected_exception`, and `report_failure`. + """ + # This divider string is used to separate failure messages, and to + # separate sections of the summary. + DIVIDER = "*" * 70 + + def __init__(self, checker=None, verbose=None, optionflags=0): + """ + Create a new test runner. + + Optional keyword arg `checker` is the `OutputChecker` that + should be used to compare the expected outputs and actual + outputs of doctest examples. + + Optional keyword arg 'verbose' prints lots of stuff if true, + only failures if false; by default, it's true iff '-v' is in + sys.argv. + + Optional argument `optionflags` can be used to control how the + test runner compares expected output to actual output, and how + it displays failures. See the documentation for `testmod` for + more information. + """ + self._checker = checker or OutputChecker() + if verbose is None: + verbose = '-v' in sys.argv + self._verbose = verbose + self.optionflags = optionflags + self.original_optionflags = optionflags + + # Keep track of the examples we've run. + self.tries = 0 + self.failures = 0 + self._name2ft = {} + + # Create a fake output target for capturing doctest output. + self._fakeout = _SpoofOut() + + #///////////////////////////////////////////////////////////////// + # Reporting methods + #///////////////////////////////////////////////////////////////// + + def report_start(self, out, test, example): + """ + Report that the test runner is about to process the given + example. (Only displays a message if verbose=True) + """ + if self._verbose: + if example.want: + out('Trying:\n' + _indent(example.source) + + 'Expecting:\n' + _indent(example.want)) + else: + out('Trying:\n' + _indent(example.source) + + 'Expecting nothing\n') + + def report_success(self, out, test, example, got): + """ + Report that the given example ran successfully. (Only + displays a message if verbose=True) + """ + if self._verbose: + out("ok\n") + + def report_failure(self, out, test, example, got): + """ + Report that the given example failed. 
+ """ + out(self._failure_header(test, example) + + self._checker.output_difference(example, got, self.optionflags)) + + def report_unexpected_exception(self, out, test, example, exc_info): + """ + Report that the given example raised an unexpected exception. + """ + out(self._failure_header(test, example) + + 'Exception raised:\n' + _indent(_exception_traceback(exc_info))) + + def _failure_header(self, test, example): + out = [self.DIVIDER] + if test.filename: + if test.lineno is not None and example.lineno is not None: + lineno = test.lineno + example.lineno + 1 + else: + lineno = '?' + out.append('File "%s", line %s, in %s' % + (test.filename, lineno, test.name)) + else: + out.append('Line %s, in %s' % (example.lineno+1, test.name)) + out.append('Failed example:') + source = example.source + out.append(_indent(source)) + return '\n'.join(out) + + #///////////////////////////////////////////////////////////////// + # DocTest Running + #///////////////////////////////////////////////////////////////// + + def __run(self, test, compileflags, out): + """ + Run the examples in `test`. Write the outcome of each example + with one of the `DocTestRunner.report_*` methods, using the + writer function `out`. `compileflags` is the set of compiler + flags that should be used to execute examples. Return a tuple + `(f, t)`, where `t` is the number of examples tried, and `f` + is the number of examples that failed. The examples are run + in the namespace `test.globs`. + """ + # Keep track of the number of failures and tries. + failures = tries = 0 + + # Save the option flags (since option directives can be used + # to modify them). + original_optionflags = self.optionflags + + SUCCESS, FAILURE, BOOM = range(3) # `outcome` state + + check = self._checker.check_output + + # Process each example. + for examplenum, example in enumerate(test.examples): + + # If REPORT_ONLY_FIRST_FAILURE is set, then supress + # reporting after the first failure. + quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and + failures > 0) + + # Merge in the example's options. + self.optionflags = original_optionflags + if example.options: + for (optionflag, val) in example.options.items(): + if val: + self.optionflags |= optionflag + else: + self.optionflags &= ~optionflag + + # Record that we started this example. + tries += 1 + if not quiet: + self.report_start(out, test, example) + + # Use a special filename for compile(), so we can retrieve + # the source code during interactive debugging (see + # __patched_linecache_getlines). + filename = '' % (test.name, examplenum) + + # Run the example in the given context (globs), and record + # any exception that gets raised. (But don't intercept + # keyboard interrupts.) + try: + # Don't blink! This is where the user's code gets run. + exec compile(example.source, filename, "single", + compileflags, 1) in test.globs + self.debugger.set_continue() # ==== Example Finished ==== + exception = None + except KeyboardInterrupt: + raise + except: + exception = sys.exc_info() + self.debugger.set_continue() # ==== Example Finished ==== + + got = self._fakeout.getvalue() # the actual output + self._fakeout.truncate(0) + outcome = FAILURE # guilty until proved innocent or insane + + # If the example executed without raising any exceptions, + # verify its output. + if exception is None: + if check(example.want, got, self.optionflags): + outcome = SUCCESS + + # The example raised an exception: check if it was expected. 
+ else: + exc_info = sys.exc_info() + exc_msg = traceback.format_exception_only(*exc_info[:2])[-1] + if not quiet: + got += _exception_traceback(exc_info) + + # If `example.exc_msg` is None, then we weren't expecting + # an exception. + if example.exc_msg is None: + outcome = BOOM + + # We expected an exception: see whether it matches. + elif check(example.exc_msg, exc_msg, self.optionflags): + outcome = SUCCESS + + # Another chance if they didn't care about the detail. + elif self.optionflags & IGNORE_EXCEPTION_DETAIL: + m1 = re.match(r'[^:]*:', example.exc_msg) + m2 = re.match(r'[^:]*:', exc_msg) + if m1 and m2 and check(m1.group(0), m2.group(0), + self.optionflags): + outcome = SUCCESS + + # Report the outcome. + if outcome is SUCCESS: + if not quiet: + self.report_success(out, test, example, got) + elif outcome is FAILURE: + if not quiet: + self.report_failure(out, test, example, got) + failures += 1 + elif outcome is BOOM: + if not quiet: + self.report_unexpected_exception(out, test, example, + exc_info) + failures += 1 + else: + assert False, ("unknown outcome", outcome) + + # Restore the option flags (in case they were modified) + self.optionflags = original_optionflags + + # Record and return the number of failures and tries. + self.__record_outcome(test, failures, tries) + return failures, tries + + def __record_outcome(self, test, f, t): + """ + Record the fact that the given DocTest (`test`) generated `f` + failures out of `t` tried examples. + """ + f2, t2 = self._name2ft.get(test.name, (0,0)) + self._name2ft[test.name] = (f+f2, t+t2) + self.failures += f + self.tries += t + + __LINECACHE_FILENAME_RE = re.compile(r'[\w\.]+)' + r'\[(?P\d+)\]>$') + def __patched_linecache_getlines(self, filename): + m = self.__LINECACHE_FILENAME_RE.match(filename) + if m and m.group('name') == self.test.name: + example = self.test.examples[int(m.group('examplenum'))] + return example.source.splitlines(True) + else: + return self.save_linecache_getlines(filename) + + def run(self, test, compileflags=None, out=None, clear_globs=True): + """ + Run the examples in `test`, and display the results using the + writer function `out`. + + The examples are run in the namespace `test.globs`. If + `clear_globs` is true (the default), then this namespace will + be cleared after the test runs, to help with garbage + collection. If you would like to examine the namespace after + the test completes, then use `clear_globs=False`. + + `compileflags` gives the set of flags that should be used by + the Python compiler when running the examples. If not + specified, then it will default to the set of future-import + flags that apply to `globs`. + + The output of each example is checked using + `DocTestRunner.check_output`, and the results are formatted by + the `DocTestRunner.report_*` methods. + """ + self.test = test + + if compileflags is None: + compileflags = _extract_future_flags(test.globs) + + save_stdout = sys.stdout + if out is None: + out = save_stdout.write + sys.stdout = self._fakeout + + # Patch pdb.set_trace to restore sys.stdout during interactive + # debugging (so it's not still redirected to self._fakeout). + # Note that the interactive output will go to *our* + # save_stdout, even if that's not the real sys.stdout; this + # allows us to write test cases for the set_trace behavior. 
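# [editor's note -- illustrative sketch, not part of the commit above]
# Driving DocTestRunner.run() directly with a custom `out` writer:
from doctest import DocTestParser, DocTestRunner
_test = DocTestParser().get_doctest(">>> 6 * 7\n42\n", {}, 'answer', None, 0)
_chunks = []
_failures, _tries = DocTestRunner(verbose=False).run(_test, out=_chunks.append)
assert (_failures, _tries) == (0, 1) and _chunks == []   # nothing reported on success
# [end editor's note]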
+ save_set_trace = pdb.set_trace + self.debugger = _OutputRedirectingPdb(save_stdout) + self.debugger.reset() + pdb.set_trace = self.debugger.set_trace + + # Patch linecache.getlines, so we can see the example's source + # when we're inside the debugger. + self.save_linecache_getlines = linecache.getlines + linecache.getlines = self.__patched_linecache_getlines + + try: + return self.__run(test, compileflags, out) + finally: + sys.stdout = save_stdout + pdb.set_trace = save_set_trace + linecache.getlines = self.save_linecache_getlines + if clear_globs: + test.globs.clear() + + #///////////////////////////////////////////////////////////////// + # Summarization + #///////////////////////////////////////////////////////////////// + def summarize(self, verbose=None): + """ + Print a summary of all the test cases that have been run by + this DocTestRunner, and return a tuple `(f, t)`, where `f` is + the total number of failed examples, and `t` is the total + number of tried examples. + + The optional `verbose` argument controls how detailed the + summary is. If the verbosity is not specified, then the + DocTestRunner's verbosity is used. + """ + if verbose is None: + verbose = self._verbose + notests = [] + passed = [] + failed = [] + totalt = totalf = 0 + for x in self._name2ft.items(): + name, (f, t) = x + assert f <= t + totalt += t + totalf += f + if t == 0: + notests.append(name) + elif f == 0: + passed.append( (name, t) ) + else: + failed.append(x) + if verbose: + if notests: + print len(notests), "items had no tests:" + notests.sort() + for thing in notests: + print " ", thing + if passed: + print len(passed), "items passed all tests:" + passed.sort() + for thing, count in passed: + print " %3d tests in %s" % (count, thing) + if failed: + print self.DIVIDER + print len(failed), "items had failures:" + failed.sort() + for thing, (f, t) in failed: + print " %3d of %3d in %s" % (f, t, thing) + if verbose: + print totalt, "tests in", len(self._name2ft), "items." + print totalt - totalf, "passed and", totalf, "failed." + if totalf: + print "***Test Failed***", totalf, "failures." + elif verbose: + print "Test passed." + return totalf, totalt + + #///////////////////////////////////////////////////////////////// + # Backward compatibility cruft to maintain doctest.master. + #///////////////////////////////////////////////////////////////// + def merge(self, other): + d = self._name2ft + for name, (f, t) in other._name2ft.items(): + if name in d: + print "*** DocTestRunner.merge: '" + name + "' in both" \ + " testers; summing outcomes." + f2, t2 = d[name] + f = f + f2 + t = t + t2 + d[name] = f, t + +class OutputChecker: + """ + A class used to check the whether the actual output from a doctest + example matches the expected output. `OutputChecker` defines two + methods: `check_output`, which compares a given pair of outputs, + and returns true if they match; and `output_difference`, which + returns a string describing the differences between two outputs. + """ + def check_output(self, want, got, optionflags): + """ + Return True iff the actual output from an example (`got`) + matches the expected output (`want`). These strings are + always considered to match if they are identical; but + depending on what option flags the test runner is using, + several non-exact match types are also possible. See the + documentation for `TestRunner` for more information about + option flags. + """ + # Handle the common case first, for efficiency: + # if they're string-identical, always return true. 
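# [editor's note -- illustrative sketch, not part of the commit above]
# The effect of two of the comparison flags on OutputChecker.check_output():
from doctest import OutputChecker, ELLIPSIS, NORMALIZE_WHITESPACE
_checker = OutputChecker()
assert _checker.check_output("a...z\n", "abcdefz\n", ELLIPSIS)
assert not _checker.check_output("a...z\n", "abcdefz\n", 0)
assert _checker.check_output("1 2\n", "1    2\n", NORMALIZE_WHITESPACE)
# [end editor's note]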
+ if got == want: + return True + + # The values True and False replaced 1 and 0 as the return + # value for boolean comparisons in Python 2.3. + if not (optionflags & DONT_ACCEPT_TRUE_FOR_1): + if (got,want) == ("True\n", "1\n"): + return True + if (got,want) == ("False\n", "0\n"): + return True + + # can be used as a special sequence to signify a + # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. + if not (optionflags & DONT_ACCEPT_BLANKLINE): + # Replace in want with a blank line. + want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER), + '', want) + # If a line in got contains only spaces, then remove the + # spaces. + got = re.sub('(?m)^\s*?$', '', got) + if got == want: + return True + + # This flag causes doctest to ignore any differences in the + # contents of whitespace strings. Note that this can be used + # in conjunction with the ELLIPSIS flag. + if optionflags & NORMALIZE_WHITESPACE: + got = ' '.join(got.split()) + want = ' '.join(want.split()) + if got == want: + return True + + # The ELLIPSIS flag says to let the sequence "..." in `want` + # match any substring in `got`. + if optionflags & ELLIPSIS: + if _ellipsis_match(want, got): + return True + + # We didn't find any match; return false. + return False + + # Should we do a fancy diff? + def _do_a_fancy_diff(self, want, got, optionflags): + # Not unless they asked for a fancy diff. + if not optionflags & (REPORT_UDIFF | + REPORT_CDIFF | + REPORT_NDIFF): + return False + + # If expected output uses ellipsis, a meaningful fancy diff is + # too hard ... or maybe not. In two real-life failures Tim saw, + # a diff was a major help anyway, so this is commented out. + # [todo] _ellipsis_match() knows which pieces do and don't match, + # and could be the basis for a kick-ass diff in this case. + ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want: + ## return False + + # ndiff does intraline difference marking, so can be useful even + # for 1-line differences. + if optionflags & REPORT_NDIFF: + return True + + # The other diff types need at least a few lines to be helpful. + return want.count('\n') > 2 and got.count('\n') > 2 + + def output_difference(self, example, got, optionflags): + """ + Return a string describing the differences between the + expected output for a given example (`example`) and the actual + output (`got`). `optionflags` is the set of option flags used + to compare `want` and `got`. + """ + want = example.want + # If s are being used, then replace blank lines + # with in the actual output string. + if not (optionflags & DONT_ACCEPT_BLANKLINE): + got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got) + + # Check if we should use diff. + if self._do_a_fancy_diff(want, got, optionflags): + # Split want & got into lines. + want_lines = want.splitlines(True) # True == keep line ends + got_lines = got.splitlines(True) + # Use difflib to find their differences. 
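# [editor's note -- illustrative sketch, not part of the commit above]
# Asking OutputChecker for a unified diff when expected and actual output
# are both several lines long:
from doctest import DocTestParser, OutputChecker, REPORT_UDIFF
_example = DocTestParser().get_examples(">>> print 'spam'\na\nb\nc\nd\n")[0]
_report = OutputChecker().output_difference(_example, "a\nB\nc\nd\n", REPORT_UDIFF)
assert _report.startswith('Differences (unified diff')
# [end editor's note]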
+ if optionflags & REPORT_UDIFF: + diff = difflib.unified_diff(want_lines, got_lines, n=2) + diff = list(diff)[2:] # strip the diff header + kind = 'unified diff with -expected +actual' + elif optionflags & REPORT_CDIFF: + diff = difflib.context_diff(want_lines, got_lines, n=2) + diff = list(diff)[2:] # strip the diff header + kind = 'context diff with expected followed by actual' + elif optionflags & REPORT_NDIFF: + engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK) + diff = list(engine.compare(want_lines, got_lines)) + kind = 'ndiff with -expected +actual' + else: + assert 0, 'Bad diff option' + # Remove trailing whitespace on diff output. + diff = [line.rstrip() + '\n' for line in diff] + return 'Differences (%s):\n' % kind + _indent(''.join(diff)) + + # If we're not using diff, then simply list the expected + # output followed by the actual output. + if want and got: + return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got)) + elif want: + return 'Expected:\n%sGot nothing\n' % _indent(want) + elif got: + return 'Expected nothing\nGot:\n%s' % _indent(got) + else: + return 'Expected nothing\nGot nothing\n' + +class DocTestFailure(Exception): + """A DocTest example has failed in debugging mode. + + The exception instance has variables: + + - test: the DocTest object being run + + - excample: the Example object that failed + + - got: the actual output + """ + def __init__(self, test, example, got): + self.test = test + self.example = example + self.got = got + + def __str__(self): + return str(self.test) + +class UnexpectedException(Exception): + """A DocTest example has encountered an unexpected exception + + The exception instance has variables: + + - test: the DocTest object being run + + - excample: the Example object that failed + + - exc_info: the exception info + """ + def __init__(self, test, example, exc_info): + self.test = test + self.example = example + self.exc_info = exc_info + + def __str__(self): + return str(self.test) + +class DebugRunner(DocTestRunner): + r"""Run doc tests but raise an exception as soon as there is a failure. + + If an unexpected exception occurs, an UnexpectedException is raised. + It contains the test, the example, and the original exception: + + >>> runner = DebugRunner(verbose=False) + >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', + ... {}, 'foo', 'foo.py', 0) + >>> try: + ... runner.run(test) + ... except UnexpectedException, failure: + ... pass + + >>> failure.test is test + True + + >>> failure.example.want + '42\n' + + >>> exc_info = failure.exc_info + >>> raise exc_info[0], exc_info[1], exc_info[2] + Traceback (most recent call last): + ... + KeyError + + We wrap the original exception to give the calling application + access to the test and example information. + + If the output doesn't match, then a DocTestFailure is raised: + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 1 + ... >>> x + ... 2 + ... ''', {}, 'foo', 'foo.py', 0) + + >>> try: + ... runner.run(test) + ... except DocTestFailure, failure: + ... pass + + DocTestFailure objects provide access to the test: + + >>> failure.test is test + True + + As well as to the example: + + >>> failure.example.want + '2\n' + + and the actual output: + + >>> failure.got + '1\n' + + If a failure or error occurs, the globals are left intact: + + >>> del test.globs['__builtins__'] + >>> test.globs + {'x': 1} + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 2 + ... >>> raise KeyError + ... 
''', {}, 'foo', 'foo.py', 0) + + >>> runner.run(test) + Traceback (most recent call last): + ... + UnexpectedException: + + >>> del test.globs['__builtins__'] + >>> test.globs + {'x': 2} + + But the globals are cleared if there is no error: + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 2 + ... ''', {}, 'foo', 'foo.py', 0) + + >>> runner.run(test) + (0, 1) + + >>> test.globs + {} + + """ + + def run(self, test, compileflags=None, out=None, clear_globs=True): + r = DocTestRunner.run(self, test, compileflags, out, False) + if clear_globs: + test.globs.clear() + return r + + def report_unexpected_exception(self, out, test, example, exc_info): + raise UnexpectedException(test, example, exc_info) + + def report_failure(self, out, test, example, got): + raise DocTestFailure(test, example, got) + +###################################################################### +## 6. Test Functions +###################################################################### +# These should be backwards compatible. + +# For backward compatibility, a global instance of a DocTestRunner +# class, updated by testmod. +master = None + +def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None, + report=True, optionflags=0, extraglobs=None, + raise_on_error=False, exclude_empty=False): + """m=None, name=None, globs=None, verbose=None, isprivate=None, + report=True, optionflags=0, extraglobs=None, raise_on_error=False, + exclude_empty=False + + Test examples in docstrings in functions and classes reachable + from module m (or the current module if m is not supplied), starting + with m.__doc__. Unless isprivate is specified, private names + are not skipped. + + Also test examples reachable from dict m.__test__ if it exists and is + not None. m.__test__ maps names to functions, classes and strings; + function and class docstrings are tested even if the name is private; + strings are tested directly, as if they were docstrings. + + Return (#failures, #tests). + + See doctest.__doc__ for an overview. + + Optional keyword arg "name" gives the name of the module; by default + use m.__name__. + + Optional keyword arg "globs" gives a dict to be used as the globals + when executing examples; by default, use m.__dict__. A copy of this + dict is actually used for each docstring, so that each docstring's + examples start with a clean slate. + + Optional keyword arg "extraglobs" gives a dictionary that should be + merged into the globals that are used to execute examples. By + default, no extra globals are used. This is new in 2.4. + + Optional keyword arg "verbose" prints lots of stuff if true, prints + only failures if false; by default, it's true iff "-v" is in sys.argv. + + Optional keyword arg "report" prints a summary at the end when true, + else prints nothing at the end. In verbose mode, the summary is + detailed, else very brief (in fact, empty if all tests passed). + + Optional keyword arg "optionflags" or's together module constants, + and defaults to 0. This is new in 2.3. Possible values (see the + docs for details): + + DONT_ACCEPT_TRUE_FOR_1 + DONT_ACCEPT_BLANKLINE + NORMALIZE_WHITESPACE + ELLIPSIS + IGNORE_EXCEPTION_DETAIL + REPORT_UDIFF + REPORT_CDIFF + REPORT_NDIFF + REPORT_ONLY_FIRST_FAILURE + + Optional keyword arg "raise_on_error" raises an exception on the + first unexpected exception or failure. This allows failures to be + post-mortem debugged. + + Deprecated in Python 2.4: + Optional keyword arg "isprivate" specifies a function used to + determine whether a name is private. 
The default function is + treat all functions as public. Optionally, "isprivate" can be + set to doctest.is_private to skip over functions marked as private + using the underscore naming convention; see its docs for details. + + Advanced tomfoolery: testmod runs methods of a local instance of + class doctest.Tester, then merges the results into (or creates) + global Tester instance doctest.master. Methods of doctest.master + can be called directly too, if you want to do something unusual. + Passing report=0 to testmod is especially useful then, to delay + displaying a summary. Invoke doctest.master.summarize(verbose) + when you're done fiddling. + """ + global master + + if isprivate is not None: + warnings.warn("the isprivate argument is deprecated; " + "examine DocTestFinder.find() lists instead", + DeprecationWarning) + + # If no module was given, then use __main__. + if m is None: + # DWA - m will still be None if this wasn't invoked from the command + # line, in which case the following TypeError is about as good an error + # as we should expect + m = sys.modules.get('__main__') + + # Check that we were actually given a module. + if not inspect.ismodule(m): + raise TypeError("testmod: module required; %r" % (m,)) + + # If no name was given, then use the module's name. + if name is None: + name = m.__name__ + + # Find, parse, and run all tests in the given module. + finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty) + + if raise_on_error: + runner = DebugRunner(verbose=verbose, optionflags=optionflags) + else: + runner = DocTestRunner(verbose=verbose, optionflags=optionflags) + + for test in finder.find(m, name, globs=globs, extraglobs=extraglobs): + runner.run(test) + + if report: + runner.summarize() + + if master is None: + master = runner + else: + master.merge(runner) + + return runner.failures, runner.tries + +def testfile(filename, module_relative=True, name=None, package=None, + globs=None, verbose=None, report=True, optionflags=0, + extraglobs=None, raise_on_error=False, parser=DocTestParser()): + """ + Test examples in the given file. Return (#failures, #tests). + + Optional keyword arg "module_relative" specifies how filenames + should be interpreted: + + - If "module_relative" is True (the default), then "filename" + specifies a module-relative path. By default, this path is + relative to the calling module's directory; but if the + "package" argument is specified, then it is relative to that + package. To ensure os-independence, "filename" should use + "/" characters to separate path segments, and should not + be an absolute path (i.e., it may not begin with "/"). + + - If "module_relative" is False, then "filename" specifies an + os-specific path. The path may be absolute or relative (to + the current working directory). + + Optional keyword arg "name" gives the name of the test; by default + use the file's basename. + + Optional keyword argument "package" is a Python package or the + name of a Python package whose directory should be used as the + base directory for a module relative filename. If no package is + specified, then the calling module's directory is used as the base + directory for module relative filenames. It is an error to + specify "package" if "module_relative" is False. + + Optional keyword arg "globs" gives a dict to be used as the globals + when executing examples; by default, use {}. A copy of this dict + is actually used for each docstring, so that each docstring's + examples start with a clean slate. 
+ + Optional keyword arg "extraglobs" gives a dictionary that should be + merged into the globals that are used to execute examples. By + default, no extra globals are used. + + Optional keyword arg "verbose" prints lots of stuff if true, prints + only failures if false; by default, it's true iff "-v" is in sys.argv. + + Optional keyword arg "report" prints a summary at the end when true, + else prints nothing at the end. In verbose mode, the summary is + detailed, else very brief (in fact, empty if all tests passed). + + Optional keyword arg "optionflags" or's together module constants, + and defaults to 0. Possible values (see the docs for details): + + DONT_ACCEPT_TRUE_FOR_1 + DONT_ACCEPT_BLANKLINE + NORMALIZE_WHITESPACE + ELLIPSIS + IGNORE_EXCEPTION_DETAIL + REPORT_UDIFF + REPORT_CDIFF + REPORT_NDIFF + REPORT_ONLY_FIRST_FAILURE + + Optional keyword arg "raise_on_error" raises an exception on the + first unexpected exception or failure. This allows failures to be + post-mortem debugged. + + Optional keyword arg "parser" specifies a DocTestParser (or + subclass) that should be used to extract tests from the files. + + Advanced tomfoolery: testmod runs methods of a local instance of + class doctest.Tester, then merges the results into (or creates) + global Tester instance doctest.master. Methods of doctest.master + can be called directly too, if you want to do something unusual. + Passing report=0 to testmod is especially useful then, to delay + displaying a summary. Invoke doctest.master.summarize(verbose) + when you're done fiddling. + """ + global master + + if package and not module_relative: + raise ValueError("Package may only be specified for module-" + "relative paths.") + + # Relativize the path + if module_relative: + package = _normalize_module(package) + filename = _module_relative_path(package, filename) + + # If no name was given, then use the file's name. + if name is None: + name = os.path.basename(filename) + + # Assemble the globals. + if globs is None: + globs = {} + else: + globs = globs.copy() + if extraglobs is not None: + globs.update(extraglobs) + + if raise_on_error: + runner = DebugRunner(verbose=verbose, optionflags=optionflags) + else: + runner = DocTestRunner(verbose=verbose, optionflags=optionflags) + + # Read the file, convert it to a test, and run it. + s = open(filename).read() + test = parser.get_doctest(s, globs, name, filename, 0) + runner.run(test) + + if report: + runner.summarize() + + if master is None: + master = runner + else: + master.merge(runner) + + return runner.failures, runner.tries + +def run_docstring_examples(f, globs, verbose=False, name="NoName", + compileflags=None, optionflags=0): + """ + Test examples in the given object's docstring (`f`), using `globs` + as globals. Optional argument `name` is used in failure messages. + If the optional argument `verbose` is true, then generate output + even if there are no failures. + + `compileflags` gives the set of flags that should be used by the + Python compiler when running the examples. If not specified, then + it will default to the set of future-import flags that apply to + `globs`. + + Optional keyword arg `optionflags` specifies options for the + testing and output. See the documentation for `testmod` for more + information. + """ + # Find, parse, and run all tests in the given module. 
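# [editor's note -- illustrative sketch, not part of the commit above]
# testfile() run against a small temporary text file of examples, so the
# sketch does not depend on any file shipped with the package:
import os, tempfile
from doctest import testfile
_fd, _path = tempfile.mkstemp(suffix='.txt')
os.write(_fd, "Example file.\n>>> 2 ** 5\n32\n")
os.close(_fd)
assert testfile(_path, module_relative=False, verbose=False) == (0, 1)
os.remove(_path)
# [end editor's note]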
+ finder = DocTestFinder(verbose=verbose, recurse=False) + runner = DocTestRunner(verbose=verbose, optionflags=optionflags) + for test in finder.find(f, name, globs=globs): + runner.run(test, compileflags=compileflags) + +###################################################################### +## 7. Tester +###################################################################### +# This is provided only for backwards compatibility. It's not +# actually used in any way. + +class Tester: + def __init__(self, mod=None, globs=None, verbose=None, + isprivate=None, optionflags=0): + + warnings.warn("class Tester is deprecated; " + "use class doctest.DocTestRunner instead", + DeprecationWarning, stacklevel=2) + if mod is None and globs is None: + raise TypeError("Tester.__init__: must specify mod or globs") + if mod is not None and not inspect.ismodule(mod): + raise TypeError("Tester.__init__: mod must be a module; %r" % + (mod,)) + if globs is None: + globs = mod.__dict__ + self.globs = globs + + self.verbose = verbose + self.isprivate = isprivate + self.optionflags = optionflags + self.testfinder = DocTestFinder(_namefilter=isprivate) + self.testrunner = DocTestRunner(verbose=verbose, + optionflags=optionflags) + + def runstring(self, s, name): + test = DocTestParser().get_doctest(s, self.globs, name, None, None) + if self.verbose: + print "Running string", name + (f,t) = self.testrunner.run(test) + if self.verbose: + print f, "of", t, "examples failed in string", name + return (f,t) + + def rundoc(self, object, name=None, module=None): + f = t = 0 + tests = self.testfinder.find(object, name, module=module, + globs=self.globs) + for test in tests: + (f2, t2) = self.testrunner.run(test) + (f,t) = (f+f2, t+t2) + return (f,t) + + def rundict(self, d, name, module=None): + import new + m = new.module(name) + m.__dict__.update(d) + if module is None: + module = False + return self.rundoc(m, name, module) + + def run__test__(self, d, name): + import new + m = new.module(name) + m.__test__ = d + return self.rundoc(m, name) + + def summarize(self, verbose=None): + return self.testrunner.summarize(verbose) + + def merge(self, other): + self.testrunner.merge(other.testrunner) + +###################################################################### +## 8. Unittest Support +###################################################################### + +_unittest_reportflags = 0 + +def set_unittest_reportflags(flags): + """Sets the unittest option flags. + + The old flag is returned so that a runner could restore the old + value if it wished to: + + >>> import doctest + >>> old = doctest._unittest_reportflags + >>> doctest.set_unittest_reportflags(REPORT_NDIFF | + ... REPORT_ONLY_FIRST_FAILURE) == old + True + + >>> doctest._unittest_reportflags == (REPORT_NDIFF | + ... REPORT_ONLY_FIRST_FAILURE) + True + + Only reporting flags can be set: + + >>> doctest.set_unittest_reportflags(ELLIPSIS) + Traceback (most recent call last): + ... + ValueError: ('Only reporting flags allowed', 8) + + >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF | + ... 
REPORT_ONLY_FIRST_FAILURE) + True + """ + global _unittest_reportflags + + if (flags & REPORTING_FLAGS) != flags: + raise ValueError("Only reporting flags allowed", flags) + old = _unittest_reportflags + _unittest_reportflags = flags + return old + + +class DocTestCase(unittest.TestCase): + + def __init__(self, test, optionflags=0, setUp=None, tearDown=None, + checker=None): + + unittest.TestCase.__init__(self) + self._dt_optionflags = optionflags + self._dt_checker = checker + self._dt_test = test + self._dt_setUp = setUp + self._dt_tearDown = tearDown + + def setUp(self): + test = self._dt_test + + if self._dt_setUp is not None: + self._dt_setUp(test) + + def tearDown(self): + test = self._dt_test + + if self._dt_tearDown is not None: + self._dt_tearDown(test) + + test.globs.clear() + + def runTest(self): + test = self._dt_test + old = sys.stdout + new = StringIO() + optionflags = self._dt_optionflags + + if not (optionflags & REPORTING_FLAGS): + # The option flags don't include any reporting flags, + # so add the default reporting flags + optionflags |= _unittest_reportflags + + runner = DocTestRunner(optionflags=optionflags, + checker=self._dt_checker, verbose=False) + + try: + runner.DIVIDER = "-"*70 + failures, tries = runner.run( + test, out=new.write, clear_globs=False) + finally: + sys.stdout = old + + if failures: + raise self.failureException(self.format_failure(new.getvalue())) + + def format_failure(self, err): + test = self._dt_test + if test.lineno is None: + lineno = 'unknown line number' + else: + lineno = '%s' % test.lineno + lname = '.'.join(test.name.split('.')[-1:]) + return ('Failed doctest test for %s\n' + ' File "%s", line %s, in %s\n\n%s' + % (test.name, test.filename, lineno, lname, err) + ) + + def debug(self): + r"""Run the test case without results and without catching exceptions + + The unit test framework includes a debug method on test cases + and test suites to support post-mortem debugging. The test code + is run in such a way that errors are not caught. This way a + caller can catch the errors and initiate post-mortem debugging. + + The DocTestCase provides a debug method that raises + UnexpectedException errors if there is an unexepcted + exception: + + >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42', + ... {}, 'foo', 'foo.py', 0) + >>> case = DocTestCase(test) + >>> try: + ... case.debug() + ... except UnexpectedException, failure: + ... pass + + The UnexpectedException contains the test, the example, and + the original exception: + + >>> failure.test is test + True + + >>> failure.example.want + '42\n' + + >>> exc_info = failure.exc_info + >>> raise exc_info[0], exc_info[1], exc_info[2] + Traceback (most recent call last): + ... + KeyError + + If the output doesn't match, then a DocTestFailure is raised: + + >>> test = DocTestParser().get_doctest(''' + ... >>> x = 1 + ... >>> x + ... 2 + ... ''', {}, 'foo', 'foo.py', 0) + >>> case = DocTestCase(test) + + >>> try: + ... case.debug() + ... except DocTestFailure, failure: + ... 
pass + + DocTestFailure objects provide access to the test: + + >>> failure.test is test + True + + As well as to the example: + + >>> failure.example.want + '2\n' + + and the actual output: + + >>> failure.got + '1\n' + + """ + + self.setUp() + runner = DebugRunner(optionflags=self._dt_optionflags, + checker=self._dt_checker, verbose=False) + runner.run(self._dt_test) + self.tearDown() + + def id(self): + return self._dt_test.name + + def __repr__(self): + name = self._dt_test.name.split('.') + return "%s (%s)" % (name[-1], '.'.join(name[:-1])) + + __str__ = __repr__ + + def shortDescription(self): + return "Doctest: " + self._dt_test.name + +def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None, + **options): + """ + Convert doctest tests for a module to a unittest test suite. + + This converts each documentation string in a module that + contains doctest tests to a unittest test case. If any of the + tests in a doc string fail, then the test case fails. An exception + is raised showing the name of the file containing the test and a + (sometimes approximate) line number. + + The `module` argument provides the module to be tested. The argument + can be either a module or a module name. + + If no argument is given, the calling module is used. + + A number of options may be provided as keyword arguments: + + setUp + A set-up function. This is called before running the + tests in each file. The setUp function will be passed a DocTest + object. The setUp function can access the test globals as the + globs attribute of the test passed. + + tearDown + A tear-down function. This is called after running the + tests in each file. The tearDown function will be passed a DocTest + object. The tearDown function can access the test globals as the + globs attribute of the test passed. + + globs + A dictionary containing initial global variables for the tests. + + optionflags + A set of doctest option flags expressed as an integer. + """ + + if test_finder is None: + test_finder = DocTestFinder() + + module = _normalize_module(module) + tests = test_finder.find(module, globs=globs, extraglobs=extraglobs) + if globs is None: + globs = module.__dict__ + if not tests: + # Why do we want to do this? Because it reveals a bug that might + # otherwise be hidden. + raise ValueError(module, "has no tests") + + tests.sort() + suite = unittest.TestSuite() + for test in tests: + if len(test.examples) == 0: + continue + if not test.filename: + filename = module.__file__ + if filename[-4:] in (".pyc", ".pyo"): + filename = filename[:-1] + test.filename = filename + suite.addTest(DocTestCase(test, **options)) + + return suite + +class DocFileCase(DocTestCase): + + def id(self): + return '_'.join(self._dt_test.name.split('.')) + + def __repr__(self): + return self._dt_test.filename + __str__ = __repr__ + + def format_failure(self, err): + return ('Failed doctest test for %s\n File "%s", line 0\n\n%s' + % (self._dt_test.name, self._dt_test.filename, err) + ) + +def DocFileTest(path, module_relative=True, package=None, + globs=None, parser=DocTestParser(), **options): + if globs is None: + globs = {} + + if package and not module_relative: + raise ValueError("Package may only be specified for module-" + "relative paths.") + + # Relativize the path. + if module_relative: + package = _normalize_module(package) + path = _module_relative_path(package, path) + + # Find the file and read it. + name = os.path.basename(path) + doc = open(path).read() + + # Convert it to a test, and wrap it in a DocFileCase. 
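(Editor's note, not part of the committed diff: a small self-contained sketch of wiring DocTestSuite into unittest, as described in the DocTestSuite docstring above. The add() function exists only for this example; with no module argument, DocTestSuite() collects the doctests of the calling module.)

    import doctest
    import unittest

    def add(a, b):
        """Return the sum of a and b.

        >>> add(2, 3)
        5
        """
        return a + b

    if __name__ == "__main__":
        # Collect this module's doctests as unittest test cases and run them.
        suite = doctest.DocTestSuite()
        unittest.TextTestRunner(verbosity=2).run(suite)
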
+ test = parser.get_doctest(doc, globs, name, path, 0) + return DocFileCase(test, **options) + +def DocFileSuite(*paths, **kw): + """A unittest suite for one or more doctest files. + + The path to each doctest file is given as a string; the + interpretation of that string depends on the keyword argument + "module_relative". + + A number of options may be provided as keyword arguments: + + module_relative + If "module_relative" is True, then the given file paths are + interpreted as os-independent module-relative paths. By + default, these paths are relative to the calling module's + directory; but if the "package" argument is specified, then + they are relative to that package. To ensure os-independence, + "filename" should use "/" characters to separate path + segments, and may not be an absolute path (i.e., it may not + begin with "/"). + + If "module_relative" is False, then the given file paths are + interpreted as os-specific paths. These paths may be absolute + or relative (to the current working directory). + + package + A Python package or the name of a Python package whose directory + should be used as the base directory for module relative paths. + If "package" is not specified, then the calling module's + directory is used as the base directory for module relative + filenames. It is an error to specify "package" if + "module_relative" is False. + + setUp + A set-up function. This is called before running the + tests in each file. The setUp function will be passed a DocTest + object. The setUp function can access the test globals as the + globs attribute of the test passed. + + tearDown + A tear-down function. This is called after running the + tests in each file. The tearDown function will be passed a DocTest + object. The tearDown function can access the test globals as the + globs attribute of the test passed. + + globs + A dictionary containing initial global variables for the tests. + + optionflags + A set of doctest option flags expressed as an integer. + + parser + A DocTestParser (or subclass) that should be used to extract + tests from the files. + """ + suite = unittest.TestSuite() + + # We do this here so that _normalize_module is called at the right + # level. If it were called in DocFileTest, then this function + # would be the caller and we might guess the package incorrectly. + if kw.get('module_relative', True): + kw['package'] = _normalize_module(kw.get('package')) + + for path in paths: + suite.addTest(DocFileTest(path, **kw)) + + return suite + +###################################################################### +## 9. Debugging Support +###################################################################### + +def script_from_examples(s): + r"""Extract script from text with examples. + + Converts text with examples to a Python script. Example input is + converted to regular code. Example output and all other words + are converted to comments: + + >>> text = ''' + ... Here are examples of simple math. + ... + ... Python has super accurate integer addition + ... + ... >>> 2 + 2 + ... 5 + ... + ... And very friendly error messages: + ... + ... >>> 1/0 + ... To Infinity + ... And + ... Beyond + ... + ... You can use logic if you want: + ... + ... >>> if 0: + ... ... blah + ... ... blah + ... ... + ... + ... Ho hum + ... ''' + + >>> print script_from_examples(text) + # Here are examples of simple math. 
+ # + # Python has super accurate integer addition + # + 2 + 2 + # Expected: + ## 5 + # + # And very friendly error messages: + # + 1/0 + # Expected: + ## To Infinity + ## And + ## Beyond + # + # You can use logic if you want: + # + if 0: + blah + blah + # + # Ho hum + """ + output = [] + for piece in DocTestParser().parse(s): + if isinstance(piece, Example): + # Add the example's source code (strip trailing NL) + output.append(piece.source[:-1]) + # Add the expected output: + want = piece.want + if want: + output.append('# Expected:') + output += ['## '+l for l in want.split('\n')[:-1]] + else: + # Add non-example text. + output += [_comment_line(l) + for l in piece.split('\n')[:-1]] + + # Trim junk on both ends. + while output and output[-1] == '#': + output.pop() + while output and output[0] == '#': + output.pop(0) + # Combine the output, and return it. + return '\n'.join(output) + +def testsource(module, name): + """Extract the test sources from a doctest docstring as a script. + + Provide the module (or dotted name of the module) containing the + test to be debugged and the name (within the module) of the object + with the doc string with tests to be debugged. + """ + module = _normalize_module(module) + tests = DocTestFinder().find(module) + test = [t for t in tests if t.name == name] + if not test: + raise ValueError(name, "not found in tests") + test = test[0] + testsrc = script_from_examples(test.docstring) + return testsrc + +def debug_src(src, pm=False, globs=None): + """Debug a single doctest docstring, in argument `src`'""" + testsrc = script_from_examples(src) + debug_script(testsrc, pm, globs) + +def debug_script(src, pm=False, globs=None): + "Debug a test script. `src` is the script, as a string." + import pdb + + # Note that tempfile.NameTemporaryFile() cannot be used. As the + # docs say, a file so created cannot be opened by name a second time + # on modern Windows boxes, and execfile() needs to open it. + srcfilename = tempfile.mktemp(".py", "doctestdebug") + f = open(srcfilename, 'w') + f.write(src) + f.close() + + try: + if globs: + globs = globs.copy() + else: + globs = {} + + if pm: + try: + execfile(srcfilename, globs, globs) + except: + print sys.exc_info()[1] + pdb.post_mortem(sys.exc_info()[2]) + else: + # Note that %r is vital here. '%s' instead can, e.g., cause + # backslashes to get treated as metacharacters on Windows. + pdb.run("execfile(%r)" % srcfilename, globs, globs) + + finally: + os.remove(srcfilename) + +def debug(module, name, pm=False): + """Debug a single doctest docstring. + + Provide the module (or dotted name of the module) containing the + test to be debugged and the name (within the module) of the object + with the docstring with tests to be debugged. + """ + module = _normalize_module(module) + testsrc = testsource(module, name) + debug_script(testsrc, pm, module.__dict__) + +###################################################################### +## 10. Example Usage +###################################################################### +class _TestClass: + """ + A pointless class, for sanity-checking of docstring testing. + + Methods: + square() + get() + + >>> _TestClass(13).get() + _TestClass(-12).get() + 1 + >>> hex(_TestClass(13).square().get()) + '0xa9' + """ + + def __init__(self, val): + """val -> _TestClass object with associated value val. 
+ + >>> t = _TestClass(123) + >>> print t.get() + 123 + """ + + self.val = val + + def square(self): + """square() -> square TestClass's associated value + + >>> _TestClass(13).square().get() + 169 + """ + + self.val = self.val ** 2 + return self + + def get(self): + """get() -> return TestClass's associated value. + + >>> x = _TestClass(-42) + >>> print x.get() + -42 + """ + + return self.val + +__test__ = {"_TestClass": _TestClass, + "string": r""" + Example of a string object, searched as-is. + >>> x = 1; y = 2 + >>> x + y, x * y + (3, 2) + """, + + "bool-int equivalence": r""" + In 2.2, boolean expressions displayed + 0 or 1. By default, we still accept + them. This can be disabled by passing + DONT_ACCEPT_TRUE_FOR_1 to the new + optionflags argument. + >>> 4 == 4 + 1 + >>> 4 == 4 + True + >>> 4 > 4 + 0 + >>> 4 > 4 + False + """, + + "blank lines": r""" + Blank lines can be marked with : + >>> print 'foo\n\nbar\n' + foo + + bar + + """, + + "ellipsis": r""" + If the ellipsis flag is used, then '...' can be used to + elide substrings in the desired output: + >>> print range(1000) #doctest: +ELLIPSIS + [0, 1, 2, ..., 999] + """, + + "whitespace normalization": r""" + If the whitespace normalization flag is used, then + differences in whitespace are ignored. + >>> print range(30) #doctest: +NORMALIZE_WHITESPACE + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29] + """, + } + +def _test(): + r = unittest.TextTestRunner() + r.run(DocTestSuite()) + +if __name__ == "__main__": + _test() Added: py/branch/dist-doctest/py/compat/optparse.py ============================================================================== --- (empty file) +++ py/branch/dist-doctest/py/compat/optparse.py Tue Jun 14 11:14:48 2005 @@ -0,0 +1,1567 @@ +"""optparse - a powerful, extensible, and easy-to-use option parser. + +By Greg Ward + +Originally distributed as Optik; see http://optik.sourceforge.net/ . + +If you have problems with this module, please do not file bugs, +patches, or feature requests with Python; instead, use Optik's +SourceForge project page: + http://sourceforge.net/projects/optik + +For support, use the optik-users at lists.sourceforge.net mailing list +(http://lists.sourceforge.net/lists/listinfo/optik-users). +""" + +# Python developers: please do not make changes to this file, since +# it is automatically generated from the Optik source code. + +__version__ = "1.5a2" + +__all__ = ['Option', + 'SUPPRESS_HELP', + 'SUPPRESS_USAGE', + 'Values', + 'OptionContainer', + 'OptionGroup', + 'OptionParser', + 'HelpFormatter', + 'IndentedHelpFormatter', + 'TitledHelpFormatter', + 'OptParseError', + 'OptionError', + 'OptionConflictError', + 'OptionValueError', + 'BadOptionError'] + +__copyright__ = """ +Copyright (c) 2001-2004 Gregory P. Ward. All rights reserved. +Copyright (c) 2002-2004 Python Software Foundation. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ + * Neither the name of the author nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +import sys, os +import types +import textwrap +from gettext import gettext as _ + +def _repr(self): + return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self) + + +# This file was generated from: +# Id: option_parser.py 421 2004-10-26 00:45:16Z greg +# Id: option.py 422 2004-10-26 00:53:47Z greg +# Id: help.py 367 2004-07-24 23:21:21Z gward +# Id: errors.py 367 2004-07-24 23:21:21Z gward + +class OptParseError (Exception): + def __init__(self, msg): + self.msg = msg + + def __str__(self): + return self.msg + + +class OptionError (OptParseError): + """ + Raised if an Option instance is created with invalid or + inconsistent arguments. + """ + + def __init__(self, msg, option): + self.msg = msg + self.option_id = str(option) + + def __str__(self): + if self.option_id: + return "option %s: %s" % (self.option_id, self.msg) + else: + return self.msg + +class OptionConflictError (OptionError): + """ + Raised if conflicting options are added to an OptionParser. + """ + +class OptionValueError (OptParseError): + """ + Raised if an invalid option value is encountered on the command + line. + """ + +class BadOptionError (OptParseError): + """ + Raised if an invalid or ambiguous option is seen on the command-line. + """ + + +class HelpFormatter: + + """ + Abstract base class for formatting option help. OptionParser + instances should use one of the HelpFormatter subclasses for + formatting help; by default IndentedHelpFormatter is used. + + Instance attributes: + parser : OptionParser + the controlling OptionParser instance + indent_increment : int + the number of columns to indent per nesting level + max_help_position : int + the maximum starting column for option help text + help_position : int + the calculated starting column for option help text; + initially the same as the maximum + width : int + total number of columns for output (pass None to constructor for + this value to be taken from the $COLUMNS environment variable) + level : int + current indentation level + current_indent : int + current indentation level (in columns) + help_width : int + number of columns available for option help text (calculated) + default_tag : str + text to replace with each option's default value, "%default" + by default. Set to false value to disable default value expansion. + option_strings : { Option : str } + maps Option instances to the snippet of help text explaining + the syntax of that option, e.g. "-h, --help" or + "-fFILE, --file=FILE" + _short_opt_fmt : str + format string controlling how short options with values are + printed in help text. 
Must be either "%s%s" ("-fFILE") or + "%s %s" ("-f FILE"), because those are the two syntaxes that + Optik supports. + _long_opt_fmt : str + similar but for long options; must be either "%s %s" ("--file FILE") + or "%s=%s" ("--file=FILE"). + """ + + NO_DEFAULT_VALUE = "none" + + def __init__(self, + indent_increment, + max_help_position, + width, + short_first): + self.parser = None + self.indent_increment = indent_increment + self.help_position = self.max_help_position = max_help_position + if width is None: + try: + width = int(os.environ['COLUMNS']) + except (KeyError, ValueError): + width = 80 + width -= 2 + self.width = width + self.current_indent = 0 + self.level = 0 + self.help_width = None # computed later + self.short_first = short_first + self.default_tag = "%default" + self.option_strings = {} + self._short_opt_fmt = "%s %s" + self._long_opt_fmt = "%s=%s" + + def set_parser(self, parser): + self.parser = parser + + def set_short_opt_delimiter(self, delim): + if delim not in ("", " "): + raise ValueError( + "invalid metavar delimiter for short options: %r" % delim) + self._short_opt_fmt = "%s" + delim + "%s" + + def set_long_opt_delimiter(self, delim): + if delim not in ("=", " "): + raise ValueError( + "invalid metavar delimiter for long options: %r" % delim) + self._long_opt_fmt = "%s" + delim + "%s" + + def indent(self): + self.current_indent += self.indent_increment + self.level += 1 + + def dedent(self): + self.current_indent -= self.indent_increment + assert self.current_indent >= 0, "Indent decreased below 0." + self.level -= 1 + + def format_usage(self, usage): + raise NotImplementedError, "subclasses must implement" + + def format_heading(self, heading): + raise NotImplementedError, "subclasses must implement" + + def format_description(self, description): + if not description: + return "" + desc_width = self.width - self.current_indent + indent = " "*self.current_indent + return textwrap.fill(description, + desc_width, + initial_indent=indent, + subsequent_indent=indent) + "\n" + + def expand_default(self, option): + if self.parser is None or not self.default_tag: + return option.help + + default_value = self.parser.defaults.get(option.dest) + if default_value is NO_DEFAULT or default_value is None: + default_value = self.NO_DEFAULT_VALUE + + return option.help.replace(self.default_tag, str(default_value)) + + def format_option(self, option): + # The help for each option consists of two parts: + # * the opt strings and metavars + # eg. ("-x", or "-fFILENAME, --file=FILENAME") + # * the user-supplied help string + # eg. ("turn on expert mode", "read data from FILENAME") + # + # If possible, we write both of these on the same line: + # -x turn on expert mode + # + # But if the opt string list is too long, we put the help + # string on a second line, indented to the same column it would + # start in if it fit on the first line. 
+ # -fFILENAME, --file=FILENAME + # read data from FILENAME + result = [] + opts = self.option_strings[option] + opt_width = self.help_position - self.current_indent - 2 + if len(opts) > opt_width: + opts = "%*s%s\n" % (self.current_indent, "", opts) + indent_first = self.help_position + else: # start help on same line as opts + opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts) + indent_first = 0 + result.append(opts) + if option.help: + help_text = self.expand_default(option) + help_lines = textwrap.wrap(help_text, self.help_width) + result.append("%*s%s\n" % (indent_first, "", help_lines[0])) + result.extend(["%*s%s\n" % (self.help_position, "", line) + for line in help_lines[1:]]) + elif opts[-1] != "\n": + result.append("\n") + return "".join(result) + + def store_option_strings(self, parser): + self.indent() + max_len = 0 + for opt in parser.option_list: + strings = self.format_option_strings(opt) + self.option_strings[opt] = strings + max_len = max(max_len, len(strings) + self.current_indent) + self.indent() + for group in parser.option_groups: + for opt in group.option_list: + strings = self.format_option_strings(opt) + self.option_strings[opt] = strings + max_len = max(max_len, len(strings) + self.current_indent) + self.dedent() + self.dedent() + self.help_position = min(max_len + 2, self.max_help_position) + self.help_width = self.width - self.help_position + + def format_option_strings(self, option): + """Return a comma-separated list of option strings & metavariables.""" + if option.takes_value(): + metavar = option.metavar or option.dest.upper() + short_opts = [self._short_opt_fmt % (sopt, metavar) + for sopt in option._short_opts] + long_opts = [self._long_opt_fmt % (lopt, metavar) + for lopt in option._long_opts] + else: + short_opts = option._short_opts + long_opts = option._long_opts + + if self.short_first: + opts = short_opts + long_opts + else: + opts = long_opts + short_opts + + return ", ".join(opts) + +class IndentedHelpFormatter (HelpFormatter): + """Format help with indented section bodies. + """ + + def __init__(self, + indent_increment=2, + max_help_position=24, + width=None, + short_first=1): + HelpFormatter.__init__( + self, indent_increment, max_help_position, width, short_first) + + def format_usage(self, usage): + return _("usage: %s\n") % usage + + def format_heading(self, heading): + return "%*s%s:\n" % (self.current_indent, "", heading) + + +class TitledHelpFormatter (HelpFormatter): + """Format help with underlined section headers. 
+ """ + + def __init__(self, + indent_increment=0, + max_help_position=24, + width=None, + short_first=0): + HelpFormatter.__init__ ( + self, indent_increment, max_help_position, width, short_first) + + def format_usage(self, usage): + return "%s %s\n" % (self.format_heading(_("Usage")), usage) + + def format_heading(self, heading): + return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading)) + + +_builtin_cvt = { "int" : (int, _("integer")), + "long" : (long, _("long integer")), + "float" : (float, _("floating-point")), + "complex" : (complex, _("complex")) } + +def check_builtin(option, opt, value): + (cvt, what) = _builtin_cvt[option.type] + try: + return cvt(value) + except ValueError: + raise OptionValueError( + _("option %s: invalid %s value: %r") % (opt, what, value)) + +def check_choice(option, opt, value): + if value in option.choices: + return value + else: + choices = ", ".join(map(repr, option.choices)) + raise OptionValueError( + _("option %s: invalid choice: %r (choose from %s)") + % (opt, value, choices)) + +# Not supplying a default is different from a default of None, +# so we need an explicit "not supplied" value. +NO_DEFAULT = ("NO", "DEFAULT") + + +class Option: + """ + Instance attributes: + _short_opts : [string] + _long_opts : [string] + + action : string + type : string + dest : string + default : any + nargs : int + const : any + choices : [string] + callback : function + callback_args : (any*) + callback_kwargs : { string : any } + help : string + metavar : string + """ + + # The list of instance attributes that may be set through + # keyword args to the constructor. + ATTRS = ['action', + 'type', + 'dest', + 'default', + 'nargs', + 'const', + 'choices', + 'callback', + 'callback_args', + 'callback_kwargs', + 'help', + 'metavar'] + + # The set of actions allowed by option parsers. Explicitly listed + # here so the constructor can validate its arguments. + ACTIONS = ("store", + "store_const", + "store_true", + "store_false", + "append", + "count", + "callback", + "help", + "version") + + # The set of actions that involve storing a value somewhere; + # also listed just for constructor argument validation. (If + # the action is one of these, there must be a destination.) + STORE_ACTIONS = ("store", + "store_const", + "store_true", + "store_false", + "append", + "count") + + # The set of actions for which it makes sense to supply a value + # type, ie. which may consume an argument from the command line. + TYPED_ACTIONS = ("store", + "append", + "callback") + + # The set of actions which *require* a value type, ie. that + # always consume an argument from the command line. + ALWAYS_TYPED_ACTIONS = ("store", + "append") + + # The set of known types for option parsers. Again, listed here for + # constructor argument validation. + TYPES = ("string", "int", "long", "float", "complex", "choice") + + # Dictionary of argument checking functions, which convert and + # validate option arguments according to the option type. + # + # Signature of checking functions is: + # check(option : Option, opt : string, value : string) -> any + # where + # option is the Option instance calling the checker + # opt is the actual option seen on the command-line + # (eg. "-a", "--file") + # value is the option argument seen on the command-line + # + # The return value should be in the appropriate Python type + # for option.type -- eg. an integer if option.type == "int". + # + # If no checker is defined for a type, arguments will be + # unchecked and remain strings. 
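(Editor's note, not part of the committed diff: one way to plug a checking function with the signature described above into a custom Option subclass, following the pattern from the Optik documentation. The "probability" type and option names are invented for illustration, and the plain `optparse` import stands in for wherever this module is installed; the built-in TYPE_CHECKER mapping follows immediately below.)

    from copy import copy
    from optparse import Option, OptionParser, OptionValueError

    def check_probability(option, opt, value):
        # check(option, opt, value) -> converted value,
        # or raise OptionValueError for a bad argument.
        try:
            p = float(value)
        except ValueError:
            raise OptionValueError(
                "option %s: invalid probability: %r" % (opt, value))
        if not 0.0 <= p <= 1.0:
            raise OptionValueError(
                "option %s: probability must be in [0, 1]: %r" % (opt, value))
        return p

    class ProbabilityOption(Option):
        TYPES = Option.TYPES + ("probability",)
        TYPE_CHECKER = copy(Option.TYPE_CHECKER)
        TYPE_CHECKER["probability"] = check_probability

    parser = OptionParser(option_class=ProbabilityOption)
    parser.add_option("-p", "--prob", type="probability", default=0.5)
    options, args = parser.parse_args(["-p", "0.25"])
    # options.prob is now the float 0.25
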
+ TYPE_CHECKER = { "int" : check_builtin, + "long" : check_builtin, + "float" : check_builtin, + "complex": check_builtin, + "choice" : check_choice, + } + + + # CHECK_METHODS is a list of unbound method objects; they are called + # by the constructor, in order, after all attributes are + # initialized. The list is created and filled in later, after all + # the methods are actually defined. (I just put it here because I + # like to define and document all class attributes in the same + # place.) Subclasses that add another _check_*() method should + # define their own CHECK_METHODS list that adds their check method + # to those from this class. + CHECK_METHODS = None + + + # -- Constructor/initialization methods ---------------------------- + + def __init__(self, *opts, **attrs): + # Set _short_opts, _long_opts attrs from 'opts' tuple. + # Have to be set now, in case no option strings are supplied. + self._short_opts = [] + self._long_opts = [] + opts = self._check_opt_strings(opts) + self._set_opt_strings(opts) + + # Set all other attrs (action, type, etc.) from 'attrs' dict + self._set_attrs(attrs) + + # Check all the attributes we just set. There are lots of + # complicated interdependencies, but luckily they can be farmed + # out to the _check_*() methods listed in CHECK_METHODS -- which + # could be handy for subclasses! The one thing these all share + # is that they raise OptionError if they discover a problem. + for checker in self.CHECK_METHODS: + checker(self) + + def _check_opt_strings(self, opts): + # Filter out None because early versions of Optik had exactly + # one short option and one long option, either of which + # could be None. + opts = filter(None, opts) + if not opts: + raise TypeError("at least one option string must be supplied") + return opts + + def _set_opt_strings(self, opts): + for opt in opts: + if len(opt) < 2: + raise OptionError( + "invalid option string %r: " + "must be at least two characters long" % opt, self) + elif len(opt) == 2: + if not (opt[0] == "-" and opt[1] != "-"): + raise OptionError( + "invalid short option string %r: " + "must be of the form -x, (x any non-dash char)" % opt, + self) + self._short_opts.append(opt) + else: + if not (opt[0:2] == "--" and opt[2] != "-"): + raise OptionError( + "invalid long option string %r: " + "must start with --, followed by non-dash" % opt, + self) + self._long_opts.append(opt) + + def _set_attrs(self, attrs): + for attr in self.ATTRS: + if attrs.has_key(attr): + setattr(self, attr, attrs[attr]) + del attrs[attr] + else: + if attr == 'default': + setattr(self, attr, NO_DEFAULT) + else: + setattr(self, attr, None) + if attrs: + raise OptionError( + "invalid keyword arguments: %s" % ", ".join(attrs.keys()), + self) + + + # -- Constructor validation methods -------------------------------- + + def _check_action(self): + if self.action is None: + self.action = "store" + elif self.action not in self.ACTIONS: + raise OptionError("invalid action: %r" % self.action, self) + + def _check_type(self): + if self.type is None: + if self.action in self.ALWAYS_TYPED_ACTIONS: + if self.choices is not None: + # The "choices" attribute implies "choice" type. + self.type = "choice" + else: + # No type given? "string" is the most sensible default. + self.type = "string" + else: + # Allow type objects as an alternative to their names. 
+ if type(self.type) is type: + self.type = self.type.__name__ + if self.type == "str": + self.type = "string" + + if self.type not in self.TYPES: + raise OptionError("invalid option type: %r" % self.type, self) + if self.action not in self.TYPED_ACTIONS: + raise OptionError( + "must not supply a type for action %r" % self.action, self) + + def _check_choice(self): + if self.type == "choice": + if self.choices is None: + raise OptionError( + "must supply a list of choices for type 'choice'", self) + elif type(self.choices) not in (types.TupleType, types.ListType): + raise OptionError( + "choices must be a list of strings ('%s' supplied)" + % str(type(self.choices)).split("'")[1], self) + elif self.choices is not None: + raise OptionError( + "must not supply choices for type %r" % self.type, self) + + def _check_dest(self): + # No destination given, and we need one for this action. The + # self.type check is for callbacks that take a value. + takes_value = (self.action in self.STORE_ACTIONS or + self.type is not None) + if self.dest is None and takes_value: + + # Glean a destination from the first long option string, + # or from the first short option string if no long options. + if self._long_opts: + # eg. "--foo-bar" -> "foo_bar" + self.dest = self._long_opts[0][2:].replace('-', '_') + else: + self.dest = self._short_opts[0][1] + + def _check_const(self): + if self.action != "store_const" and self.const is not None: + raise OptionError( + "'const' must not be supplied for action %r" % self.action, + self) + + def _check_nargs(self): + if self.action in self.TYPED_ACTIONS: + if self.nargs is None: + self.nargs = 1 + elif self.nargs is not None: + raise OptionError( + "'nargs' must not be supplied for action %r" % self.action, + self) + + def _check_callback(self): + if self.action == "callback": + if not callable(self.callback): + raise OptionError( + "callback not callable: %r" % self.callback, self) + if (self.callback_args is not None and + type(self.callback_args) is not types.TupleType): + raise OptionError( + "callback_args, if supplied, must be a tuple: not %r" + % self.callback_args, self) + if (self.callback_kwargs is not None and + type(self.callback_kwargs) is not types.DictType): + raise OptionError( + "callback_kwargs, if supplied, must be a dict: not %r" + % self.callback_kwargs, self) + else: + if self.callback is not None: + raise OptionError( + "callback supplied (%r) for non-callback option" + % self.callback, self) + if self.callback_args is not None: + raise OptionError( + "callback_args supplied for non-callback option", self) + if self.callback_kwargs is not None: + raise OptionError( + "callback_kwargs supplied for non-callback option", self) + + + CHECK_METHODS = [_check_action, + _check_type, + _check_choice, + _check_dest, + _check_const, + _check_nargs, + _check_callback] + + + # -- Miscellaneous methods ----------------------------------------- + + def __str__(self): + return "/".join(self._short_opts + self._long_opts) + + __repr__ = _repr + + def takes_value(self): + return self.type is not None + + def get_opt_string(self): + if self._long_opts: + return self._long_opts[0] + else: + return self._short_opts[0] + + + # -- Processing methods -------------------------------------------- + + def check_value(self, opt, value): + checker = self.TYPE_CHECKER.get(self.type) + if checker is None: + return value + else: + return checker(self, opt, value) + + def convert_value(self, opt, value): + if value is not None: + if self.nargs == 1: + return self.check_value(opt, 
value) + else: + return tuple([self.check_value(opt, v) for v in value]) + + def process(self, opt, value, values, parser): + + # First, convert the value(s) to the right type. Howl if any + # value(s) are bogus. + value = self.convert_value(opt, value) + + # And then take whatever action is expected of us. + # This is a separate method to make life easier for + # subclasses to add new actions. + return self.take_action( + self.action, self.dest, opt, value, values, parser) + + def take_action(self, action, dest, opt, value, values, parser): + if action == "store": + setattr(values, dest, value) + elif action == "store_const": + setattr(values, dest, self.const) + elif action == "store_true": + setattr(values, dest, True) + elif action == "store_false": + setattr(values, dest, False) + elif action == "append": + values.ensure_value(dest, []).append(value) + elif action == "count": + setattr(values, dest, values.ensure_value(dest, 0) + 1) + elif action == "callback": + args = self.callback_args or () + kwargs = self.callback_kwargs or {} + self.callback(self, opt, value, parser, *args, **kwargs) + elif action == "help": + parser.print_help() + parser.exit() + elif action == "version": + parser.print_version() + parser.exit() + else: + raise RuntimeError, "unknown action %r" % self.action + + return 1 + +# class Option + + +SUPPRESS_HELP = "SUPPRESS"+"HELP" +SUPPRESS_USAGE = "SUPPRESS"+"USAGE" + +# For compatibility with Python 2.2 +try: + True, False +except NameError: + (True, False) = (1, 0) +try: + basestring +except NameError: + basestring = (str, unicode) + + +class Values: + + def __init__(self, defaults=None): + if defaults: + for (attr, val) in defaults.items(): + setattr(self, attr, val) + + def __str__(self): + return str(self.__dict__) + + __repr__ = _repr + + def __eq__(self, other): + if isinstance(other, Values): + return self.__dict__ == other.__dict__ + elif isinstance(other, dict): + return self.__dict__ == other + else: + return False + + def __ne__(self, other): + return not (self == other) + + def _update_careful(self, dict): + """ + Update the option values from an arbitrary dictionary, but only + use keys from dict that already have a corresponding attribute + in self. Any keys in dict without a corresponding attribute + are silently ignored. + """ + for attr in dir(self): + if dict.has_key(attr): + dval = dict[attr] + if dval is not None: + setattr(self, attr, dval) + + def _update_loose(self, dict): + """ + Update the option values from an arbitrary dictionary, + using all keys from the dictionary regardless of whether + they have a corresponding attribute in self or not. + """ + self.__dict__.update(dict) + + def _update(self, dict, mode): + if mode == "careful": + self._update_careful(dict) + elif mode == "loose": + self._update_loose(dict) + else: + raise ValueError, "invalid update mode: %r" % mode + + def read_module(self, modname, mode="careful"): + __import__(modname) + mod = sys.modules[modname] + self._update(vars(mod), mode) + + def read_file(self, filename, mode="careful"): + vars = {} + execfile(filename, vars) + self._update(vars, mode) + + def ensure_value(self, attr, value): + if not hasattr(self, attr) or getattr(self, attr) is None: + setattr(self, attr, value) + return getattr(self, attr) + + +class OptionContainer: + + """ + Abstract base class. + + Class attributes: + standard_option_list : [Option] + list of standard options that will be accepted by all instances + of this parser class (intended to be overridden by subclasses). 
+ + Instance attributes: + option_list : [Option] + the list of Option objects contained by this OptionContainer + _short_opt : { string : Option } + dictionary mapping short option strings, eg. "-f" or "-X", + to the Option instances that implement them. If an Option + has multiple short option strings, it will appears in this + dictionary multiple times. [1] + _long_opt : { string : Option } + dictionary mapping long option strings, eg. "--file" or + "--exclude", to the Option instances that implement them. + Again, a given Option can occur multiple times in this + dictionary. [1] + defaults : { string : any } + dictionary mapping option destination names to default + values for each destination [1] + + [1] These mappings are common to (shared by) all components of the + controlling OptionParser, where they are initially created. + + """ + + def __init__(self, option_class, conflict_handler, description): + # Initialize the option list and related data structures. + # This method must be provided by subclasses, and it must + # initialize at least the following instance attributes: + # option_list, _short_opt, _long_opt, defaults. + self._create_option_list() + + self.option_class = option_class + self.set_conflict_handler(conflict_handler) + self.set_description(description) + + def _create_option_mappings(self): + # For use by OptionParser constructor -- create the master + # option mappings used by this OptionParser and all + # OptionGroups that it owns. + self._short_opt = {} # single letter -> Option instance + self._long_opt = {} # long option -> Option instance + self.defaults = {} # maps option dest -> default value + + + def _share_option_mappings(self, parser): + # For use by OptionGroup constructor -- use shared option + # mappings from the OptionParser that owns this OptionGroup. + self._short_opt = parser._short_opt + self._long_opt = parser._long_opt + self.defaults = parser.defaults + + def set_conflict_handler(self, handler): + if handler not in ("error", "resolve"): + raise ValueError, "invalid conflict_resolution value %r" % handler + self.conflict_handler = handler + + def set_description(self, description): + self.description = description + + def get_description(self): + return self.description + + + # -- Option-adding methods ----------------------------------------- + + def _check_conflict(self, option): + conflict_opts = [] + for opt in option._short_opts: + if self._short_opt.has_key(opt): + conflict_opts.append((opt, self._short_opt[opt])) + for opt in option._long_opts: + if self._long_opt.has_key(opt): + conflict_opts.append((opt, self._long_opt[opt])) + + if conflict_opts: + handler = self.conflict_handler + if handler == "error": + raise OptionConflictError( + "conflicting option string(s): %s" + % ", ".join([co[0] for co in conflict_opts]), + option) + elif handler == "resolve": + for (opt, c_option) in conflict_opts: + if opt.startswith("--"): + c_option._long_opts.remove(opt) + del self._long_opt[opt] + else: + c_option._short_opts.remove(opt) + del self._short_opt[opt] + if not (c_option._short_opts or c_option._long_opts): + c_option.container.option_list.remove(c_option) + + def add_option(self, *args, **kwargs): + """add_option(Option) + add_option(opt_str, ..., kwarg=val, ...) 
+ """ + if type(args[0]) is types.StringType: + option = self.option_class(*args, **kwargs) + elif len(args) == 1 and not kwargs: + option = args[0] + if not isinstance(option, Option): + raise TypeError, "not an Option instance: %r" % option + else: + raise TypeError, "invalid arguments" + + self._check_conflict(option) + + self.option_list.append(option) + option.container = self + for opt in option._short_opts: + self._short_opt[opt] = option + for opt in option._long_opts: + self._long_opt[opt] = option + + if option.dest is not None: # option has a dest, we need a default + if option.default is not NO_DEFAULT: + self.defaults[option.dest] = option.default + elif not self.defaults.has_key(option.dest): + self.defaults[option.dest] = None + + return option + + def add_options(self, option_list): + for option in option_list: + self.add_option(option) + + # -- Option query/removal methods ---------------------------------- + + def get_option(self, opt_str): + return (self._short_opt.get(opt_str) or + self._long_opt.get(opt_str)) + + def has_option(self, opt_str): + return (self._short_opt.has_key(opt_str) or + self._long_opt.has_key(opt_str)) + + def remove_option(self, opt_str): + option = self._short_opt.get(opt_str) + if option is None: + option = self._long_opt.get(opt_str) + if option is None: + raise ValueError("no such option %r" % opt_str) + + for opt in option._short_opts: + del self._short_opt[opt] + for opt in option._long_opts: + del self._long_opt[opt] + option.container.option_list.remove(option) + + + # -- Help-formatting methods --------------------------------------- + + def format_option_help(self, formatter): + if not self.option_list: + return "" + result = [] + for option in self.option_list: + if not option.help is SUPPRESS_HELP: + result.append(formatter.format_option(option)) + return "".join(result) + + def format_description(self, formatter): + return formatter.format_description(self.get_description()) + + def format_help(self, formatter): + result = [] + if self.description: + result.append(self.format_description(formatter)) + if self.option_list: + result.append(self.format_option_help(formatter)) + return "\n".join(result) + + +class OptionGroup (OptionContainer): + + def __init__(self, parser, title, description=None): + self.parser = parser + OptionContainer.__init__( + self, parser.option_class, parser.conflict_handler, description) + self.title = title + + def _create_option_list(self): + self.option_list = [] + self._share_option_mappings(self.parser) + + def set_title(self, title): + self.title = title + + # -- Help-formatting methods --------------------------------------- + + def format_help(self, formatter): + result = formatter.format_heading(self.title) + formatter.indent() + result += OptionContainer.format_help(self, formatter) + formatter.dedent() + return result + + +class OptionParser (OptionContainer): + + """ + Class attributes: + standard_option_list : [Option] + list of standard options that will be accepted by all instances + of this parser class (intended to be overridden by subclasses). + + Instance attributes: + usage : string + a usage string for your program. Before it is displayed + to the user, "%prog" will be expanded to the name of + your program (self.prog or os.path.basename(sys.argv[0])). + prog : string + the name of the current program (to override + os.path.basename(sys.argv[0])). 
+ + option_groups : [OptionGroup] + list of option groups in this parser (option groups are + irrelevant for parsing the command-line, but very useful + for generating help) + + allow_interspersed_args : bool = true + if true, positional arguments may be interspersed with options. + Assuming -a and -b each take a single argument, the command-line + -ablah foo bar -bboo baz + will be interpreted the same as + -ablah -bboo -- foo bar baz + If this flag were false, that command line would be interpreted as + -ablah -- foo bar -bboo baz + -- ie. we stop processing options as soon as we see the first + non-option argument. (This is the tradition followed by + Python's getopt module, Perl's Getopt::Std, and other argument- + parsing libraries, but it is generally annoying to users.) + + process_default_values : bool = true + if true, option default values are processed similarly to option + values from the command line: that is, they are passed to the + type-checking function for the option's type (as long as the + default value is a string). (This really only matters if you + have defined custom types; see SF bug #955889.) Set it to false + to restore the behaviour of Optik 1.4.1 and earlier. + + rargs : [string] + the argument list currently being parsed. Only set when + parse_args() is active, and continually trimmed down as + we consume arguments. Mainly there for the benefit of + callback options. + largs : [string] + the list of leftover arguments that we have skipped while + parsing options. If allow_interspersed_args is false, this + list is always empty. + values : Values + the set of option values currently being accumulated. Only + set when parse_args() is active. Also mainly for callbacks. + + Because of the 'rargs', 'largs', and 'values' attributes, + OptionParser is not thread-safe. If, for some perverse reason, you + need to parse command-line arguments simultaneously in different + threads, use different OptionParser instances. + + """ + + standard_option_list = [] + + def __init__(self, + usage=None, + option_list=None, + option_class=Option, + version=None, + conflict_handler="error", + description=None, + formatter=None, + add_help_option=True, + prog=None): + OptionContainer.__init__( + self, option_class, conflict_handler, description) + self.set_usage(usage) + self.prog = prog + self.version = version + self.allow_interspersed_args = True + self.process_default_values = True + if formatter is None: + formatter = IndentedHelpFormatter() + self.formatter = formatter + self.formatter.set_parser(self) + + # Populate the option list; initial sources are the + # standard_option_list class attribute, the 'option_list' + # argument, and (if applicable) the _add_version_option() and + # _add_help_option() methods. 
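(Editor's note, not part of the committed diff: a minimal use of the OptionParser interface documented in the class docstring above, echoing its -ablah/-bboo example. The option names and sample argument list are invented for illustration.)

    from optparse import OptionParser

    parser = OptionParser(usage="%prog [options] filename")
    parser.add_option("-a", dest="alpha", help="takes one argument")
    parser.add_option("-b", dest="beta", help="takes one argument")

    # With allow_interspersed_args (the default), positional arguments may
    # appear between options:
    options, args = parser.parse_args(["-ablah", "foo", "bar", "-bboo", "baz"])
    # options.alpha == "blah", options.beta == "boo", args == ["foo", "bar", "baz"]

    # Calling parser.disable_interspersed_args() first would instead stop
    # option processing at "foo", leaving ["foo", "bar", "-bboo", "baz"] in args.
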
+ self._populate_option_list(option_list, + add_help=add_help_option) + + self._init_parsing_state() + + # -- Private methods ----------------------------------------------- + # (used by our or OptionContainer's constructor) + + def _create_option_list(self): + self.option_list = [] + self.option_groups = [] + self._create_option_mappings() + + def _add_help_option(self): + self.add_option("-h", "--help", + action="help", + help=_("show this help message and exit")) + + def _add_version_option(self): + self.add_option("--version", + action="version", + help=_("show program's version number and exit")) + + def _populate_option_list(self, option_list, add_help=True): + if self.standard_option_list: + self.add_options(self.standard_option_list) + if option_list: + self.add_options(option_list) + if self.version: + self._add_version_option() + if add_help: + self._add_help_option() + + def _init_parsing_state(self): + # These are set in parse_args() for the convenience of callbacks. + self.rargs = None + self.largs = None + self.values = None + + + # -- Simple modifier methods --------------------------------------- + + def set_usage(self, usage): + if usage is None: + self.usage = _("%prog [options]") + elif usage is SUPPRESS_USAGE: + self.usage = None + # For backwards compatibility with Optik 1.3 and earlier. + elif usage.startswith("usage:" + " "): + self.usage = usage[7:] + else: + self.usage = usage + + def enable_interspersed_args(self): + self.allow_interspersed_args = True + + def disable_interspersed_args(self): + self.allow_interspersed_args = False + + def set_process_default_values(self, process): + self.process_default_values = process + + def set_default(self, dest, value): + self.defaults[dest] = value + + def set_defaults(self, **kwargs): + self.defaults.update(kwargs) + + def _get_all_options(self): + options = self.option_list[:] + for group in self.option_groups: + options.extend(group.option_list) + return options + + def get_default_values(self): + if not self.process_default_values: + # Old, pre-Optik 1.5 behaviour. 
+ return Values(self.defaults) + + defaults = self.defaults.copy() + for option in self._get_all_options(): + default = defaults.get(option.dest) + if isinstance(default, basestring): + opt_str = option.get_opt_string() + defaults[option.dest] = option.check_value(opt_str, default) + + return Values(defaults) + + + # -- OptionGroup methods ------------------------------------------- + + def add_option_group(self, *args, **kwargs): + # XXX lots of overlap with OptionContainer.add_option() + if type(args[0]) is types.StringType: + group = OptionGroup(self, *args, **kwargs) + elif len(args) == 1 and not kwargs: + group = args[0] + if not isinstance(group, OptionGroup): + raise TypeError, "not an OptionGroup instance: %r" % group + if group.parser is not self: + raise ValueError, "invalid OptionGroup (wrong parser)" + else: + raise TypeError, "invalid arguments" + + self.option_groups.append(group) + return group + + def get_option_group(self, opt_str): + option = (self._short_opt.get(opt_str) or + self._long_opt.get(opt_str)) + if option and option.container is not self: + return option.container + return None + + + # -- Option-parsing methods ---------------------------------------- + + def _get_args(self, args): + if args is None: + return sys.argv[1:] + else: + return args[:] # don't modify caller's list + + def parse_args(self, args=None, values=None): + """ + parse_args(args : [string] = sys.argv[1:], + values : Values = None) + -> (values : Values, args : [string]) + + Parse the command-line options found in 'args' (default: + sys.argv[1:]). Any errors result in a call to 'error()', which + by default prints the usage message to stderr and calls + sys.exit() with an error message. On success returns a pair + (values, args) where 'values' is an Values instance (with all + your option values) and 'args' is the list of arguments left + over after parsing options. + """ + rargs = self._get_args(args) + if values is None: + values = self.get_default_values() + + # Store the halves of the argument list as attributes for the + # convenience of callbacks: + # rargs + # the rest of the command-line (the "r" stands for + # "remaining" or "right-hand") + # largs + # the leftover arguments -- ie. what's left after removing + # options and their arguments (the "l" stands for "leftover" + # or "left-hand") + self.rargs = rargs + self.largs = largs = [] + self.values = values + + try: + stop = self._process_args(largs, rargs, values) + except (BadOptionError, OptionValueError), err: + self.error(err.msg) + + args = largs + rargs + return self.check_values(values, args) + + def check_values(self, values, args): + """ + check_values(values : Values, args : [string]) + -> (values : Values, args : [string]) + + Check that the supplied option values and leftover arguments are + valid. Returns the option values and leftover arguments + (possibly adjusted, possibly completely new -- whatever you + like). Default implementation just returns the passed-in + values; subclasses may override as desired. + """ + return (values, args) + + def _process_args(self, largs, rargs, values): + """_process_args(largs : [string], + rargs : [string], + values : Values) + + Process command-line arguments and populate 'values', consuming + options and arguments from 'rargs'. If 'allow_interspersed_args' is + false, stop at the first non-option argument. If true, accumulate any + interspersed non-option arguments in 'largs'. 
+ """ + while rargs: + arg = rargs[0] + # We handle bare "--" explicitly, and bare "-" is handled by the + # standard arg handler since the short arg case ensures that the + # len of the opt string is greater than 1. + if arg == "--": + del rargs[0] + return + elif arg[0:2] == "--": + # process a single long option (possibly with value(s)) + self._process_long_opt(rargs, values) + elif arg[:1] == "-" and len(arg) > 1: + # process a cluster of short options (possibly with + # value(s) for the last one only) + self._process_short_opts(rargs, values) + elif self.allow_interspersed_args: + largs.append(arg) + del rargs[0] + else: + return # stop now, leave this arg in rargs + + # Say this is the original argument list: + # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] + # ^ + # (we are about to process arg(i)). + # + # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of + # [arg0, ..., arg(i-1)] (any options and their arguments will have + # been removed from largs). + # + # The while loop will usually consume 1 or more arguments per pass. + # If it consumes 1 (eg. arg is an option that takes no arguments), + # then after _process_arg() is done the situation is: + # + # largs = subset of [arg0, ..., arg(i)] + # rargs = [arg(i+1), ..., arg(N-1)] + # + # If allow_interspersed_args is false, largs will always be + # *empty* -- still a subset of [arg0, ..., arg(i-1)], but + # not a very interesting subset! + + def _match_long_opt(self, opt): + """_match_long_opt(opt : string) -> string + + Determine which long option string 'opt' matches, ie. which one + it is an unambiguous abbrevation for. Raises BadOptionError if + 'opt' doesn't unambiguously match any long option string. + """ + return _match_abbrev(opt, self._long_opt) + + def _process_long_opt(self, rargs, values): + arg = rargs.pop(0) + + # Value explicitly attached to arg? Pretend it's the next + # argument. + if "=" in arg: + (opt, next_arg) = arg.split("=", 1) + rargs.insert(0, next_arg) + had_explicit_value = True + else: + opt = arg + had_explicit_value = False + + opt = self._match_long_opt(opt) + option = self._long_opt[opt] + if option.takes_value(): + nargs = option.nargs + if len(rargs) < nargs: + if nargs == 1: + self.error(_("%s option requires an argument") % opt) + else: + self.error(_("%s option requires %d arguments") + % (opt, nargs)) + elif nargs == 1: + value = rargs.pop(0) + else: + value = tuple(rargs[0:nargs]) + del rargs[0:nargs] + + elif had_explicit_value: + self.error(_("%s option does not take a value") % opt) + + else: + value = None + + option.process(opt, value, values, self) + + def _process_short_opts(self, rargs, values): + arg = rargs.pop(0) + stop = False + i = 1 + for ch in arg[1:]: + opt = "-" + ch + option = self._short_opt.get(opt) + i += 1 # we have consumed a character + + if not option: + self.error(_("no such option: %s") % opt) + if option.takes_value(): + # Any characters left in arg? Pretend they're the + # next arg, and stop consuming characters of arg. 
+ if i < len(arg): + rargs.insert(0, arg[i:]) + stop = True + + nargs = option.nargs + if len(rargs) < nargs: + if nargs == 1: + self.error(_("%s option requires an argument") % opt) + else: + self.error(_("%s option requires %d arguments") + % (opt, nargs)) + elif nargs == 1: + value = rargs.pop(0) + else: + value = tuple(rargs[0:nargs]) + del rargs[0:nargs] + + else: # option doesn't take a value + value = None + + option.process(opt, value, values, self) + + if stop: + break + + + # -- Feedback methods ---------------------------------------------- + + def get_prog_name(self): + if self.prog is None: + return os.path.basename(sys.argv[0]) + else: + return self.prog + + def expand_prog_name(self, s): + return s.replace("%prog", self.get_prog_name()) + + def get_description(self): + return self.expand_prog_name(self.description) + + def exit(self, status=0, msg=None): + if msg: + sys.stderr.write(msg) + sys.exit(status) + + def error(self, msg): + """error(msg : string) + + Print a usage message incorporating 'msg' to stderr and exit. + If you override this in a subclass, it should not return -- it + should either exit or raise an exception. + """ + self.print_usage(sys.stderr) + self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg)) + + def get_usage(self): + if self.usage: + return self.formatter.format_usage( + self.expand_prog_name(self.usage)) + else: + return "" + + def print_usage(self, file=None): + """print_usage(file : file = stdout) + + Print the usage message for the current program (self.usage) to + 'file' (default stdout). Any occurence of the string "%prog" in + self.usage is replaced with the name of the current program + (basename of sys.argv[0]). Does nothing if self.usage is empty + or not defined. + """ + if self.usage: + print >>file, self.get_usage() + + def get_version(self): + if self.version: + return self.expand_prog_name(self.version) + else: + return "" + + def print_version(self, file=None): + """print_version(file : file = stdout) + + Print the version message for this program (self.version) to + 'file' (default stdout). As with print_usage(), any occurence + of "%prog" in self.version is replaced by the current program's + name. Does nothing if self.version is empty or undefined. + """ + if self.version: + print >>file, self.get_version() + + def format_option_help(self, formatter=None): + if formatter is None: + formatter = self.formatter + formatter.store_option_strings(self) + result = [] + result.append(formatter.format_heading(_("options"))) + formatter.indent() + if self.option_list: + result.append(OptionContainer.format_option_help(self, formatter)) + result.append("\n") + for group in self.option_groups: + result.append(group.format_help(formatter)) + result.append("\n") + formatter.dedent() + # Drop the last "\n", or the header if no options or option groups: + return "".join(result[:-1]) + + def format_help(self, formatter=None): + if formatter is None: + formatter = self.formatter + result = [] + if self.usage: + result.append(self.get_usage() + "\n") + if self.description: + result.append(self.format_description(formatter) + "\n") + result.append(self.format_option_help(formatter)) + return "".join(result) + + def print_help(self, file=None): + """print_help(file : file = stdout) + + Print an extended help message, listing all options and any + help text provided with them, to 'file' (default stdout). 
+ """ + if file is None: + file = sys.stdout + file.write(self.format_help()) + +# class OptionParser + + +def _match_abbrev(s, wordmap): + """_match_abbrev(s : string, wordmap : {string : Option}) -> string + + Return the string key in 'wordmap' for which 's' is an unambiguous + abbreviation. If 's' is found to be ambiguous or doesn't match any of + 'words', raise BadOptionError. + """ + # Is there an exact match? + if wordmap.has_key(s): + return s + else: + # Isolate all words with s as a prefix. + possibilities = [word for word in wordmap.keys() + if word.startswith(s)] + # No exact match, so there had better be just one possibility. + if len(possibilities) == 1: + return possibilities[0] + elif not possibilities: + raise BadOptionError(_("no such option: %s") % s) + else: + # More than one possible completion: ambiguous prefix. + raise BadOptionError(_("ambiguous option: %s (%s?)") + % (s, ", ".join(possibilities))) + + +# Some day, there might be many Option classes. As of Optik 1.3, the +# preferred way to instantiate Options is indirectly, via make_option(), +# which will become a factory function when there are many Option +# classes. +make_option = Option Added: py/branch/dist-doctest/py/compat/testing/test_doctest.py ============================================================================== --- (empty file) +++ py/branch/dist-doctest/py/compat/testing/test_doctest.py Tue Jun 14 11:14:48 2005 @@ -0,0 +1,2377 @@ +""" +Test script for doctest. +""" + +import py +doctest = py.compat.doctest + +import sys +sys.modules['doctest'] = py.compat.doctest + +from test import test_support +import warnings + +###################################################################### +## Sample Objects (used by test cases) +###################################################################### + +def sample_func(v): + """ + Blah blah + + >>> print sample_func(22) + 44 + + Yee ha! + """ + return v+v + +class SampleClass: + """ + >>> print 1 + 1 + + >>> # comments get ignored. so are empty PS1 and PS2 prompts: + >>> + ... + + Multiline example: + >>> sc = SampleClass(3) + >>> for i in range(10): + ... sc = sc.double() + ... 
print sc.get(), + 6 12 24 48 96 192 384 768 1536 3072 + """ + def __init__(self, val): + """ + >>> print SampleClass(12).get() + 12 + """ + self.val = val + + def double(self): + """ + >>> print SampleClass(12).double().get() + 24 + """ + return SampleClass(self.val + self.val) + + def get(self): + """ + >>> print SampleClass(-5).get() + -5 + """ + return self.val + + def a_staticmethod(v): + """ + >>> print SampleClass.a_staticmethod(10) + 11 + """ + return v+1 + a_staticmethod = staticmethod(a_staticmethod) + + def a_classmethod(cls, v): + """ + >>> print SampleClass.a_classmethod(10) + 12 + >>> print SampleClass(0).a_classmethod(10) + 12 + """ + return v+2 + a_classmethod = classmethod(a_classmethod) + + a_property = property(get, doc=""" + >>> print SampleClass(22).a_property + 22 + """) + + class NestedClass: + """ + >>> x = SampleClass.NestedClass(5) + >>> y = x.square() + >>> print y.get() + 25 + """ + def __init__(self, val=0): + """ + >>> print SampleClass.NestedClass().get() + 0 + """ + self.val = val + def square(self): + return SampleClass.NestedClass(self.val*self.val) + def get(self): + return self.val + +class SampleNewStyleClass(object): + r""" + >>> print '1\n2\n3' + 1 + 2 + 3 + """ + def __init__(self, val): + """ + >>> print SampleNewStyleClass(12).get() + 12 + """ + self.val = val + + def double(self): + """ + >>> print SampleNewStyleClass(12).double().get() + 24 + """ + return SampleNewStyleClass(self.val + self.val) + + def get(self): + """ + >>> print SampleNewStyleClass(-5).get() + -5 + """ + return self.val + +###################################################################### +## Fake stdin (for testing interactive debugging) +###################################################################### + +class _FakeInput: + """ + A fake input stream for pdb's interactive debugger. Whenever a + line is read, print it (to simulate the user typing it), and then + return it. The set of lines to return is specified in the + constructor; they should not have trailing newlines. + """ + def __init__(self, lines): + self.lines = lines + + def readline(self): + line = self.lines.pop(0) + print line + return line+'\n' + +###################################################################### +## Test Cases +###################################################################### + +def test_Example(): r""" +Unit tests for the `Example` class. + +Example is a simple container class that holds: + - `source`: A source string. + - `want`: An expected output string. + - `exc_msg`: An expected exception message string (or None if no + exception is expected). + - `lineno`: A line number (within the docstring). + - `indent`: The example's indentation in the input string. + - `options`: An option dictionary, mapping option flags to True or + False. + +These attributes are set by the constructor. `source` and `want` are +required; the other attributes all have default values: + + >>> example = doctest.Example('print 1', '1\n') + >>> (example.source, example.want, example.exc_msg, + ... example.lineno, example.indent, example.options) + ('print 1\n', '1\n', None, 0, 0, {}) + +The first three attributes (`source`, `want`, and `exc_msg`) may be +specified positionally; the remaining arguments should be specified as +keyword arguments: + + >>> exc_msg = 'IndexError: pop from an empty list' + >>> example = doctest.Example('[].pop()', '', exc_msg, + ... lineno=5, indent=4, + ... options={doctest.ELLIPSIS: True}) + >>> (example.source, example.want, example.exc_msg, + ... 
example.lineno, example.indent, example.options) + ('[].pop()\n', '', 'IndexError: pop from an empty list\n', 5, 4, {8: True}) + +The constructor normalizes the `source` string to end in a newline: + + Source spans a single line: no terminating newline. + >>> e = doctest.Example('print 1', '1\n') + >>> e.source, e.want + ('print 1\n', '1\n') + + >>> e = doctest.Example('print 1\n', '1\n') + >>> e.source, e.want + ('print 1\n', '1\n') + + Source spans multiple lines: require terminating newline. + >>> e = doctest.Example('print 1;\nprint 2\n', '1\n2\n') + >>> e.source, e.want + ('print 1;\nprint 2\n', '1\n2\n') + + >>> e = doctest.Example('print 1;\nprint 2', '1\n2\n') + >>> e.source, e.want + ('print 1;\nprint 2\n', '1\n2\n') + + Empty source string (which should never appear in real examples) + >>> e = doctest.Example('', '') + >>> e.source, e.want + ('\n', '') + +The constructor normalizes the `want` string to end in a newline, +unless it's the empty string: + + >>> e = doctest.Example('print 1', '1\n') + >>> e.source, e.want + ('print 1\n', '1\n') + + >>> e = doctest.Example('print 1', '1') + >>> e.source, e.want + ('print 1\n', '1\n') + + >>> e = doctest.Example('print', '') + >>> e.source, e.want + ('print\n', '') + +The constructor normalizes the `exc_msg` string to end in a newline, +unless it's `None`: + + Message spans one line + >>> exc_msg = 'IndexError: pop from an empty list' + >>> e = doctest.Example('[].pop()', '', exc_msg) + >>> e.exc_msg + 'IndexError: pop from an empty list\n' + + >>> exc_msg = 'IndexError: pop from an empty list\n' + >>> e = doctest.Example('[].pop()', '', exc_msg) + >>> e.exc_msg + 'IndexError: pop from an empty list\n' + + Message spans multiple lines + >>> exc_msg = 'ValueError: 1\n 2' + >>> e = doctest.Example('raise ValueError("1\n 2")', '', exc_msg) + >>> e.exc_msg + 'ValueError: 1\n 2\n' + + >>> exc_msg = 'ValueError: 1\n 2\n' + >>> e = doctest.Example('raise ValueError("1\n 2")', '', exc_msg) + >>> e.exc_msg + 'ValueError: 1\n 2\n' + + Empty (but non-None) exception message (which should never appear + in real examples) + >>> exc_msg = '' + >>> e = doctest.Example('raise X()', '', exc_msg) + >>> e.exc_msg + '\n' +""" + +def test_DocTest(): r""" +Unit tests for the `DocTest` class. + +DocTest is a collection of examples, extracted from a docstring, along +with information about where the docstring comes from (a name, +filename, and line number). The docstring is parsed by the `DocTest` +constructor: + + >>> docstring = ''' + ... >>> print 12 + ... 12 + ... + ... Non-example text. + ... + ... >>> print 'another\example' + ... another + ... example + ... ''' + >>> globs = {} # globals to run the test in. + >>> parser = doctest.DocTestParser() + >>> test = parser.get_doctest(docstring, globs, 'some_test', + ... 
'some_file', 20) + >>> print test + + >>> len(test.examples) + 2 + >>> e1, e2 = test.examples + >>> (e1.source, e1.want, e1.lineno) + ('print 12\n', '12\n', 1) + >>> (e2.source, e2.want, e2.lineno) + ("print 'another\\example'\n", 'another\nexample\n', 6) + +Source information (name, filename, and line number) is available as +attributes on the doctest object: + + >>> (test.name, test.filename, test.lineno) + ('some_test', 'some_file', 20) + +The line number of an example within its containing file is found by +adding the line number of the example and the line number of its +containing test: + + >>> test.lineno + e1.lineno + 21 + >>> test.lineno + e2.lineno + 26 + +If the docstring contains inconsistant leading whitespace in the +expected output of an example, then `DocTest` will raise a ValueError: + + >>> docstring = r''' + ... >>> print 'bad\nindentation' + ... bad + ... indentation + ... ''' + >>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0) + Traceback (most recent call last): + ValueError: line 4 of the docstring for some_test has inconsistent leading whitespace: 'indentation' + +If the docstring contains inconsistent leading whitespace on +continuation lines, then `DocTest` will raise a ValueError: + + >>> docstring = r''' + ... >>> print ('bad indentation', + ... ... 2) + ... ('bad', 'indentation') + ... ''' + >>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0) + Traceback (most recent call last): + ValueError: line 2 of the docstring for some_test has inconsistent leading whitespace: '... 2)' + +If there's no blank space after a PS1 prompt ('>>>'), then `DocTest` +will raise a ValueError: + + >>> docstring = '>>>print 1\n1' + >>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0) + Traceback (most recent call last): + ValueError: line 1 of the docstring for some_test lacks blank after >>>: '>>>print 1' + +If there's no blank space after a PS2 prompt ('...'), then `DocTest` +will raise a ValueError: + + >>> docstring = '>>> if 1:\n...print 1\n1' + >>> parser.get_doctest(docstring, globs, 'some_test', 'filename', 0) + Traceback (most recent call last): + ValueError: line 2 of the docstring for some_test lacks blank after ...: '...print 1' + +""" + +def test_DocTestFinder(): r""" +Unit tests for the `DocTestFinder` class. + +DocTestFinder is used to extract DocTests from an object's docstring +and the docstrings of its contained objects. It can be used with +modules, functions, classes, methods, staticmethods, classmethods, and +properties. + +Finding Tests in Functions +~~~~~~~~~~~~~~~~~~~~~~~~~~ +For a function whose docstring contains examples, DocTestFinder.find() +will return a single test (for that function's docstring): + + >>> finder = doctest.DocTestFinder() + +We'll simulate a __file__ attr that ends in pyc: + + >>> import test.test_doctest + >>> old = test.test_doctest.__file__ + >>> test.test_doctest.__file__ = 'test_doctest.pyc' + + >>> tests = finder.find(sample_func) + + >>> print tests # doctest: +ELLIPSIS + [] + +The exact name depends on how test_doctest was invoked, so allow for +leading path components. + + >>> tests[0].filename # doctest: +ELLIPSIS + '...test_doctest.py' + + >>> test.test_doctest.__file__ = old + + + >>> e = tests[0].examples[0] + >>> (e.source, e.want, e.lineno) + ('print sample_func(22)\n', '44\n', 3) + +By default, tests are created for objects with no docstring: + + >>> def no_docstring(v): + ... 
pass + >>> finder.find(no_docstring) + [] + +However, the optional argument `exclude_empty` to the DocTestFinder +constructor can be used to exclude tests for objects with empty +docstrings: + + >>> def no_docstring(v): + ... pass + >>> excl_empty_finder = doctest.DocTestFinder(exclude_empty=True) + >>> excl_empty_finder.find(no_docstring) + [] + +If the function has a docstring with no examples, then a test with no +examples is returned. (This lets `DocTestRunner` collect statistics +about which functions have no tests -- but is that useful? And should +an empty test also be created when there's no docstring?) + + >>> def no_examples(v): + ... ''' no doctest examples ''' + >>> finder.find(no_examples) # doctest: +ELLIPSIS + [] + +Finding Tests in Classes +~~~~~~~~~~~~~~~~~~~~~~~~ +For a class, DocTestFinder will create a test for the class's +docstring, and will recursively explore its contents, including +methods, classmethods, staticmethods, properties, and nested classes. + + >>> finder = doctest.DocTestFinder() + >>> tests = finder.find(SampleClass) + >>> tests.sort() + >>> for t in tests: + ... print '%2s %s' % (len(t.examples), t.name) + 3 SampleClass + 3 SampleClass.NestedClass + 1 SampleClass.NestedClass.__init__ + 1 SampleClass.__init__ + 2 SampleClass.a_classmethod + 1 SampleClass.a_property + 1 SampleClass.a_staticmethod + 1 SampleClass.double + 1 SampleClass.get + +New-style classes are also supported: + + >>> tests = finder.find(SampleNewStyleClass) + >>> tests.sort() + >>> for t in tests: + ... print '%2s %s' % (len(t.examples), t.name) + 1 SampleNewStyleClass + 1 SampleNewStyleClass.__init__ + 1 SampleNewStyleClass.double + 1 SampleNewStyleClass.get + +Finding Tests in Modules +~~~~~~~~~~~~~~~~~~~~~~~~ +For a module, DocTestFinder will create a test for the class's +docstring, and will recursively explore its contents, including +functions, classes, and the `__test__` dictionary, if it exists: + + >>> # A module + >>> import new + >>> m = new.module('some_module') + >>> def triple(val): + ... ''' + ... >>> print triple(11) + ... 33 + ... ''' + ... return val*3 + >>> m.__dict__.update({ + ... 'sample_func': sample_func, + ... 'SampleClass': SampleClass, + ... '__doc__': ''' + ... Module docstring. + ... >>> print 'module' + ... module + ... ''', + ... '__test__': { + ... 'd': '>>> print 6\n6\n>>> print 7\n7\n', + ... 'c': triple}}) + + >>> finder = doctest.DocTestFinder() + >>> # Use module=test.test_doctest, to prevent doctest from + >>> # ignoring the objects since they weren't defined in m. + >>> import test.test_doctest + >>> tests = finder.find(m, module=test.test_doctest) + >>> tests.sort() + >>> for t in tests: + ... 
print '%2s %s' % (len(t.examples), t.name) + 1 some_module + 3 some_module.SampleClass + 3 some_module.SampleClass.NestedClass + 1 some_module.SampleClass.NestedClass.__init__ + 1 some_module.SampleClass.__init__ + 2 some_module.SampleClass.a_classmethod + 1 some_module.SampleClass.a_property + 1 some_module.SampleClass.a_staticmethod + 1 some_module.SampleClass.double + 1 some_module.SampleClass.get + 1 some_module.__test__.c + 2 some_module.__test__.d + 1 some_module.sample_func + +Duplicate Removal +~~~~~~~~~~~~~~~~~ +If a single object is listed twice (under different names), then tests +will only be generated for it once: + + >>> from test import doctest_aliases + >>> tests = excl_empty_finder.find(doctest_aliases) + >>> tests.sort() + >>> print len(tests) + 2 + >>> print tests[0].name + test.doctest_aliases.TwoNames + + TwoNames.f and TwoNames.g are bound to the same object. + We can't guess which will be found in doctest's traversal of + TwoNames.__dict__ first, so we have to allow for either. + + >>> tests[1].name.split('.')[-1] in ['f', 'g'] + True + +Filter Functions +~~~~~~~~~~~~~~~~ +A filter function can be used to restrict which objects get examined, +but this is temporary, undocumented internal support for testmod's +deprecated isprivate gimmick. + + >>> def namefilter(prefix, base): + ... return base.startswith('a_') + >>> tests = doctest.DocTestFinder(_namefilter=namefilter).find(SampleClass) + >>> tests.sort() + >>> for t in tests: + ... print '%2s %s' % (len(t.examples), t.name) + 3 SampleClass + 3 SampleClass.NestedClass + 1 SampleClass.NestedClass.__init__ + 1 SampleClass.__init__ + 1 SampleClass.double + 1 SampleClass.get + +By default, that excluded objects with no doctests. exclude_empty=False +tells it to include (empty) tests for objects with no doctests. This feature +is really to support backward compatibility in what doctest.master.summarize() +displays. + + >>> tests = doctest.DocTestFinder(_namefilter=namefilter, + ... exclude_empty=False).find(SampleClass) + >>> tests.sort() + >>> for t in tests: + ... print '%2s %s' % (len(t.examples), t.name) + 3 SampleClass + 3 SampleClass.NestedClass + 1 SampleClass.NestedClass.__init__ + 0 SampleClass.NestedClass.get + 0 SampleClass.NestedClass.square + 1 SampleClass.__init__ + 1 SampleClass.double + 1 SampleClass.get + +If a given object is filtered out, then none of the objects that it +contains will be added either: + + >>> def namefilter(prefix, base): + ... return base == 'NestedClass' + >>> tests = doctest.DocTestFinder(_namefilter=namefilter).find(SampleClass) + >>> tests.sort() + >>> for t in tests: + ... print '%2s %s' % (len(t.examples), t.name) + 3 SampleClass + 1 SampleClass.__init__ + 2 SampleClass.a_classmethod + 1 SampleClass.a_property + 1 SampleClass.a_staticmethod + 1 SampleClass.double + 1 SampleClass.get + +The filter function apply to contained objects, and *not* to the +object explicitly passed to DocTestFinder: + + >>> def namefilter(prefix, base): + ... return base == 'SampleClass' + >>> tests = doctest.DocTestFinder(_namefilter=namefilter).find(SampleClass) + >>> len(tests) + 9 + +Turning off Recursion +~~~~~~~~~~~~~~~~~~~~~ +DocTestFinder can be told not to look for tests in contained objects +using the `recurse` flag: + + >>> tests = doctest.DocTestFinder(recurse=False).find(SampleClass) + >>> tests.sort() + >>> for t in tests: + ... 
print '%2s %s' % (len(t.examples), t.name) + 3 SampleClass + +Line numbers +~~~~~~~~~~~~ +DocTestFinder finds the line number of each example: + + >>> def f(x): + ... ''' + ... >>> x = 12 + ... + ... some text + ... + ... >>> # examples are not created for comments & bare prompts. + ... >>> + ... ... + ... + ... >>> for x in range(10): + ... ... print x, + ... 0 1 2 3 4 5 6 7 8 9 + ... >>> x/2 + ... 6 + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> [e.lineno for e in test.examples] + [1, 9, 12] +""" + +def test_DocTestParser(): r""" +Unit tests for the `DocTestParser` class. + +DocTestParser is used to parse docstrings containing doctest examples. + +The `parse` method divides a docstring into examples and intervening +text: + + >>> s = ''' + ... >>> x, y = 2, 3 # no output expected + ... >>> if 1: + ... ... print x + ... ... print y + ... 2 + ... 3 + ... + ... Some text. + ... >>> x+y + ... 5 + ... ''' + >>> parser = doctest.DocTestParser() + >>> for piece in parser.parse(s): + ... if isinstance(piece, doctest.Example): + ... print 'Example:', (piece.source, piece.want, piece.lineno) + ... else: + ... print ' Text:', `piece` + Text: '\n' + Example: ('x, y = 2, 3 # no output expected\n', '', 1) + Text: '' + Example: ('if 1:\n print x\n print y\n', '2\n3\n', 2) + Text: '\nSome text.\n' + Example: ('x+y\n', '5\n', 9) + Text: '' + +The `get_examples` method returns just the examples: + + >>> for piece in parser.get_examples(s): + ... print (piece.source, piece.want, piece.lineno) + ('x, y = 2, 3 # no output expected\n', '', 1) + ('if 1:\n print x\n print y\n', '2\n3\n', 2) + ('x+y\n', '5\n', 9) + +The `get_doctest` method creates a Test from the examples, along with the +given arguments: + + >>> test = parser.get_doctest(s, {}, 'name', 'filename', lineno=5) + >>> (test.name, test.filename, test.lineno) + ('name', 'filename', 5) + >>> for piece in test.examples: + ... print (piece.source, piece.want, piece.lineno) + ('x, y = 2, 3 # no output expected\n', '', 1) + ('if 1:\n print x\n print y\n', '2\n3\n', 2) + ('x+y\n', '5\n', 9) +""" + +class test_DocTestRunner: + def basics(): r""" +Unit tests for the `DocTestRunner` class. + +DocTestRunner is used to run DocTest test cases, and to accumulate +statistics. Here's a simple DocTest case we can use: + + >>> def f(x): + ... ''' + ... >>> x = 12 + ... >>> print x + ... 12 + ... >>> x/2 + ... 6 + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + +The main DocTestRunner interface is the `run` method, which runs a +given DocTest case in a given namespace (globs). It returns a tuple +`(f,t)`, where `f` is the number of failed tests and `t` is the number +of tried tests. + + >>> doctest.DocTestRunner(verbose=False).run(test) + (0, 3) + +If any example produces incorrect output, then the test runner reports +the failure and proceeds to the next example: + + >>> def f(x): + ... ''' + ... >>> x = 12 + ... >>> print x + ... 14 + ... >>> x/2 + ... 6 + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=True).run(test) + ... # doctest: +ELLIPSIS + Trying: + x = 12 + Expecting nothing + ok + Trying: + print x + Expecting: + 14 + ********************************************************************** + File ..., line 4, in f + Failed example: + print x + Expected: + 14 + Got: + 12 + Trying: + x/2 + Expecting: + 6 + ok + (1, 3) +""" + def verbose_flag(): r""" +The `verbose` flag makes the test runner generate more detailed +output: + + >>> def f(x): + ... ''' + ... >>> x = 12 + ... >>> print x + ... 
12 + ... >>> x/2 + ... 6 + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + + >>> doctest.DocTestRunner(verbose=True).run(test) + Trying: + x = 12 + Expecting nothing + ok + Trying: + print x + Expecting: + 12 + ok + Trying: + x/2 + Expecting: + 6 + ok + (0, 3) + +If the `verbose` flag is unspecified, then the output will be verbose +iff `-v` appears in sys.argv: + + >>> # Save the real sys.argv list. + >>> old_argv = sys.argv + + >>> # If -v does not appear in sys.argv, then output isn't verbose. + >>> sys.argv = ['test'] + >>> doctest.DocTestRunner().run(test) + (0, 3) + + >>> # If -v does appear in sys.argv, then output is verbose. + >>> sys.argv = ['test', '-v'] + >>> doctest.DocTestRunner().run(test) + Trying: + x = 12 + Expecting nothing + ok + Trying: + print x + Expecting: + 12 + ok + Trying: + x/2 + Expecting: + 6 + ok + (0, 3) + + >>> # Restore sys.argv + >>> sys.argv = old_argv + +In the remaining examples, the test runner's verbosity will be +explicitly set, to ensure that the test behavior is consistent. + """ + def exceptions(): r""" +Tests of `DocTestRunner`'s exception handling. + +An expected exception is specified with a traceback message. The +lines between the first line and the type/value may be omitted or +replaced with any other string: + + >>> def f(x): + ... ''' + ... >>> x = 12 + ... >>> print x/0 + ... Traceback (most recent call last): + ... ZeroDivisionError: integer division or modulo by zero + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + (0, 2) + +An example may not generate output before it raises an exception; if +it does, then the traceback message will not be recognized as +signaling an expected exception, so the example will be reported as an +unexpected exception: + + >>> def f(x): + ... ''' + ... >>> x = 12 + ... >>> print 'pre-exception output', x/0 + ... pre-exception output + ... Traceback (most recent call last): + ... ZeroDivisionError: integer division or modulo by zero + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 4, in f + Failed example: + print 'pre-exception output', x/0 + Exception raised: + ... + ZeroDivisionError: integer division or modulo by zero + (1, 2) + +Exception messages may contain newlines: + + >>> def f(x): + ... r''' + ... >>> raise ValueError, 'multi\nline\nmessage' + ... Traceback (most recent call last): + ... ValueError: multi + ... line + ... message + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + (0, 1) + +If an exception is expected, but an exception with the wrong type or +message is raised, then it is reported as a failure: + + >>> def f(x): + ... r''' + ... >>> raise ValueError, 'message' + ... Traceback (most recent call last): + ... ValueError: wrong message + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 3, in f + Failed example: + raise ValueError, 'message' + Expected: + Traceback (most recent call last): + ValueError: wrong message + Got: + Traceback (most recent call last): + ... + ValueError: message + (1, 1) + +However, IGNORE_EXCEPTION_DETAIL can be used to allow a mismatch in the +detail: + + >>> def f(x): + ... 
r''' + ... >>> raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL + ... Traceback (most recent call last): + ... ValueError: wrong message + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + (0, 1) + +But IGNORE_EXCEPTION_DETAIL does not allow a mismatch in the exception type: + + >>> def f(x): + ... r''' + ... >>> raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL + ... Traceback (most recent call last): + ... TypeError: wrong type + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 3, in f + Failed example: + raise ValueError, 'message' #doctest: +IGNORE_EXCEPTION_DETAIL + Expected: + Traceback (most recent call last): + TypeError: wrong type + Got: + Traceback (most recent call last): + ... + ValueError: message + (1, 1) + +If an exception is raised but not expected, then it is reported as an +unexpected exception: + + >>> def f(x): + ... r''' + ... >>> 1/0 + ... 0 + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 3, in f + Failed example: + 1/0 + Exception raised: + Traceback (most recent call last): + ... + ZeroDivisionError: integer division or modulo by zero + (1, 1) +""" + def optionflags(): r""" +Tests of `DocTestRunner`'s option flag handling. + +Several option flags can be used to customize the behavior of the test +runner. These are defined as module constants in doctest, and passed +to the DocTestRunner constructor (multiple constants should be or-ed +together). + +The DONT_ACCEPT_TRUE_FOR_1 flag disables matches between True/False +and 1/0: + + >>> def f(x): + ... '>>> True\n1\n' + + >>> # Without the flag: + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + (0, 1) + + >>> # With the flag: + >>> test = doctest.DocTestFinder().find(f)[0] + >>> flags = doctest.DONT_ACCEPT_TRUE_FOR_1 + >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 2, in f + Failed example: + True + Expected: + 1 + Got: + True + (1, 1) + +The DONT_ACCEPT_BLANKLINE flag disables the match between blank lines +and the '' marker: + + >>> def f(x): + ... '>>> print "a\\n\\nb"\na\n\nb\n' + + >>> # Without the flag: + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + (0, 1) + + >>> # With the flag: + >>> test = doctest.DocTestFinder().find(f)[0] + >>> flags = doctest.DONT_ACCEPT_BLANKLINE + >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 2, in f + Failed example: + print "a\n\nb" + Expected: + a + + b + Got: + a + + b + (1, 1) + +The NORMALIZE_WHITESPACE flag causes all sequences of whitespace to be +treated as equal: + + >>> def f(x): + ... '>>> print 1, 2, 3\n 1 2\n 3' + + >>> # Without the flag: + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + ... 
# doctest: +ELLIPSIS + ********************************************************************** + File ..., line 2, in f + Failed example: + print 1, 2, 3 + Expected: + 1 2 + 3 + Got: + 1 2 3 + (1, 1) + + >>> # With the flag: + >>> test = doctest.DocTestFinder().find(f)[0] + >>> flags = doctest.NORMALIZE_WHITESPACE + >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) + (0, 1) + + An example from the docs: + >>> print range(20) #doctest: +NORMALIZE_WHITESPACE + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + +The ELLIPSIS flag causes ellipsis marker ("...") in the expected +output to match any substring in the actual output: + + >>> def f(x): + ... '>>> print range(15)\n[0, 1, 2, ..., 14]\n' + + >>> # Without the flag: + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 2, in f + Failed example: + print range(15) + Expected: + [0, 1, 2, ..., 14] + Got: + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] + (1, 1) + + >>> # With the flag: + >>> test = doctest.DocTestFinder().find(f)[0] + >>> flags = doctest.ELLIPSIS + >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) + (0, 1) + + ... also matches nothing: + + >>> for i in range(100): + ... print i**2, #doctest: +ELLIPSIS + 0 1...4...9 16 ... 36 49 64 ... 9801 + + ... can be surprising; e.g., this test passes: + + >>> for i in range(21): #doctest: +ELLIPSIS + ... print i, + 0 1 2 ...1...2...0 + + Examples from the docs: + + >>> print range(20) # doctest:+ELLIPSIS + [0, 1, ..., 18, 19] + + >>> print range(20) # doctest: +ELLIPSIS + ... # doctest: +NORMALIZE_WHITESPACE + [0, 1, ..., 18, 19] + +The REPORT_UDIFF flag causes failures that involve multi-line expected +and actual outputs to be displayed using a unified diff: + + >>> def f(x): + ... r''' + ... >>> print '\n'.join('abcdefg') + ... a + ... B + ... c + ... d + ... f + ... g + ... h + ... ''' + + >>> # Without the flag: + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 3, in f + Failed example: + print '\n'.join('abcdefg') + Expected: + a + B + c + d + f + g + h + Got: + a + b + c + d + e + f + g + (1, 1) + + >>> # With the flag: + >>> test = doctest.DocTestFinder().find(f)[0] + >>> flags = doctest.REPORT_UDIFF + >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 3, in f + Failed example: + print '\n'.join('abcdefg') + Differences (unified diff with -expected +actual): + @@ -1,7 +1,7 @@ + a + -B + +b + c + d + +e + f + g + -h + (1, 1) + +The REPORT_CDIFF flag causes failures that involve multi-line expected +and actual outputs to be displayed using a context diff: + + >>> # Reuse f() from the REPORT_UDIFF example, above. + >>> test = doctest.DocTestFinder().find(f)[0] + >>> flags = doctest.REPORT_CDIFF + >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 3, in f + Failed example: + print '\n'.join('abcdefg') + Differences (context diff with expected followed by actual): + *************** + *** 1,7 **** + a + ! 
B + c + d + f + g + - h + --- 1,7 ---- + a + ! b + c + d + + e + f + g + (1, 1) + + +The REPORT_NDIFF flag causes failures to use the difflib.Differ algorithm +used by the popular ndiff.py utility. This does intraline difference +marking, as well as interline differences. + + >>> def f(x): + ... r''' + ... >>> print "a b c d e f g h i j k l m" + ... a b c d e f g h i j k 1 m + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> flags = doctest.REPORT_NDIFF + >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 3, in f + Failed example: + print "a b c d e f g h i j k l m" + Differences (ndiff with -expected +actual): + - a b c d e f g h i j k 1 m + ? ^ + + a b c d e f g h i j k l m + ? + ++ ^ + (1, 1) + +The REPORT_ONLY_FIRST_FAILURE supresses result output after the first +failing example: + + >>> def f(x): + ... r''' + ... >>> print 1 # first success + ... 1 + ... >>> print 2 # first failure + ... 200 + ... >>> print 3 # second failure + ... 300 + ... >>> print 4 # second success + ... 4 + ... >>> print 5 # third failure + ... 500 + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> flags = doctest.REPORT_ONLY_FIRST_FAILURE + >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 5, in f + Failed example: + print 2 # first failure + Expected: + 200 + Got: + 2 + (3, 5) + +However, output from `report_start` is not supressed: + + >>> doctest.DocTestRunner(verbose=True, optionflags=flags).run(test) + ... # doctest: +ELLIPSIS + Trying: + print 1 # first success + Expecting: + 1 + ok + Trying: + print 2 # first failure + Expecting: + 200 + ********************************************************************** + File ..., line 5, in f + Failed example: + print 2 # first failure + Expected: + 200 + Got: + 2 + (3, 5) + +For the purposes of REPORT_ONLY_FIRST_FAILURE, unexpected exceptions +count as failures: + + >>> def f(x): + ... r''' + ... >>> print 1 # first success + ... 1 + ... >>> raise ValueError(2) # first failure + ... 200 + ... >>> print 3 # second failure + ... 300 + ... >>> print 4 # second success + ... 4 + ... >>> print 5 # third failure + ... 500 + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> flags = doctest.REPORT_ONLY_FIRST_FAILURE + >>> doctest.DocTestRunner(verbose=False, optionflags=flags).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 5, in f + Failed example: + raise ValueError(2) # first failure + Exception raised: + ... + ValueError: 2 + (3, 5) + + """ + + def option_directives(): r""" +Tests of `DocTestRunner`'s option directive mechanism. + +Option directives can be used to turn option flags on or off for a +single example. To turn an option on for an example, follow that +example with a comment of the form ``# doctest: +OPTION``: + + >>> def f(x): r''' + ... >>> print range(10) # should fail: no ellipsis + ... [0, 1, ..., 9] + ... + ... >>> print range(10) # doctest: +ELLIPSIS + ... [0, 1, ..., 9] + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + ... 
# doctest: +ELLIPSIS + ********************************************************************** + File ..., line 2, in f + Failed example: + print range(10) # should fail: no ellipsis + Expected: + [0, 1, ..., 9] + Got: + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + (1, 2) + +To turn an option off for an example, follow that example with a +comment of the form ``# doctest: -OPTION``: + + >>> def f(x): r''' + ... >>> print range(10) + ... [0, 1, ..., 9] + ... + ... >>> # should fail: no ellipsis + ... >>> print range(10) # doctest: -ELLIPSIS + ... [0, 1, ..., 9] + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False, + ... optionflags=doctest.ELLIPSIS).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 6, in f + Failed example: + print range(10) # doctest: -ELLIPSIS + Expected: + [0, 1, ..., 9] + Got: + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + (1, 2) + +Option directives affect only the example that they appear with; they +do not change the options for surrounding examples: + + >>> def f(x): r''' + ... >>> print range(10) # Should fail: no ellipsis + ... [0, 1, ..., 9] + ... + ... >>> print range(10) # doctest: +ELLIPSIS + ... [0, 1, ..., 9] + ... + ... >>> print range(10) # Should fail: no ellipsis + ... [0, 1, ..., 9] + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 2, in f + Failed example: + print range(10) # Should fail: no ellipsis + Expected: + [0, 1, ..., 9] + Got: + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + ********************************************************************** + File ..., line 8, in f + Failed example: + print range(10) # Should fail: no ellipsis + Expected: + [0, 1, ..., 9] + Got: + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + (2, 3) + +Multiple options may be modified by a single option directive. They +may be separated by whitespace, commas, or both: + + >>> def f(x): r''' + ... >>> print range(10) # Should fail + ... [0, 1, ..., 9] + ... >>> print range(10) # Should succeed + ... ... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE + ... [0, 1, ..., 9] + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 2, in f + Failed example: + print range(10) # Should fail + Expected: + [0, 1, ..., 9] + Got: + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + (1, 2) + + >>> def f(x): r''' + ... >>> print range(10) # Should fail + ... [0, 1, ..., 9] + ... >>> print range(10) # Should succeed + ... ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE + ... [0, 1, ..., 9] + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + ... # doctest: +ELLIPSIS + ********************************************************************** + File ..., line 2, in f + Failed example: + print range(10) # Should fail + Expected: + [0, 1, ..., 9] + Got: + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + (1, 2) + + >>> def f(x): r''' + ... >>> print range(10) # Should fail + ... [0, 1, ..., 9] + ... >>> print range(10) # Should succeed + ... ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE + ... [0, 1, ..., 9] + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + ... 
# doctest: +ELLIPSIS + ********************************************************************** + File ..., line 2, in f + Failed example: + print range(10) # Should fail + Expected: + [0, 1, ..., 9] + Got: + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + (1, 2) + +The option directive may be put on the line following the source, as +long as a continuation prompt is used: + + >>> def f(x): r''' + ... >>> print range(10) + ... ... # doctest: +ELLIPSIS + ... [0, 1, ..., 9] + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + (0, 1) + +For examples with multi-line source, the option directive may appear +at the end of any line: + + >>> def f(x): r''' + ... >>> for x in range(10): # doctest: +ELLIPSIS + ... ... print x, + ... 0 1 2 ... 9 + ... + ... >>> for x in range(10): + ... ... print x, # doctest: +ELLIPSIS + ... 0 1 2 ... 9 + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + (0, 2) + +If more than one line of an example with multi-line source has an +option directive, then they are combined: + + >>> def f(x): r''' + ... Should fail (option directive not on the last line): + ... >>> for x in range(10): # doctest: +ELLIPSIS + ... ... print x, # doctest: +NORMALIZE_WHITESPACE + ... 0 1 2...9 + ... ''' + >>> test = doctest.DocTestFinder().find(f)[0] + >>> doctest.DocTestRunner(verbose=False).run(test) + (0, 1) + +It is an error to have a comment of the form ``# doctest:`` that is +*not* followed by words of the form ``+OPTION`` or ``-OPTION``, where +``OPTION`` is an option that has been registered with +`register_option`: + + >>> # Error: Option not registered + >>> s = '>>> print 12 #doctest: +BADOPTION' + >>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0) + Traceback (most recent call last): + ValueError: line 1 of the doctest for s has an invalid option: '+BADOPTION' + + >>> # Error: No + or - prefix + >>> s = '>>> print 12 #doctest: ELLIPSIS' + >>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0) + Traceback (most recent call last): + ValueError: line 1 of the doctest for s has an invalid option: 'ELLIPSIS' + +It is an error to use an option directive on a line that contains no +source: + + >>> s = '>>> # doctest: +ELLIPSIS' + >>> test = doctest.DocTestParser().get_doctest(s, {}, 's', 's.py', 0) + Traceback (most recent call last): + ValueError: line 0 of the doctest for s has an option directive on a line with no example: '# doctest: +ELLIPSIS' +""" + +def test_testsource(): r""" +Unit tests for `testsource()`. + +The testsource() function takes a module and a name, finds the (first) +test with that name in that module, and converts it to a script. The +example code is converted to regular Python code. The surrounding +words and expected output are converted to comments: + + >>> import test.test_doctest + >>> name = 'test.test_doctest.sample_func' + >>> print doctest.testsource(test.test_doctest, name) + # Blah blah + # + print sample_func(22) + # Expected: + ## 44 + # + # Yee ha! 
+ + >>> name = 'test.test_doctest.SampleNewStyleClass' + >>> print doctest.testsource(test.test_doctest, name) + print '1\n2\n3' + # Expected: + ## 1 + ## 2 + ## 3 + + >>> name = 'test.test_doctest.SampleClass.a_classmethod' + >>> print doctest.testsource(test.test_doctest, name) + print SampleClass.a_classmethod(10) + # Expected: + ## 12 + print SampleClass(0).a_classmethod(10) + # Expected: + ## 12 +""" + +def test_debug(): r""" + +Create a docstring that we want to debug: + + >>> s = ''' + ... >>> x = 12 + ... >>> print x + ... 12 + ... ''' + +Create some fake stdin input, to feed to the debugger: + + >>> import tempfile + >>> real_stdin = sys.stdin + >>> sys.stdin = _FakeInput(['next', 'print x', 'continue']) + +Run the debugger on the docstring, and then restore sys.stdin. + + >>> try: doctest.debug_src(s) + ... finally: sys.stdin = real_stdin + > (1)?() + (Pdb) next + 12 + --Return-- + > (1)?()->None + (Pdb) print x + 12 + (Pdb) continue + +""" + +def test_pdb_set_trace(): + """Using pdb.set_trace from a doctest. + + You can use pdb.set_trace from a doctest. To do so, you must + retrieve the set_trace function from the pdb module at the time + you use it. The doctest module changes sys.stdout so that it can + capture program output. It also temporarily replaces pdb.set_trace + with a version that restores stdout. This is necessary for you to + see debugger output. + + >>> doc = ''' + ... >>> x = 42 + ... >>> import pdb; pdb.set_trace() + ... ''' + >>> parser = doctest.DocTestParser() + >>> test = parser.get_doctest(doc, {}, "foo", "foo.py", 0) + >>> runner = doctest.DocTestRunner(verbose=False) + + To demonstrate this, we'll create a fake standard input that + captures our debugger input: + + >>> import tempfile + >>> real_stdin = sys.stdin + >>> sys.stdin = _FakeInput([ + ... 'print x', # print data defined by the example + ... 'continue', # stop debugging + ... '']) + + >>> try: runner.run(test) + ... finally: sys.stdin = real_stdin + --Return-- + > (1)?()->None + -> import pdb; pdb.set_trace() + (Pdb) print x + 42 + (Pdb) continue + (0, 2) + + You can also put pdb.set_trace in a function called from a test: + + >>> def calls_set_trace(): + ... y=2 + ... import pdb; pdb.set_trace() + + >>> doc = ''' + ... >>> x=1 + ... >>> calls_set_trace() + ... ''' + >>> test = parser.get_doctest(doc, globals(), "foo", "foo.py", 0) + >>> real_stdin = sys.stdin + >>> sys.stdin = _FakeInput([ + ... 'print y', # print data defined in the function + ... 'up', # out of function + ... 'print x', # print data defined by the example + ... 'continue', # stop debugging + ... '']) + + >>> try: + ... runner.run(test) + ... finally: + ... sys.stdin = real_stdin + --Return-- + > (3)calls_set_trace()->None + -> import pdb; pdb.set_trace() + (Pdb) print y + 2 + (Pdb) up + > (1)?() + -> calls_set_trace() + (Pdb) print x + 1 + (Pdb) continue + (0, 2) + + During interactive debugging, source code is shown, even for + doctest examples: + + >>> doc = ''' + ... >>> def f(x): + ... ... g(x*2) + ... >>> def g(x): + ... ... print x+3 + ... ... import pdb; pdb.set_trace() + ... >>> f(3) + ... ''' + >>> test = parser.get_doctest(doc, globals(), "foo", "foo.py", 0) + >>> real_stdin = sys.stdin + >>> sys.stdin = _FakeInput([ + ... 'list', # list source from example 2 + ... 'next', # return from g() + ... 'list', # list source from example 1 + ... 'next', # return from f() + ... 'list', # list source from example 3 + ... 'continue', # stop debugging + ... '']) + >>> try: runner.run(test) + ... 
finally: sys.stdin = real_stdin + ... # doctest: +NORMALIZE_WHITESPACE + --Return-- + > (3)g()->None + -> import pdb; pdb.set_trace() + (Pdb) list + 1 def g(x): + 2 print x+3 + 3 -> import pdb; pdb.set_trace() + [EOF] + (Pdb) next + --Return-- + > (2)f()->None + -> g(x*2) + (Pdb) list + 1 def f(x): + 2 -> g(x*2) + [EOF] + (Pdb) next + --Return-- + > (1)?()->None + -> f(3) + (Pdb) list + 1 -> f(3) + [EOF] + (Pdb) continue + ********************************************************************** + File "foo.py", line 7, in foo + Failed example: + f(3) + Expected nothing + Got: + 9 + (1, 3) + """ + +def test_pdb_set_trace_nested(): + """This illustrates more-demanding use of set_trace with nested functions. + + >>> class C(object): + ... def calls_set_trace(self): + ... y = 1 + ... import pdb; pdb.set_trace() + ... self.f1() + ... y = 2 + ... def f1(self): + ... x = 1 + ... self.f2() + ... x = 2 + ... def f2(self): + ... z = 1 + ... z = 2 + + >>> calls_set_trace = C().calls_set_trace + + >>> doc = ''' + ... >>> a = 1 + ... >>> calls_set_trace() + ... ''' + >>> parser = doctest.DocTestParser() + >>> runner = doctest.DocTestRunner(verbose=False) + >>> test = parser.get_doctest(doc, globals(), "foo", "foo.py", 0) + >>> real_stdin = sys.stdin + >>> sys.stdin = _FakeInput([ + ... 'print y', # print data defined in the function + ... 'step', 'step', 'step', 'step', 'step', 'step', 'print z', + ... 'up', 'print x', + ... 'up', 'print y', + ... 'up', 'print foo', + ... 'continue', # stop debugging + ... '']) + + >>> try: + ... runner.run(test) + ... finally: + ... sys.stdin = real_stdin + > (5)calls_set_trace() + -> self.f1() + (Pdb) print y + 1 + (Pdb) step + --Call-- + > (7)f1() + -> def f1(self): + (Pdb) step + > (8)f1() + -> x = 1 + (Pdb) step + > (9)f1() + -> self.f2() + (Pdb) step + --Call-- + > (11)f2() + -> def f2(self): + (Pdb) step + > (12)f2() + -> z = 1 + (Pdb) step + > (13)f2() + -> z = 2 + (Pdb) print z + 1 + (Pdb) up + > (9)f1() + -> self.f2() + (Pdb) print x + 1 + (Pdb) up + > (5)calls_set_trace() + -> self.f1() + (Pdb) print y + 1 + (Pdb) up + > (1)?() + -> calls_set_trace() + (Pdb) print foo + *** NameError: name 'foo' is not defined + (Pdb) continue + (0, 2) +""" + +def test_DocTestSuite(): + """DocTestSuite creates a unittest test suite from a doctest. + + We create a Suite by providing a module. A module can be provided + by passing a module object: + + >>> import unittest + >>> import test.sample_doctest + >>> suite = doctest.DocTestSuite(test.sample_doctest) + >>> suite.run(unittest.TestResult()) + + + We can also supply the module by name: + + >>> suite = doctest.DocTestSuite('test.sample_doctest') + >>> suite.run(unittest.TestResult()) + + + We can use the current module: + + >>> suite = test.sample_doctest.test_suite() + >>> suite.run(unittest.TestResult()) + + + We can supply global variables. If we pass globs, they will be + used instead of the module globals. Here we'll pass an empty + globals, triggering an extra error: + + >>> suite = doctest.DocTestSuite('test.sample_doctest', globs={}) + >>> suite.run(unittest.TestResult()) + + + Alternatively, we can provide extra globals. Here we'll make an + error go away by providing an extra global variable: + + >>> suite = doctest.DocTestSuite('test.sample_doctest', + ... extraglobs={'y': 1}) + >>> suite.run(unittest.TestResult()) + + + You can pass option flags. Here we'll cause an extra error + by disabling the blank-line feature: + + >>> suite = doctest.DocTestSuite('test.sample_doctest', + ... 
optionflags=doctest.DONT_ACCEPT_BLANKLINE) + >>> suite.run(unittest.TestResult()) + + + You can supply setUp and tearDown functions: + + >>> def setUp(t): + ... import test.test_doctest + ... test.test_doctest.sillySetup = True + + >>> def tearDown(t): + ... import test.test_doctest + ... del test.test_doctest.sillySetup + + Here, we installed a silly variable that the test expects: + + >>> suite = doctest.DocTestSuite('test.sample_doctest', + ... setUp=setUp, tearDown=tearDown) + >>> suite.run(unittest.TestResult()) + + + But the tearDown restores sanity: + + >>> import test.test_doctest + >>> test.test_doctest.sillySetup + Traceback (most recent call last): + ... + AttributeError: 'module' object has no attribute 'sillySetup' + + The setUp and tearDown funtions are passed test objects. Here + we'll use the setUp function to supply the missing variable y: + + >>> def setUp(test): + ... test.globs['y'] = 1 + + >>> suite = doctest.DocTestSuite('test.sample_doctest', setUp=setUp) + >>> suite.run(unittest.TestResult()) + + + Here, we didn't need to use a tearDown function because we + modified the test globals, which are a copy of the + sample_doctest module dictionary. The test globals are + automatically cleared for us after a test. + + Finally, you can provide an alternate test finder. Here we'll + use a custom test_finder to to run just the test named bar. + However, the test in the module docstring, and the two tests + in the module __test__ dict, aren't filtered, so we actually + run three tests besides bar's. The filtering mechanisms are + poorly conceived, and will go away someday. + + >>> finder = doctest.DocTestFinder( + ... _namefilter=lambda prefix, base: base!='bar') + >>> suite = doctest.DocTestSuite('test.sample_doctest', + ... test_finder=finder) + >>> suite.run(unittest.TestResult()) + + """ + +def test_DocFileSuite(): + """We can test tests found in text files using a DocFileSuite. + + We create a suite by providing the names of one or more text + files that include examples: + + >>> import unittest + >>> suite = doctest.DocFileSuite('test_doctest.txt', + ... 'test_doctest2.txt') + >>> suite.run(unittest.TestResult()) + + + The test files are looked for in the directory containing the + calling module. A package keyword argument can be provided to + specify a different relative location. + + >>> import unittest + >>> suite = doctest.DocFileSuite('test_doctest.txt', + ... 'test_doctest2.txt', + ... package='test') + >>> suite.run(unittest.TestResult()) + + + '/' should be used as a path separator. It will be converted + to a native separator at run time: + + >>> suite = doctest.DocFileSuite('../test/test_doctest.txt') + >>> suite.run(unittest.TestResult()) + + + If DocFileSuite is used from an interactive session, then files + are resolved relative to the directory of sys.argv[0]: + + >>> import new, os.path, test.test_doctest + >>> save_argv = sys.argv + >>> sys.argv = [test.test_doctest.__file__] + >>> suite = doctest.DocFileSuite('test_doctest.txt', + ... package=new.module('__main__')) + >>> sys.argv = save_argv + + By setting `module_relative=False`, os-specific paths may be + used (including absolute paths and paths relative to the + working directory): + + >>> # Get the absolute path of the test package. + >>> test_doctest_path = os.path.abspath(test.test_doctest.__file__) + >>> test_pkg_path = os.path.split(test_doctest_path)[0] + + >>> # Use it to find the absolute path of test_doctest.txt. 
+ >>> test_file = os.path.join(test_pkg_path, 'test_doctest.txt') + + >>> suite = doctest.DocFileSuite(test_file, module_relative=False) + >>> suite.run(unittest.TestResult()) + + + It is an error to specify `package` when `module_relative=False`: + + >>> suite = doctest.DocFileSuite(test_file, module_relative=False, + ... package='test') + Traceback (most recent call last): + ValueError: Package may only be specified for module-relative paths. + + You can specify initial global variables: + + >>> suite = doctest.DocFileSuite('test_doctest.txt', + ... 'test_doctest2.txt', + ... globs={'favorite_color': 'blue'}) + >>> suite.run(unittest.TestResult()) + + + In this case, we supplied a missing favorite color. You can + provide doctest options: + + >>> suite = doctest.DocFileSuite('test_doctest.txt', + ... 'test_doctest2.txt', + ... optionflags=doctest.DONT_ACCEPT_BLANKLINE, + ... globs={'favorite_color': 'blue'}) + >>> suite.run(unittest.TestResult()) + + + And, you can provide setUp and tearDown functions: + + You can supply setUp and teatDoen functions: + + >>> def setUp(t): + ... import test.test_doctest + ... test.test_doctest.sillySetup = True + + >>> def tearDown(t): + ... import test.test_doctest + ... del test.test_doctest.sillySetup + + Here, we installed a silly variable that the test expects: + + >>> suite = doctest.DocFileSuite('test_doctest.txt', + ... 'test_doctest2.txt', + ... setUp=setUp, tearDown=tearDown) + >>> suite.run(unittest.TestResult()) + + + But the tearDown restores sanity: + + >>> import test.test_doctest + >>> test.test_doctest.sillySetup + Traceback (most recent call last): + ... + AttributeError: 'module' object has no attribute 'sillySetup' + + The setUp and tearDown funtions are passed test objects. + Here, we'll use a setUp function to set the favorite color in + test_doctest.txt: + + >>> def setUp(test): + ... test.globs['favorite_color'] = 'blue' + + >>> suite = doctest.DocFileSuite('test_doctest.txt', setUp=setUp) + >>> suite.run(unittest.TestResult()) + + + Here, we didn't need to use a tearDown function because we + modified the test globals. The test globals are + automatically cleared for us after a test. + + """ + +def test_trailing_space_in_test(): + """ + Trailing spaces in expected output are significant: + + >>> x, y = 'foo', '' + >>> print x, y + foo \n + """ + + +def test_unittest_reportflags(): + """Default unittest reporting flags can be set to control reporting + + Here, we'll set the REPORT_ONLY_FIRST_FAILURE option so we see + only the first failure of each test. First, we'll look at the + output without the flag. The file test_doctest.txt file has two + tests. They both fail if blank lines are disabled: + + >>> suite = doctest.DocFileSuite('test_doctest.txt', + ... optionflags=doctest.DONT_ACCEPT_BLANKLINE) + >>> import unittest + >>> result = suite.run(unittest.TestResult()) + >>> print result.failures[0][1] # doctest: +ELLIPSIS + Traceback ... + Failed example: + favorite_color + ... + Failed example: + if 1: + ... + + Note that we see both failures displayed. + + >>> old = doctest.set_unittest_reportflags( + ... doctest.REPORT_ONLY_FIRST_FAILURE) + + Now, when we run the test: + + >>> result = suite.run(unittest.TestResult()) + >>> print result.failures[0][1] # doctest: +ELLIPSIS + Traceback ... + Failed example: + favorite_color + Exception raised: + ... + NameError: name 'favorite_color' is not defined + + + + We get only the first failure. 
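As an aside, the same flag switch can be driven from ordinary runner code rather than from within a doctest; a minimal sketch using the file and options from the examples above:

    import unittest, doctest
    suite = doctest.DocFileSuite('test_doctest.txt',
                                 optionflags=doctest.DONT_ACCEPT_BLANKLINE)
    # Show only the first failing example per test, then restore the old flags.
    old = doctest.set_unittest_reportflags(doctest.REPORT_ONLY_FIRST_FAILURE)
    try:
        result = suite.run(unittest.TestResult())
        print len(result.failures), "failure(s)"
    finally:
        doctest.set_unittest_reportflags(old)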
+ + If we give any reporting options when we set up the tests, + however: + + >>> suite = doctest.DocFileSuite('test_doctest.txt', + ... optionflags=doctest.DONT_ACCEPT_BLANKLINE | doctest.REPORT_NDIFF) + + Then the default eporting options are ignored: + + >>> result = suite.run(unittest.TestResult()) + >>> print result.failures[0][1] # doctest: +ELLIPSIS + Traceback ... + Failed example: + favorite_color + ... + Failed example: + if 1: + print 'a' + print + print 'b' + Differences (ndiff with -expected +actual): + a + - + + + b + + + + + Test runners can restore the formatting flags after they run: + + >>> ignored = doctest.set_unittest_reportflags(old) + + """ + +def test_testfile(): r""" +Tests for the `testfile()` function. This function runs all the +doctest examples in a given file. In its simple invokation, it is +called with the name of a file, which is taken to be relative to the +calling module. The return value is (#failures, #tests). + + >>> doctest.testfile('test_doctest.txt') # doctest: +ELLIPSIS + ********************************************************************** + File "...", line 6, in test_doctest.txt + Failed example: + favorite_color + Exception raised: + ... + NameError: name 'favorite_color' is not defined + ********************************************************************** + 1 items had failures: + 1 of 2 in test_doctest.txt + ***Test Failed*** 1 failures. + (1, 2) + >>> doctest.master = None # Reset master. + +(Note: we'll be clearing doctest.master after each call to +`doctest.testfile`, to supress warnings about multiple tests with the +same name.) + +Globals may be specified with the `globs` and `extraglobs` parameters: + + >>> globs = {'favorite_color': 'blue'} + >>> doctest.testfile('test_doctest.txt', globs=globs) + (0, 2) + >>> doctest.master = None # Reset master. + + >>> extraglobs = {'favorite_color': 'red'} + >>> doctest.testfile('test_doctest.txt', globs=globs, + ... extraglobs=extraglobs) # doctest: +ELLIPSIS + ********************************************************************** + File "...", line 6, in test_doctest.txt + Failed example: + favorite_color + Expected: + 'blue' + Got: + 'red' + ********************************************************************** + 1 items had failures: + 1 of 2 in test_doctest.txt + ***Test Failed*** 1 failures. + (1, 2) + >>> doctest.master = None # Reset master. + +The file may be made relative to a given module or package, using the +optional `module_relative` parameter: + + >>> doctest.testfile('test_doctest.txt', globs=globs, + ... module_relative='test') + (0, 2) + >>> doctest.master = None # Reset master. + +Verbosity can be increased with the optional `verbose` paremter: + + >>> doctest.testfile('test_doctest.txt', globs=globs, verbose=True) + Trying: + favorite_color + Expecting: + 'blue' + ok + Trying: + if 1: + print 'a' + print + print 'b' + Expecting: + a + + b + ok + 1 items passed all tests: + 2 tests in test_doctest.txt + 2 tests in 1 items. + 2 passed and 0 failed. + Test passed. + (0, 2) + >>> doctest.master = None # Reset master. + +The name of the test may be specified with the optional `name` +parameter: + + >>> doctest.testfile('test_doctest.txt', name='newname') + ... # doctest: +ELLIPSIS + ********************************************************************** + File "...", line 6, in newname + ... + (1, 2) + >>> doctest.master = None # Reset master. 
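Outside of this test module, testfile's return value is what a small stand-alone runner would act on; a rough sketch (the .txt file name here is only illustrative):

    import sys, doctest
    # Resolved relative to the calling module unless module_relative=False.
    failures, tests = doctest.testfile('example.txt')
    print "%d of %d examples failed" % (failures, tests)
    sys.exit(failures and 1 or 0)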
+ +The summary report may be supressed with the optional `report` +parameter: + + >>> doctest.testfile('test_doctest.txt', report=False) + ... # doctest: +ELLIPSIS + ********************************************************************** + File "...", line 6, in test_doctest.txt + Failed example: + favorite_color + Exception raised: + ... + NameError: name 'favorite_color' is not defined + (1, 2) + >>> doctest.master = None # Reset master. + +The optional keyword argument `raise_on_error` can be used to raise an +exception on the first error (which may be useful for postmortem +debugging): + + >>> doctest.testfile('test_doctest.txt', raise_on_error=True) + ... # doctest: +ELLIPSIS + Traceback (most recent call last): + UnexpectedException: ... + >>> doctest.master = None # Reset master. +""" + +# old_test1, ... used to live in doctest.py, but cluttered it. Note +# that these use the deprecated doctest.Tester, so should go away (or +# be rewritten) someday. + +# Ignore all warnings about the use of class Tester in this module. +# Note that the name of this module may differ depending on how it's +# imported, so the use of __name__ is important. +warnings.filterwarnings("ignore", "class Tester", DeprecationWarning, + __name__, 0) + +def old_test1(): r""" +>>> from doctest import Tester +>>> t = Tester(globs={'x': 42}, verbose=0) +>>> t.runstring(r''' +... >>> x = x * 2 +... >>> print x +... 42 +... ''', 'XYZ') +********************************************************************** +Line 3, in XYZ +Failed example: + print x +Expected: + 42 +Got: + 84 +(1, 2) +>>> t.runstring(">>> x = x * 2\n>>> print x\n84\n", 'example2') +(0, 2) +>>> t.summarize() +********************************************************************** +1 items had failures: + 1 of 2 in XYZ +***Test Failed*** 1 failures. +(1, 4) +>>> t.summarize(verbose=1) +1 items passed all tests: + 2 tests in example2 +********************************************************************** +1 items had failures: + 1 of 2 in XYZ +4 tests in 2 items. +3 passed and 1 failed. +***Test Failed*** 1 failures. +(1, 4) +""" + +def old_test2(): r""" + >>> from doctest import Tester + >>> t = Tester(globs={}, verbose=1) + >>> test = r''' + ... # just an example + ... >>> x = 1 + 2 + ... >>> x + ... 3 + ... ''' + >>> t.runstring(test, "Example") + Running string Example + Trying: + x = 1 + 2 + Expecting nothing + ok + Trying: + x + Expecting: + 3 + ok + 0 of 2 examples failed in string Example + (0, 2) +""" + +def old_test3(): r""" + >>> from doctest import Tester + >>> t = Tester(globs={}, verbose=0) + >>> def _f(): + ... '''Trivial docstring example. + ... >>> assert 2 == 2 + ... ''' + ... return 32 + ... + >>> t.rundoc(_f) # expect 0 failures in 1 example + (0, 1) +""" + +def old_test4(): """ + >>> import new + >>> m1 = new.module('_m1') + >>> m2 = new.module('_m2') + >>> test_data = \""" + ... def _f(): + ... '''>>> assert 1 == 1 + ... ''' + ... def g(): + ... '''>>> assert 2 != 1 + ... ''' + ... class H: + ... '''>>> assert 2 > 1 + ... ''' + ... def bar(self): + ... '''>>> assert 1 < 2 + ... ''' + ... 
\""" + >>> exec test_data in m1.__dict__ + >>> exec test_data in m2.__dict__ + >>> m1.__dict__.update({"f2": m2._f, "g2": m2.g, "h2": m2.H}) + + Tests that objects outside m1 are excluded: + + >>> from doctest import Tester + >>> t = Tester(globs={}, verbose=0) + >>> t.rundict(m1.__dict__, "rundict_test", m1) # f2 and g2 and h2 skipped + (0, 4) + + Once more, not excluding stuff outside m1: + + >>> t = Tester(globs={}, verbose=0) + >>> t.rundict(m1.__dict__, "rundict_test_pvt") # None are skipped. + (0, 8) + + The exclusion of objects from outside the designated module is + meant to be invoked automagically by testmod. + + >>> doctest.testmod(m1, verbose=False) + (0, 4) +""" + +###################################################################### +## Main +###################################################################### + +def test_main(): + # Check the doctest cases in doctest itself: + test_support.run_doctest(doctest, verbosity=True) + # Check the doctest cases defined here: + from test import test_doctest + test_support.run_doctest(test_doctest, verbosity=True) + +import trace, sys, re, StringIO +def test_coverage(coverdir): + tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,], + trace=0, count=1) + tracer.run('reload(doctest); test_main()') + r = tracer.results() + print 'Writing coverage results...' + r.write_results(show_missing=True, summary=True, + coverdir=coverdir) + +if __name__ == '__main__': + if '-c' in sys.argv: + test_coverage('/tmp/doctest.cover') + else: + test_main() Added: py/branch/dist-doctest/py/compat/testing/test_doctest.txt ============================================================================== --- (empty file) +++ py/branch/dist-doctest/py/compat/testing/test_doctest.txt Tue Jun 14 11:14:48 2005 @@ -0,0 +1,17 @@ +This is a sample doctest in a text file. + +In this example, we'll rely on a global variable being set for us +already: + + >>> favorite_color + 'blue' + +We can make this fail by disabling the blank-line feature. + + >>> if 1: + ... print 'a' + ... print + ... print 'b' + a + + b Added: py/branch/dist-doctest/py/compat/testing/test_doctest2.py ============================================================================== --- (empty file) +++ py/branch/dist-doctest/py/compat/testing/test_doctest2.py Tue Jun 14 11:14:48 2005 @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- +u"""A module to test whether doctest recognizes some 2.2 features, +like static and class methods. + +>>> print 'yup' # 1 +yup + +We include some (random) encoded (utf-8) text in the text surrounding +the example. It should be ignored: + +????? + +""" + +from test import test_support + +class C(object): + u"""Class C. + + >>> print C() # 2 + 42 + + + We include some (random) encoded (utf-8) text in the text surrounding + the example. It should be ignored: + + ????? + + """ + + def __init__(self): + """C.__init__. + + >>> print C() # 3 + 42 + """ + + def __str__(self): + """ + >>> print C() # 4 + 42 + """ + return "42" + + class D(object): + """A nested D class. + + >>> print "In D!" # 5 + In D! + """ + + def nested(self): + """ + >>> print 3 # 6 + 3 + """ + + def getx(self): + """ + >>> c = C() # 7 + >>> c.x = 12 # 8 + >>> print c.x # 9 + -12 + """ + return -self._x + + def setx(self, value): + """ + >>> c = C() # 10 + >>> c.x = 12 # 11 + >>> print c.x # 12 + -12 + """ + self._x = value + + x = property(getx, setx, doc="""\ + >>> c = C() # 13 + >>> c.x = 12 # 14 + >>> print c.x # 15 + -12 + """) + + def statm(): + """ + A static method. 
+ + >>> print C.statm() # 16 + 666 + >>> print C().statm() # 17 + 666 + """ + return 666 + + statm = staticmethod(statm) + + def clsm(cls, val): + """ + A class method. + + >>> print C.clsm(22) # 18 + 22 + >>> print C().clsm(23) # 19 + 23 + """ + return val + + clsm = classmethod(clsm) + +def test_main(): + from test import test_doctest2 + EXPECTED = 19 + f, t = test_support.run_doctest(test_doctest2) + if t != EXPECTED: + raise test_support.TestFailed("expected %d tests to run, not %d" % + (EXPECTED, t)) + +# Pollute the namespace with a bunch of imported functions and classes, +# to make sure they don't get tested. +from doctest import * + +if __name__ == '__main__': + test_main() Added: py/branch/dist-doctest/py/compat/testing/test_doctest2.txt ============================================================================== --- (empty file) +++ py/branch/dist-doctest/py/compat/testing/test_doctest2.txt Tue Jun 14 11:14:48 2005 @@ -0,0 +1,14 @@ +This is a sample doctest in a text file. + +In this example, we'll rely on some silly setup: + + >>> import test.test_doctest + >>> test.test_doctest.sillySetup + True + +This test also has some (random) encoded (utf-8) unicode text: + + ????????????? + +This doesn't cause a problem in the tect surrounding the examples, but +we include it here (in this test text file) to make sure. :) Added: py/branch/dist-doctest/py/compat/textwrap.py ============================================================================== --- (empty file) +++ py/branch/dist-doctest/py/compat/textwrap.py Tue Jun 14 11:14:48 2005 @@ -0,0 +1,354 @@ +"""Text wrapping and filling. +""" + +# Copyright (C) 1999-2001 Gregory P. Ward. +# Copyright (C) 2002, 2003 Python Software Foundation. +# Written by Greg Ward + +__revision__ = "$Id$" + +import string, re + +# Do the right thing with boolean values for all known Python versions +# (so this module can be copied to projects that don't depend on Python +# 2.3, e.g. Optik and Docutils). +try: + True, False +except NameError: + (True, False) = (1, 0) + +__all__ = ['TextWrapper', 'wrap', 'fill'] + +# Hardcode the recognized whitespace characters to the US-ASCII +# whitespace characters. The main reason for doing this is that in +# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales +# that character winds up in string.whitespace. Respecting +# string.whitespace in those cases would 1) make textwrap treat 0xa0 the +# same as any other whitespace char, which is clearly wrong (it's a +# *non-breaking* space), 2) possibly cause problems with Unicode, +# since 0xa0 is not in range(128). +_whitespace = '\t\n\x0b\x0c\r ' + +class TextWrapper: + """ + Object for wrapping/filling text. The public interface consists of + the wrap() and fill() methods; the other methods are just there for + subclasses to override in order to tweak the default behaviour. + If you want to completely replace the main wrapping algorithm, + you'll probably have to override _wrap_chunks(). + + Several instance attributes control various aspects of wrapping: + width (default: 70) + the maximum width of wrapped lines (unless break_long_words + is false) + initial_indent (default: "") + string that will be prepended to the first line of wrapped + output. Counts towards the line's width. + subsequent_indent (default: "") + string that will be prepended to all lines save the first + of wrapped output; also counts towards each line's width. + expand_tabs (default: true) + Expand tabs in input text to spaces before further processing. 
+ Each tab will become 1 .. 8 spaces, depending on its position in + its line. If false, each tab is treated as a single character. + replace_whitespace (default: true) + Replace all whitespace characters in the input text by spaces + after tab expansion. Note that if expand_tabs is false and + replace_whitespace is true, every tab will be converted to a + single space! + fix_sentence_endings (default: false) + Ensure that sentence-ending punctuation is always followed + by two spaces. Off by default because the algorithm is + (unavoidably) imperfect. + break_long_words (default: true) + Break words longer than 'width'. If false, those words will not + be broken, and some lines might be longer than 'width'. + """ + + whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace)) + + unicode_whitespace_trans = {} + uspace = ord(u' ') + for x in map(ord, _whitespace): + unicode_whitespace_trans[x] = uspace + + # This funky little regex is just the trick for splitting + # text up into word-wrappable chunks. E.g. + # "Hello there -- you goof-ball, use the -b option!" + # splits into + # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option! + # (after stripping out empty strings). + wordsep_re = re.compile( + r'(\s+|' # any whitespace + r'[^\s\w]*\w+[a-zA-Z]-(?=\w+[a-zA-Z])|' # hyphenated words + r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash + + # XXX this is not locale- or charset-aware -- string.lowercase + # is US-ASCII only (and therefore English-only) + sentence_end_re = re.compile(r'[%s]' # lowercase letter + r'[\.\!\?]' # sentence-ending punct. + r'[\"\']?' # optional end-of-quote + % string.lowercase) + + + def __init__(self, + width=70, + initial_indent="", + subsequent_indent="", + expand_tabs=True, + replace_whitespace=True, + fix_sentence_endings=False, + break_long_words=True): + self.width = width + self.initial_indent = initial_indent + self.subsequent_indent = subsequent_indent + self.expand_tabs = expand_tabs + self.replace_whitespace = replace_whitespace + self.fix_sentence_endings = fix_sentence_endings + self.break_long_words = break_long_words + + + # -- Private methods ----------------------------------------------- + # (possibly useful for subclasses to override) + + def _munge_whitespace(self, text): + """_munge_whitespace(text : string) -> string + + Munge whitespace in text: expand tabs and convert all other + whitespace characters to spaces. Eg. " foo\tbar\n\nbaz" + becomes " foo bar baz". + """ + if self.expand_tabs: + text = text.expandtabs() + if self.replace_whitespace: + if isinstance(text, str): + text = text.translate(self.whitespace_trans) + elif isinstance(text, unicode): + text = text.translate(self.unicode_whitespace_trans) + return text + + + def _split(self, text): + """_split(text : string) -> [string] + + Split the text to wrap into indivisible chunks. Chunks are + not quite the same as words; see wrap_chunks() for full + details. As an example, the text + Look, goof-ball -- use the -b option! + breaks into the following chunks: + 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ', + 'use', ' ', 'the', ' ', '-b', ' ', 'option!' + """ + chunks = self.wordsep_re.split(text) + chunks = filter(None, chunks) + return chunks + + def _fix_sentence_endings(self, chunks): + """_fix_sentence_endings(chunks : [string]) + + Correct for sentence endings buried in 'chunks'. Eg. when the + original text contains "... foo.\nBar ...", munge_whitespace() + and split() will convert that to [..., "foo.", " ", "Bar", ...] 
+ which has one too few spaces; this method simply changes the one + space to two. + """ + i = 0 + pat = self.sentence_end_re + while i < len(chunks)-1: + if chunks[i+1] == " " and pat.search(chunks[i]): + chunks[i+1] = " " + i += 2 + else: + i += 1 + + def _handle_long_word(self, chunks, cur_line, cur_len, width): + """_handle_long_word(chunks : [string], + cur_line : [string], + cur_len : int, width : int) + + Handle a chunk of text (most likely a word, not whitespace) that + is too long to fit in any line. + """ + space_left = max(width - cur_len, 1) + + # If we're allowed to break long words, then do so: put as much + # of the next chunk onto the current line as will fit. + if self.break_long_words: + cur_line.append(chunks[0][0:space_left]) + chunks[0] = chunks[0][space_left:] + + # Otherwise, we have to preserve the long word intact. Only add + # it to the current line if there's nothing already there -- + # that minimizes how much we violate the width constraint. + elif not cur_line: + cur_line.append(chunks.pop(0)) + + # If we're not allowed to break long words, and there's already + # text on the current line, do nothing. Next time through the + # main loop of _wrap_chunks(), we'll wind up here again, but + # cur_len will be zero, so the next line will be entirely + # devoted to the long word that we can't handle right now. + + def _wrap_chunks(self, chunks): + """_wrap_chunks(chunks : [string]) -> [string] + + Wrap a sequence of text chunks and return a list of lines of + length 'self.width' or less. (If 'break_long_words' is false, + some lines may be longer than this.) Chunks correspond roughly + to words and the whitespace between them: each chunk is + indivisible (modulo 'break_long_words'), but a line break can + come between any two chunks. Chunks should not have internal + whitespace; ie. a chunk is either all whitespace or a "word". + Whitespace chunks will be removed from the beginning and end of + lines, but apart from that whitespace is preserved. + """ + lines = [] + if self.width <= 0: + raise ValueError("invalid width %r (must be > 0)" % self.width) + + while chunks: + + # Start the list of chunks that will make up the current line. + # cur_len is just the length of all the chunks in cur_line. + cur_line = [] + cur_len = 0 + + # Figure out which static string will prefix this line. + if lines: + indent = self.subsequent_indent + else: + indent = self.initial_indent + + # Maximum width for this line. + width = self.width - len(indent) + + # First chunk on line is whitespace -- drop it, unless this + # is the very beginning of the text (ie. no lines started yet). + if chunks[0].strip() == '' and lines: + del chunks[0] + + while chunks: + l = len(chunks[0]) + + # Can at least squeeze this chunk onto the current line. + if cur_len + l <= width: + cur_line.append(chunks.pop(0)) + cur_len += l + + # Nope, this line is full. + else: + break + + # The current line is full, and the next chunk is too big to + # fit on *any* line (not just this one). + if chunks and len(chunks[0]) > width: + self._handle_long_word(chunks, cur_line, cur_len, width) + + # If the last chunk on this line is all whitespace, drop it. + if cur_line and cur_line[-1].strip() == '': + del cur_line[-1] + + # Convert current line back to a string and store it in list + # of all lines (return value). 
+ if cur_line: + lines.append(indent + ''.join(cur_line)) + + return lines + + + # -- Public interface ---------------------------------------------- + + def wrap(self, text): + """wrap(text : string) -> [string] + + Reformat the single paragraph in 'text' so it fits in lines of + no more than 'self.width' columns, and return a list of wrapped + lines. Tabs in 'text' are expanded with string.expandtabs(), + and all other whitespace characters (including newline) are + converted to space. + """ + text = self._munge_whitespace(text) + indent = self.initial_indent + chunks = self._split(text) + if self.fix_sentence_endings: + self._fix_sentence_endings(chunks) + return self._wrap_chunks(chunks) + + def fill(self, text): + """fill(text : string) -> string + + Reformat the single paragraph in 'text' to fit in lines of no + more than 'self.width' columns, and return a new string + containing the entire wrapped paragraph. + """ + return "\n".join(self.wrap(text)) + + +# -- Convenience interface --------------------------------------------- + +def wrap(text, width=70, **kwargs): + """Wrap a single paragraph of text, returning a list of wrapped lines. + + Reformat the single paragraph in 'text' so it fits in lines of no + more than 'width' columns, and return a list of wrapped lines. By + default, tabs in 'text' are expanded with string.expandtabs(), and + all other whitespace characters (including newline) are converted to + space. See TextWrapper class for available keyword args to customize + wrapping behaviour. + """ + w = TextWrapper(width=width, **kwargs) + return w.wrap(text) + +def fill(text, width=70, **kwargs): + """Fill a single paragraph of text, returning a new string. + + Reformat the single paragraph in 'text' to fit in lines of no more + than 'width' columns, and return a new string containing the entire + wrapped paragraph. As with wrap(), tabs are expanded and other + whitespace characters converted to space. See TextWrapper class for + available keyword args to customize wrapping behaviour. + """ + w = TextWrapper(width=width, **kwargs) + return w.fill(text) + + +# -- Loosely related functionality ------------------------------------- + +def dedent(text): + """dedent(text : string) -> string + + Remove any whitespace than can be uniformly removed from the left + of every line in `text`. + + This can be used e.g. to make triple-quoted strings line up with + the left edge of screen/whatever, while still presenting it in the + source code in indented form. + + For example: + + def test(): + # end first line with \ to avoid the empty line! 
+ s = '''\ + hello + world + ''' + print repr(s) # prints ' hello\n world\n ' + print repr(dedent(s)) # prints 'hello\n world\n' + """ + lines = text.expandtabs().split('\n') + margin = None + for line in lines: + content = line.lstrip() + if not content: + continue + indent = len(line) - len(content) + if margin is None: + margin = indent + else: + margin = min(margin, indent) + + if margin is not None and margin > 0: + for i in range(len(lines)): + lines[i] = lines[i][margin:] + + return '\n'.join(lines) Modified: py/branch/dist-doctest/py/initpkg.py ============================================================================== --- py/branch/dist-doctest/py/initpkg.py (original) +++ py/branch/dist-doctest/py/initpkg.py Tue Jun 14 11:14:48 2005 @@ -66,6 +66,9 @@ assert fspath.startswith('./'), \ "%r is not an implementation path (XXX)" % (extpyish,) implmodule = self._loadimpl(fspath[:-3]) + if not modpath: # export the entire module + return implmodule + current = implmodule for x in modpath.split('.'): try: Modified: py/branch/dist-doctest/py/misc/testing/test_initpkg.py ============================================================================== --- py/branch/dist-doctest/py/misc/testing/test_initpkg.py (original) +++ py/branch/dist-doctest/py/misc/testing/test_initpkg.py Tue Jun 14 11:14:48 2005 @@ -48,6 +48,7 @@ base.join('magic', 'greenlet.py'), base.join('bin'), base.join('execnet', 'script'), + base.join('compat'), ) for p in base.visit('*.py', py.path.checker(dotfile=0)): relpath = p.new(ext='').relto(base) From dstanek at codespeak.net Tue Jun 14 11:27:04 2005 From: dstanek at codespeak.net (dstanek at codespeak.net) Date: Tue, 14 Jun 2005 11:27:04 +0200 (CEST) Subject: [py-svn] r13375 - py/branch/dist-doctest/py Message-ID: <20050614092704.3783627B83@code1.codespeak.net> Author: dstanek Date: Tue Jun 14 11:27:02 2005 New Revision: 13375 Modified: py/branch/dist-doctest/py/__init__.py Log: Add compat imports back into the API. Modified: py/branch/dist-doctest/py/__init__.py ============================================================================== --- py/branch/dist-doctest/py/__init__.py (original) +++ py/branch/dist-doctest/py/__init__.py Tue Jun 14 11:27:02 2005 @@ -112,7 +112,7 @@ 'log.StdoutLogger' :('./misc/log_support.py', 'StdoutLogger'), 'log.StderrLogger' :('./misc/log_support.py', 'StderrLogger'), - #'compat.doctest' :('./compat/doctest.py', None), - #'compat.optparse' :('./compat/optparse.py', None), - #'compat.textwrap' :('./compat/textwrap.py', None), + 'compat.doctest' :('./compat/doctest.py', None), + 'compat.optparse' :('./compat/optparse.py', None), + 'compat.textwrap' :('./compat/textwrap.py', None), }) From dstanek at codespeak.net Tue Jun 14 15:35:11 2005 From: dstanek at codespeak.net (dstanek at codespeak.net) Date: Tue, 14 Jun 2005 15:35:11 +0200 (CEST) Subject: [py-svn] r13384 - py/branch/dist-doctest/py/compat/testing Message-ID: <20050614133511.20ADE27B83@code1.codespeak.net> Author: dstanek Date: Tue Jun 14 15:35:07 2005 New Revision: 13384 Added: py/branch/dist-doctest/py/compat/testing/test_optparse.py - copied, changed from r13383, vendor/cpython/Python-r241/dist/src/Lib/test/test_optparse.py py/branch/dist-doctest/py/compat/testing/test_textwrap.py - copied, changed from r13383, vendor/cpython/Python-r241/dist/src/Lib/test/test_textwrap.py Log: Added compat testing modules from Python-r241. 
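The test modules copied in below rebind their names from these py.compat modules instead of importing the stdlib versions directly; a small sketch of that import style (relying on the exportdefs and the initpkg whole-module export shown above):

    import py
    textwrap = py.compat.textwrap   # whole module exported via initpkg
    optparse = py.compat.optparse
    print textwrap.fill("the compat copies stand in for the stdlib modules", width=30)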
Copied: py/branch/dist-doctest/py/compat/testing/test_optparse.py (from r13383, vendor/cpython/Python-r241/dist/src/Lib/test/test_optparse.py) ============================================================================== --- vendor/cpython/Python-r241/dist/src/Lib/test/test_optparse.py (original) +++ py/branch/dist-doctest/py/compat/testing/test_optparse.py Tue Jun 14 15:35:07 2005 @@ -17,10 +17,23 @@ from pprint import pprint from test import test_support -from optparse import make_option, Option, IndentedHelpFormatter, \ - TitledHelpFormatter, OptionParser, OptionContainer, OptionGroup, \ - SUPPRESS_HELP, SUPPRESS_USAGE, OptionError, OptionConflictError, \ - BadOptionError, OptionValueError, Values, _match_abbrev +import py +optparse = py.compat.optparse +make_option = optparse.make_option +Option = optparse.Option +IndentedHelpFormatter = optparse.IndentedHelpFormatter +TitledHelpFormatter = optparse.TitledHelpFormatter +OptionParser = optparse.OptionParser +OptionContainer = optparse.OptionContainer +OptionGroup = optparse.OptionGroup +SUPPRESS_HELP = optparse.SUPPRESS_HELP +SUPPRESS_USAGE = optparse.SUPPRESS_USAGE +OptionError = optparse.OptionError +OptionConflictError = optparse.OptionConflictError +BadOptionError = optparse.BadOptionError +OptionValueError = optparse.OptionValueError +Values = optparse.Values +_match_abbrev = optparse._match_abbrev # Do the right thing with boolean values for all known Python versions. try: Copied: py/branch/dist-doctest/py/compat/testing/test_textwrap.py (from r13383, vendor/cpython/Python-r241/dist/src/Lib/test/test_textwrap.py) ============================================================================== --- vendor/cpython/Python-r241/dist/src/Lib/test/test_textwrap.py (original) +++ py/branch/dist-doctest/py/compat/testing/test_textwrap.py Tue Jun 14 15:35:07 2005 @@ -11,7 +11,12 @@ import unittest from test import test_support -from textwrap import TextWrapper, wrap, fill, dedent +import py +textwrap = py.compat.textwrap +TextWrapper = textwrap.TextWrapper +wrap = textwrap.wrap +fill = textwrap.fill +dedent = textwrap.dedent class BaseTestCase(unittest.TestCase): From hpk at codespeak.net Wed Jun 15 00:08:30 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Wed, 15 Jun 2005 00:08:30 +0200 (CEST) Subject: [py-svn] r13415 - py/dist/py/compat Message-ID: <20050614220830.1C44A27BDD@code1.codespeak.net> Author: hpk Date: Wed Jun 15 00:08:29 2005 New Revision: 13415 Modified: py/dist/py/compat/ (props changed) py/dist/py/compat/__init__.py (props changed) py/dist/py/compat/doctest.py (props changed) py/dist/py/compat/optparse.py (props changed) py/dist/py/compat/textwrap.py (props changed) Log: setting svn:eol-style and svn:ignore properties From hpk at codespeak.net Wed Jun 15 00:12:54 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Wed, 15 Jun 2005 00:12:54 +0200 (CEST) Subject: [py-svn] r13416 - in py/dist/py: . 
misc misc/testing Message-ID: <20050614221254.BF04627BE2@code1.codespeak.net> Author: hpk Date: Wed Jun 15 00:12:53 2005 New Revision: 13416 Modified: py/dist/py/__init__.py py/dist/py/misc/log.py py/dist/py/misc/log_support.py py/dist/py/misc/testing/test_log.py Log: streamlining logging API and tests a bit Modified: py/dist/py/__init__.py ============================================================================== --- py/dist/py/__init__.py (original) +++ py/dist/py/__init__.py Wed Jun 15 00:12:53 2005 @@ -100,18 +100,19 @@ 'xml.escape' : ('./xmlobj/misc.py', 'escape'), # logging API ('producers' and 'consumers') + 'log.Producer' : ('./misc/log.py', 'LogProducer'), 'log.debug' : ('./misc/log.py', 'debug'), - 'log.info' :('./misc/log.py', 'info'), - 'log.warn' :('./misc/log.py', 'warn'), - 'log.error' :('./misc/log.py', 'error'), - 'log.critical' :('./misc/log.py', 'critical'), - 'log.set_logger' :('./misc/log.py', 'set_logger'), - 'log.getstate' :('./misc/log.py', '_getstate_'), - 'log.setstate' :('./misc/log.py', '_setstate_'), - 'log.FileLogger' :('./misc/log_support.py', 'FileLogger'), - 'log.StdoutLogger' :('./misc/log_support.py', 'StdoutLogger'), - 'log.StderrLogger' :('./misc/log_support.py', 'StderrLogger'), - 'log.EmailLogger' :('./misc/log_support.py', 'EmailLogger'), - 'log.SyslogLogger' :('./misc/log_support.py', 'SyslogLogger'), - 'log.WinEventLogger' :('./misc/log_support.py', 'WinEventLogger'), + 'log.info' : ('./misc/log.py', 'info'), + 'log.warn' : ('./misc/log.py', 'warn'), + 'log.error' : ('./misc/log.py', 'error'), + 'log.critical' : ('./misc/log.py', 'critical'), + 'log.set_logger' : ('./misc/log.py', 'set_logger'), + 'log.getstate' : ('./misc/log.py', 'getstate'), + 'log.setstate' : ('./misc/log.py', 'setstate'), + 'log.File' : ('./misc/log_support.py', 'File'), + 'log.Stdout' : ('./misc/log_support.py', 'Stdout'), + 'log.Stderr' : ('./misc/log_support.py', 'Stderr'), + 'log.Email' : ('./misc/log_support.py', 'Email'), + 'log.Syslog' : ('./misc/log_support.py', 'Syslog'), + 'log.WinEvent' : ('./misc/log_support.py', 'WinEvent'), }) Modified: py/dist/py/misc/log.py ============================================================================== --- py/dist/py/misc/log.py (original) +++ py/dist/py/misc/log.py Wed Jun 15 00:12:53 2005 @@ -32,22 +32,14 @@ Message = Message # to allow later customization _registry = {} - def __init__(self, keywords=()): + def __init__(self, keywords): + if isinstance(keywords, str): + keywords = tuple(keywords.split()) self.keywords = keywords def __repr__(self): return "" % ":".join(self.keywords) - def set_logger(self, name, func): - assert callable(func) - keywords = self.keywords + (name,) - self._registry[keywords] = func - # if default logger is set, also reset the other ones - if keywords == ('default',): - for k in [('debug',), ('info',), ('warn',), - ('error',), ('critical',)]: - self._registry[k] = func - def __getattr__(self, name): if name[0] == '_': raise AttributeError, name @@ -66,21 +58,28 @@ return func(message) + +def set_logger(name, func): + assert callable(func) + keywords = tuple(map(None, name.split())) + LogProducer._registry[keywords] = func + # if default logger is set, also reset the other ones + # XXX is this a good idea? + if keywords == ('default',): + for k in [('debug',), ('info',), ('warn',), + ('error',), ('critical',)]: + LogProducer._registry[k] = func + +def getstate(): + """ return logging registry state. 
""" # class methods dealing with registry - def _getstate_(cls): - return cls._registry.copy() - _getstate_ = classmethod(_getstate_) - - def _setstate_(cls, state): - cls._registry = state - _setstate_ = classmethod(_setstate_) - -producer = LogProducer() -debug = producer.debug -info = producer.info -warn = producer.warn -error = producer.error -critical = producer.critical -set_logger = producer.set_logger -_getstate_ = producer._getstate_ -_setstate_ = producer._setstate_ + return LogProducer._registry.copy() + +def setstate(state): + """ set logging registry state. """ + LogProducer._registry = state + +# some default severity producers +_ = globals() +for x in 'debug info warn split error critical'.split(): + _[x] = LogProducer(x) Modified: py/dist/py/misc/log_support.py ============================================================================== --- py/dist/py/misc/log_support.py (original) +++ py/dist/py/misc/log_support.py Wed Jun 15 00:12:53 2005 @@ -12,7 +12,7 @@ def __init__(self): self.formatter = logging.Formatter('%(message)s') - def FileLogger(self, filename, mode='a'): + def File(self, filename, mode='a'): filename = str(filename) logger_name = "py.log.file.%s" % filename handler_type = logging.FileHandler @@ -21,7 +21,7 @@ } return self._logger_func(logger_name, handler_type, **handler_args) - def StdoutLogger(self): + def Stdout(self): # Add str(sys.stdout) to logger name because sys.stdout might be redirected # to a file, and in this case we need to distinguish between files logger_name = 'py.log.stdout.%s' % str(sys.stdout) @@ -30,7 +30,7 @@ } return self._logger_func(logger_name, handler_type, **handler_args) - def StderrLogger(self): + def Stderr(self): # Add str(sys.stderr) to logger name because sys.stderr might be redirected # to a file, and in this case we need to distinguish between files logger_name = 'py.log.stderr.%s' % str(sys.stderr) @@ -39,7 +39,7 @@ } return self._logger_func(logger_name, handler_type, **handler_args) - def SyslogLogger(self, address=('localhost', 514), facility=1): + def Syslog(self, address=('localhost', 514), facility=1): logger_name = 'py.log.syslog' handler_type = logging.handlers.SysLogHandler handler_args = {'address': address, @@ -47,7 +47,7 @@ } return self._logger_func(logger_name, handler_type, **handler_args) - def WinEventLogger(self, appname='pylib', logtype='Application'): + def WinEvent(self, appname='pylib', logtype='Application'): logger_name = 'py.log.winevent' handler_type = logging.handlers.NTEventLogHandler handler_args = {'appname': appname, @@ -55,7 +55,7 @@ } return self._logger_func(logger_name, handler_type, **handler_args) - def EmailLogger(self, mailhost, fromaddr, toaddrs, subject): + def Email(self, mailhost, fromaddr, toaddrs, subject): logger_name = 'py.log.email' handler_type = logging.handlers.SMTPHandler handler_args = {'mailhost': mailhost, @@ -96,9 +96,9 @@ logger.critical(message) consumer = LogConsumer() -FileLogger = consumer.FileLogger -StdoutLogger = consumer.StdoutLogger -StderrLogger = consumer.StderrLogger -SyslogLogger = consumer.SyslogLogger -WinEventLogger = consumer.WinEventLogger -EmailLogger = consumer.EmailLogger \ No newline at end of file +File = consumer.File +Stdout = consumer.Stdout +Stderr = consumer.Stderr +Syslog = consumer.Syslog +WinEvent = consumer.WinEvent +Email = consumer.Email Modified: py/dist/py/misc/testing/test_log.py ============================================================================== --- py/dist/py/misc/testing/test_log.py (original) +++ 
py/dist/py/misc/testing/test_log.py Wed Jun 15 00:12:53 2005 @@ -16,9 +16,7 @@ def test_produce_one_keyword(self): l = [] - def f(msg): - l.append(msg) - py.log.set_logger('debug', f) + py.log.set_logger('debug', l.append) py.log.debug("hello world") assert len(l) == 1 msg = l[0] @@ -26,12 +24,18 @@ assert msg.prefix() == '[debug] ' assert str(msg) == "[debug] hello world" + def test_producer_class(self): + p = py.log.Producer('x1') + l = [] + py.log.set_logger('x1', l.append) + p("hello") + assert len(l) == 1 + assert len(l[0].keywords) == 1 + assert 'x1' == l[0].keywords[0] + def test_default_logger(self): l = [] - def f(msg): - l.append(msg) - - py.log.set_logger("default", f) + py.log.set_logger("default", l.append) py.log.debug("hello") py.log.warn("world") py.log.info("I") @@ -63,7 +67,7 @@ sys.stdout = open(redirect, 'w') # Start of the 'consumer' code - py.log.set_logger("default", py.log.StdoutLogger()) + py.log.set_logger("default", py.log.Stdout()) py.log.debug("hello world #1") py.log.info("hello world #2") py.log.warn("hello world #3") @@ -86,7 +90,7 @@ sys.stderr = open(redirect, 'w') # Start of the 'consumer' code - py.log.set_logger("default", py.log.StderrLogger()) + py.log.set_logger("default", py.log.Stderr()) py.log.debug("hello world #1") py.log.info("hello world #2") py.log.warn("hello world #3") @@ -104,7 +108,7 @@ custom_log = tempdir.join('log.out') # Start of the 'consumer' code - py.log.set_logger("default", py.log.FileLogger(custom_log)) + py.log.set_logger("default", py.log.File(custom_log)) py.log.debug("hello world #1") py.log.info("hello world #2") py.log.warn("hello world #3") @@ -120,8 +124,8 @@ def test_log_file_append_mode(self): logfilefn = tempdir.join('log_append.out') - # The append mode is on by default, so we don't need to specify it for FileLogger - py.log.set_logger("default", py.log.FileLogger(logfilefn)) + # The append mode is on by default, so we don't need to specify it for File + py.log.set_logger("default", py.log.File(logfilefn)) py.log.debug("hello world #1") py.log.info("hello world #2") py.log.warn("hello world #3") @@ -134,7 +138,7 @@ '[critical] hello world #5\n'] # We log 5 more lines that should be appended to the log - py.log.set_logger("default", py.log.FileLogger(logfilefn)) + py.log.set_logger("default", py.log.File(logfilefn)) py.log.debug("hello world #6") py.log.info("hello world #7") py.log.warn("hello world #8") @@ -154,8 +158,8 @@ logfilefn = tempdir.join('log_write.out') logfilefn.write("This line should be zapped when we start logging\n") - # We specify mode='w' for the FileLogger - py.log.set_logger("default", py.log.FileLogger(logfilefn, mode='w')) + # We specify mode='w' for the File + py.log.set_logger("default", py.log.File(logfilefn, mode='w')) py.log.debug("hello world #1") py.log.info("hello world #2") py.log.warn("hello world #3") @@ -175,11 +179,11 @@ logfileerror = tempdir.join('log_error.out') logfilecritical = tempdir.join('log_critical.out') - py.log.set_logger("debug", py.log.FileLogger(logfiledebug)) - py.log.set_logger("info", py.log.FileLogger(logfileinfo)) - py.log.set_logger("warn", py.log.FileLogger(logfilewarn)) - py.log.set_logger("error", py.log.FileLogger(logfileerror)) - py.log.set_logger("critical", py.log.FileLogger(logfilecritical)) + py.log.set_logger("debug", py.log.File(logfiledebug)) + py.log.set_logger("info", py.log.File(logfileinfo)) + py.log.set_logger("warn", py.log.File(logfilewarn)) + py.log.set_logger("error", py.log.File(logfileerror)) + py.log.set_logger("critical", 
py.log.File(logfilecritical)) py.log.debug("hello world #1") py.log.info("hello world #2") @@ -206,7 +210,7 @@ logfiledefault1 = tempdir.join('default_log1.out') # We set a file logger as the default logger - py.log.set_logger("default", py.log.FileLogger(logfiledefault1)) + py.log.set_logger("default", py.log.File(logfiledefault1)) py.log.debug("hello world #1") py.log.info("hello world #2") py.log.warn("hello world #3") @@ -222,7 +226,7 @@ # that the new one receives messages and the old one does not receive them anymore logfiledefault2 = tempdir.join('default_log2.out') - py.log.set_logger("default", py.log.FileLogger(logfiledefault2)) + py.log.set_logger("default", py.log.File(logfiledefault2)) py.log.debug("hello world #6") py.log.info("hello world #7") py.log.warn("hello world #8") @@ -246,7 +250,7 @@ saved = sys.stderr sys.stderr = open(redirect, 'w') - py.log.set_logger("default", py.log.StderrLogger()) + py.log.set_logger("default", py.log.Stderr()) py.log.debug("hello world #11") py.log.info("hello world #12") py.log.warn("hello world #13") @@ -274,10 +278,10 @@ logfiledebug1 = tempdir.join('debug_log1.out') # We set a file logger as the default logger in non-append mode - py.log.set_logger("default", py.log.FileLogger(logfiledefault, mode='w')) + py.log.set_logger("default", py.log.File(logfiledefault, mode='w')) # We set a file logger as the debug logger - py.log.set_logger("debug", py.log.FileLogger(logfiledebug1)) + py.log.set_logger("debug", py.log.File(logfiledebug1)) py.log.debug("hello world #1") py.log.info("hello world #2") py.log.warn("hello world #3") @@ -298,7 +302,7 @@ # that the new one receives messages and the old one does not receive them anymore logfiledebug2 = tempdir.join('debug_log2.out') - py.log.set_logger("debug", py.log.FileLogger(logfiledebug2)) + py.log.set_logger("debug", py.log.File(logfiledebug2)) py.log.debug("hello world #6") py.log.info("hello world #7") py.log.warn("hello world #8") @@ -329,7 +333,7 @@ saved = sys.stdout sys.stdout = open(redirect, 'w') - py.log.set_logger("debug", py.log.StdoutLogger()) + py.log.set_logger("debug", py.log.Stdout()) py.log.debug("hello world #11") py.log.info("hello world #12") py.log.warn("hello world #13") @@ -363,7 +367,7 @@ # disabled for now; the syslog log file can usually be read only by root # I manually inspected /var/log/messages and the entries were there def no_test_log_syslog(self): - py.log.set_logger("default", py.log.SyslogLogger()) + py.log.set_logger("default", py.log.Syslog()) py.log.debug("hello world #1") py.log.info("hello world #2") py.log.warn("hello world #3") @@ -374,7 +378,7 @@ # Event Logs on Windows # I manually inspected the Application Log and the entries were there def no_test_log_winevent(self): - py.log.set_logger("default", py.log.WinEventLogger()) + py.log.set_logger("default", py.log.WinEvent()) py.log.debug("hello world #1") py.log.info("hello world #2") py.log.warn("hello world #3") @@ -383,7 +387,7 @@ # disabled for now until I figure out how to properly pass the parameters def no_test_log_email(self): - py.log.set_logger("default", py.log.EmailLogger(mailhost="gheorghiu.net", + py.log.set_logger("default", py.log.Email(mailhost="gheorghiu.net", fromaddr="grig", toaddrs="grig", subject = "py.log email")) From hpk at codespeak.net Wed Jun 15 00:17:18 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Wed, 15 Jun 2005 00:17:18 +0200 (CEST) Subject: [py-svn] r13417 - in py/dist/py: . 
log log/testing misc misc/testing Message-ID: <20050614221718.2409B27BE6@code1.codespeak.net> Author: hpk Date: Wed Jun 15 00:17:16 2005 New Revision: 13417 Added: py/dist/py/log/ (props changed) py/dist/py/log/__init__.py (contents, props changed) py/dist/py/log/consumer.py (props changed) - copied unchanged from r13416, py/dist/py/misc/log_support.py py/dist/py/log/producer.py (props changed) - copied unchanged from r13416, py/dist/py/misc/log.py py/dist/py/log/testing/ (props changed) py/dist/py/log/testing/__init__.py (contents, props changed) py/dist/py/log/testing/test_log.py (props changed) - copied unchanged from r13416, py/dist/py/misc/testing/test_log.py Removed: py/dist/py/misc/log.py py/dist/py/misc/log_support.py py/dist/py/misc/testing/test_log.py Modified: py/dist/py/__init__.py Log: move py.log stuff to its own directory Modified: py/dist/py/__init__.py ============================================================================== --- py/dist/py/__init__.py (original) +++ py/dist/py/__init__.py Wed Jun 15 00:17:16 2005 @@ -100,19 +100,21 @@ 'xml.escape' : ('./xmlobj/misc.py', 'escape'), # logging API ('producers' and 'consumers') - 'log.Producer' : ('./misc/log.py', 'LogProducer'), - 'log.debug' : ('./misc/log.py', 'debug'), - 'log.info' : ('./misc/log.py', 'info'), - 'log.warn' : ('./misc/log.py', 'warn'), - 'log.error' : ('./misc/log.py', 'error'), - 'log.critical' : ('./misc/log.py', 'critical'), - 'log.set_logger' : ('./misc/log.py', 'set_logger'), - 'log.getstate' : ('./misc/log.py', 'getstate'), - 'log.setstate' : ('./misc/log.py', 'setstate'), - 'log.File' : ('./misc/log_support.py', 'File'), - 'log.Stdout' : ('./misc/log_support.py', 'Stdout'), - 'log.Stderr' : ('./misc/log_support.py', 'Stderr'), - 'log.Email' : ('./misc/log_support.py', 'Email'), - 'log.Syslog' : ('./misc/log_support.py', 'Syslog'), - 'log.WinEvent' : ('./misc/log_support.py', 'WinEvent'), + 'log.Producer' : ('./log/producer.py', 'LogProducer'), + 'log.debug' : ('./log/producer.py', 'debug'), + 'log.info' : ('./log/producer.py', 'info'), + 'log.warn' : ('./log/producer.py', 'warn'), + 'log.error' : ('./log/producer.py', 'error'), + 'log.critical' : ('./log/producer.py', 'critical'), + + 'log.set_logger' : ('./log/producer.py', 'set_logger'), + 'log.getstate' : ('./log/producer.py', 'getstate'), + 'log.setstate' : ('./log/producer.py', 'setstate'), + + 'log.File' : ('./log/consumer.py', 'File'), + 'log.Stdout' : ('./log/consumer.py', 'Stdout'), + 'log.Stderr' : ('./log/consumer.py', 'Stderr'), + 'log.Email' : ('./log/consumer.py', 'Email'), + 'log.Syslog' : ('./log/consumer.py', 'Syslog'), + 'log.WinEvent' : ('./log/consumer.py', 'WinEvent'), }) Added: py/dist/py/log/__init__.py ============================================================================== --- (empty file) +++ py/dist/py/log/__init__.py Wed Jun 15 00:17:16 2005 @@ -0,0 +1 @@ +# Added: py/dist/py/log/testing/__init__.py ============================================================================== --- (empty file) +++ py/dist/py/log/testing/__init__.py Wed Jun 15 00:17:16 2005 @@ -0,0 +1 @@ +# Deleted: /py/dist/py/misc/log.py ============================================================================== --- /py/dist/py/misc/log.py Wed Jun 15 00:17:16 2005 +++ (empty file) @@ -1,85 +0,0 @@ -""" -py lib's basic logging/tracing functionality - - EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL (especially the dispatching) - -WARNING: this module is not allowed to contain any 'py' imports, - Instead, it is very self-contained and should not 
depend on - CPython/stdlib versions, either. One reason for these - restrictions is that this module should be sendable - via py.execnet across the network in an very early phase. -""" - -class Message(object): - def __init__(self, keywords, args): - self.keywords = keywords - self.args = args - - def content(self): - return " ".join(map(str, self.args)) - - def prefix(self): - return "[%s] " % (":".join(self.keywords)) - - def __str__(self): - return self.prefix() + self.content() - -class LogProducer(object): - """Log "producer" API which sends messages to be logged - to a 'consumer' object, which then prints them to stdout, - stderr, files, etc.""" - - Message = Message # to allow later customization - _registry = {} - - def __init__(self, keywords): - if isinstance(keywords, str): - keywords = tuple(keywords.split()) - self.keywords = keywords - - def __repr__(self): - return "" % ":".join(self.keywords) - - def __getattr__(self, name): - if name[0] == '_': - raise AttributeError, name - return LogProducer(self.keywords + (name,)) - - def __call__(self, *args): - message = self.Message(self.keywords, args) - try: - func = self._registry[message.keywords] - except KeyError: - # XXX find best match, for now it's a hack/simplistic - try: - func = self._registry[("default",)] - except KeyError: - print str(message) - return - func(message) - - -def set_logger(name, func): - assert callable(func) - keywords = tuple(map(None, name.split())) - LogProducer._registry[keywords] = func - # if default logger is set, also reset the other ones - # XXX is this a good idea? - if keywords == ('default',): - for k in [('debug',), ('info',), ('warn',), - ('error',), ('critical',)]: - LogProducer._registry[k] = func - -def getstate(): - """ return logging registry state. """ - # class methods dealing with registry - return LogProducer._registry.copy() - -def setstate(state): - """ set logging registry state. 
""" - LogProducer._registry = state - -# some default severity producers -_ = globals() -for x in 'debug info warn split error critical'.split(): - _[x] = LogProducer(x) Deleted: /py/dist/py/misc/log_support.py ============================================================================== --- /py/dist/py/misc/log_support.py Wed Jun 15 00:17:16 2005 +++ (empty file) @@ -1,104 +0,0 @@ -import py -import os, sys, logging -import logging.handlers - -class LogConsumer(object): - """Log "consumer" API which receives messages from - a 'producer' object and displays them using various - logging mechanisms (stdout, stderr, files, syslog, etc.)""" - - _handlers = {} - - def __init__(self): - self.formatter = logging.Formatter('%(message)s') - - def File(self, filename, mode='a'): - filename = str(filename) - logger_name = "py.log.file.%s" % filename - handler_type = logging.FileHandler - handler_args = {'filename': filename, - 'mode': mode, - } - return self._logger_func(logger_name, handler_type, **handler_args) - - def Stdout(self): - # Add str(sys.stdout) to logger name because sys.stdout might be redirected - # to a file, and in this case we need to distinguish between files - logger_name = 'py.log.stdout.%s' % str(sys.stdout) - handler_type = logging.StreamHandler - handler_args = {'strm': sys.stdout, - } - return self._logger_func(logger_name, handler_type, **handler_args) - - def Stderr(self): - # Add str(sys.stderr) to logger name because sys.stderr might be redirected - # to a file, and in this case we need to distinguish between files - logger_name = 'py.log.stderr.%s' % str(sys.stderr) - handler_type = logging.StreamHandler - handler_args = {'strm': sys.stderr, - } - return self._logger_func(logger_name, handler_type, **handler_args) - - def Syslog(self, address=('localhost', 514), facility=1): - logger_name = 'py.log.syslog' - handler_type = logging.handlers.SysLogHandler - handler_args = {'address': address, - 'facility': facility, - } - return self._logger_func(logger_name, handler_type, **handler_args) - - def WinEvent(self, appname='pylib', logtype='Application'): - logger_name = 'py.log.winevent' - handler_type = logging.handlers.NTEventLogHandler - handler_args = {'appname': appname, - 'logtype': logtype, - } - return self._logger_func(logger_name, handler_type, **handler_args) - - def Email(self, mailhost, fromaddr, toaddrs, subject): - logger_name = 'py.log.email' - handler_type = logging.handlers.SMTPHandler - handler_args = {'mailhost': mailhost, - 'fromaddr': fromaddr, - 'toaddrs': toaddrs, - 'subject': subject, - } - return self._logger_func(logger_name, handler_type, **handler_args) - - def _logger_func(self, logger_name, handler_type, **handler_args): - logger = logging.getLogger(logger_name) - #print "got logger " + str(logger) + "for name " + logger_name - logger.setLevel(logging.DEBUG) - - # Add handler to logger only if it hasn't been already set for - # the same logger name - if not self._handlers.has_key(logger_name): - #print "adding handler for logger " + logger_name - handler = handler_type(**handler_args) - handler.setFormatter(self.formatter) - logger.addHandler(handler) - self._handlers[logger_name] = handler - def message_processing_func(message): - self.log_message(logger, message) - return message_processing_func - - def log_message(self, logger, message): - for keyword in message.keywords: - if keyword.startswith('debug'): - logger.debug(message) - if keyword.startswith('info'): - logger.info(message) - if keyword.startswith('warn'): - logger.warn(message) - 
if keyword.startswith('err'): - logger.error(message) - if keyword.startswith('crit'): - logger.critical(message) - -consumer = LogConsumer() -File = consumer.File -Stdout = consumer.Stdout -Stderr = consumer.Stderr -Syslog = consumer.Syslog -WinEvent = consumer.WinEvent -Email = consumer.Email Deleted: /py/dist/py/misc/testing/test_log.py ============================================================================== --- /py/dist/py/misc/testing/test_log.py Wed Jun 15 00:17:16 2005 +++ (empty file) @@ -1,398 +0,0 @@ -import py -import sys - -def setup_module(mod): - mod.tempdir = py.test.ensuretemp("py.log-test") - -class TestLogProducer: - def setup_method(self, meth): - self.state = py.log.getstate() - def teardown_method(self, meth): - py.log.setstate(self.state) - - def test_producer_repr(self): - d = py.log.debug - assert repr(d).find('debug') != -1 - - def test_produce_one_keyword(self): - l = [] - py.log.set_logger('debug', l.append) - py.log.debug("hello world") - assert len(l) == 1 - msg = l[0] - assert msg.content().startswith('hello world') - assert msg.prefix() == '[debug] ' - assert str(msg) == "[debug] hello world" - - def test_producer_class(self): - p = py.log.Producer('x1') - l = [] - py.log.set_logger('x1', l.append) - p("hello") - assert len(l) == 1 - assert len(l[0].keywords) == 1 - assert 'x1' == l[0].keywords[0] - - def test_default_logger(self): - l = [] - py.log.set_logger("default", l.append) - py.log.debug("hello") - py.log.warn("world") - py.log.info("I") - py.log.error("am") - py.log.critical("Sam") - assert len(l) == 5 - msg1, msg2, msg3, msg4, msg5 = l - - assert 'debug' in msg1.keywords - assert 'warn' in msg2.keywords - assert 'info' in msg3.keywords - assert 'error' in msg4.keywords - assert 'critical' in msg5.keywords - - assert msg1.content() == 'hello' - assert msg2.content() == 'world' - assert msg3.content() == 'I' - assert msg4.content() == 'am' - assert msg5.content() == 'Sam' - -class TestLogConsumer: - - def test_log_stdout(self): - # We redirect stdout so that we can verify that - # the log messages have been printed to it - p = tempdir.join('log_stdout.out') - redirect = str(p) - sys.saved = sys.stdout - sys.stdout = open(redirect, 'w') - - # Start of the 'consumer' code - py.log.set_logger("default", py.log.Stdout()) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - # End of the 'consumer' code - - sys.stdout = sys.saved - lines = open(redirect).readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - def test_log_stderr(self): - # We redirect stderr so that we can verify that - # the log messages have been printed to it - p = tempdir.join('log_stderr.out') - redirect = str(p) - sys.saved = sys.stderr - sys.stderr = open(redirect, 'w') - - # Start of the 'consumer' code - py.log.set_logger("default", py.log.Stderr()) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - # End of the 'consumer' code - - sys.stderr = sys.saved - lines = open(redirect).readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - def test_log_file(self): - custom_log = tempdir.join('log.out') - - # 
Start of the 'consumer' code - py.log.set_logger("default", py.log.File(custom_log)) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - # End of the 'consumer' code - - lines = custom_log.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - def test_log_file_append_mode(self): - logfilefn = tempdir.join('log_append.out') - - # The append mode is on by default, so we don't need to specify it for File - py.log.set_logger("default", py.log.File(logfilefn)) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - - lines = logfilefn.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - # We log 5 more lines that should be appended to the log - py.log.set_logger("default", py.log.File(logfilefn)) - py.log.debug("hello world #6") - py.log.info("hello world #7") - py.log.warn("hello world #8") - py.log.error("hello world #9") - py.log.critical("hello world #10") - - lines = logfilefn.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n', - '[debug] hello world #6\n', '[info] hello world #7\n', - '[warn] hello world #8\n', '[error] hello world #9\n', - '[critical] hello world #10\n'] - - - def test_log_file_write_mode(self): - logfilefn = tempdir.join('log_write.out') - logfilefn.write("This line should be zapped when we start logging\n") - - # We specify mode='w' for the File - py.log.set_logger("default", py.log.File(logfilefn, mode='w')) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - - lines = logfilefn.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - - def test_keyword_based_log_files(self): - logfiledebug = tempdir.join('log_debug.out') - logfileinfo = tempdir.join('log_info.out') - logfilewarn = tempdir.join('log_warn.out') - logfileerror = tempdir.join('log_error.out') - logfilecritical = tempdir.join('log_critical.out') - - py.log.set_logger("debug", py.log.File(logfiledebug)) - py.log.set_logger("info", py.log.File(logfileinfo)) - py.log.set_logger("warn", py.log.File(logfilewarn)) - py.log.set_logger("error", py.log.File(logfileerror)) - py.log.set_logger("critical", py.log.File(logfilecritical)) - - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - - lines = logfiledebug.readlines() - assert lines == ['[debug] hello world #1\n'] - - lines = logfileinfo.readlines() - assert lines == ['[info] hello world #2\n'] - - lines = logfilewarn.readlines() - assert lines == ['[warn] hello world #3\n'] - - lines = logfileerror.readlines() - assert lines == ['[error] hello world #4\n'] - - lines = logfilecritical.readlines() - assert lines == ['[critical] hello world #5\n'] - - def test_reassign_default_logger(self): - logfiledefault1 = 
tempdir.join('default_log1.out') - - # We set a file logger as the default logger - py.log.set_logger("default", py.log.File(logfiledefault1)) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - - lines = logfiledefault1.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - # We set a different file logger as the default logger and verify - # that the new one receives messages and the old one does not receive them anymore - logfiledefault2 = tempdir.join('default_log2.out') - - py.log.set_logger("default", py.log.File(logfiledefault2)) - py.log.debug("hello world #6") - py.log.info("hello world #7") - py.log.warn("hello world #8") - py.log.error("hello world #9") - py.log.critical("hello world #10") - - lines = logfiledefault1.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - lines = logfiledefault2.readlines() - assert lines == ['[debug] hello world #6\n', '[info] hello world #7\n', - '[warn] hello world #8\n', '[error] hello world #9\n', - '[critical] hello world #10\n'] - - # We set stderr as the default logger and verify that messages go to stderr - # and not to the previous 2 file loggers - p = tempdir.join('log_stderr_default.out') - redirect = str(p) - saved = sys.stderr - sys.stderr = open(redirect, 'w') - - py.log.set_logger("default", py.log.Stderr()) - py.log.debug("hello world #11") - py.log.info("hello world #12") - py.log.warn("hello world #13") - py.log.error("hello world #14") - py.log.critical("hello world #15") - - sys.stderr = saved - lines = open(redirect).readlines() - assert lines == ['[debug] hello world #11\n', '[info] hello world #12\n', - '[warn] hello world #13\n', '[error] hello world #14\n', - '[critical] hello world #15\n'] - - lines = logfiledefault1.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - lines = logfiledefault2.readlines() - assert lines == ['[debug] hello world #6\n', '[info] hello world #7\n', - '[warn] hello world #8\n', '[error] hello world #9\n', - '[critical] hello world #10\n'] - - def test_reassign_debug_logger(self): - logfiledefault = tempdir.join('default.out') - logfiledebug1 = tempdir.join('debug_log1.out') - - # We set a file logger as the default logger in non-append mode - py.log.set_logger("default", py.log.File(logfiledefault, mode='w')) - - # We set a file logger as the debug logger - py.log.set_logger("debug", py.log.File(logfiledebug1)) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - - # The debug message should have gone to the debug file logger - lines = logfiledebug1.readlines() - assert lines == ['[debug] hello world #1\n'] - - # All other messages should have gone to the default file logger - lines = logfiledefault.readlines() - assert lines == ['[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - # We set a different file logger as the debug logger and verify - # that the new one receives messages and the old one does not receive them anymore - 
logfiledebug2 = tempdir.join('debug_log2.out') - - py.log.set_logger("debug", py.log.File(logfiledebug2)) - py.log.debug("hello world #6") - py.log.info("hello world #7") - py.log.warn("hello world #8") - py.log.error("hello world #9") - py.log.critical("hello world #10") - - # The debug message should have gone to the new debug file logger - lines = logfiledebug2.readlines() - assert lines == ['[debug] hello world #6\n'] - - # All other messages should have gone to the default file logger - lines = logfiledefault.readlines() - assert lines == ['[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n', - '[info] hello world #7\n', - '[warn] hello world #8\n', '[error] hello world #9\n', - '[critical] hello world #10\n'] - - # The old debug file logger should be unchanged - lines = logfiledebug1.readlines() - assert lines == ['[debug] hello world #1\n'] - - # We set stdout as the debug logger and verify that messages go to stdout - # and not to the previous 2 file loggers - p = tempdir.join('log_stdout_debug.out') - redirect = str(p) - saved = sys.stdout - sys.stdout = open(redirect, 'w') - - py.log.set_logger("debug", py.log.Stdout()) - py.log.debug("hello world #11") - py.log.info("hello world #12") - py.log.warn("hello world #13") - py.log.error("hello world #14") - py.log.critical("hello world #15") - - sys.stdout = saved - # The debug message should have gone to stdout - lines = open(redirect).readlines() - assert lines == ['[debug] hello world #11\n'] - - # All other messages should have gone to the default file logger - lines = logfiledefault.readlines() - assert lines == ['[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n', - '[info] hello world #7\n', - '[warn] hello world #8\n', '[error] hello world #9\n', - '[critical] hello world #10\n', - '[info] hello world #12\n', - '[warn] hello world #13\n', '[error] hello world #14\n', - '[critical] hello world #15\n'] - - # The 2 old debug file logger should be unchanged - lines = logfiledebug1.readlines() - assert lines == ['[debug] hello world #1\n'] - - lines = logfiledebug2.readlines() - assert lines == ['[debug] hello world #6\n'] - - # disabled for now; the syslog log file can usually be read only by root - # I manually inspected /var/log/messages and the entries were there - def no_test_log_syslog(self): - py.log.set_logger("default", py.log.Syslog()) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - - # disabled for now until I figure out how to read entries in the - # Event Logs on Windows - # I manually inspected the Application Log and the entries were there - def no_test_log_winevent(self): - py.log.set_logger("default", py.log.WinEvent()) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - - # disabled for now until I figure out how to properly pass the parameters - def no_test_log_email(self): - py.log.set_logger("default", py.log.Email(mailhost="gheorghiu.net", - fromaddr="grig", - toaddrs="grig", - subject = "py.log email")) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") From arigo at codespeak.net Wed Jun 15 13:35:37 2005 From: arigo at codespeak.net 
(arigo at codespeak.net) Date: Wed, 15 Jun 2005 13:35:37 +0200 (CEST) Subject: [py-svn] r13434 - py/dist/py/c-extension/greenlet Message-ID: <20050615113537.77CD227B6E@code1.codespeak.net> Author: arigo Date: Wed Jun 15 13:35:37 2005 New Revision: 13434 Added: py/dist/py/c-extension/greenlet/test_generator_nested.py Log: An improved generator example, by Richard Emslie. Added: py/dist/py/c-extension/greenlet/test_generator_nested.py ============================================================================== --- (empty file) +++ py/dist/py/c-extension/greenlet/test_generator_nested.py Wed Jun 15 13:35:37 2005 @@ -0,0 +1,151 @@ +from py.magic import greenlet + +class genlet(greenlet): + + def __init__(self, *args, **kwds): + self.args = args + self.kwds = kwds + self.child = None + + def run(self): + fn, = self.fn + fn(*self.args, **self.kwds) + + def __iter__(self): + return self + + def set_child(self, child): + self.child = child + + def next(self): + if self.child: + child, self.child = self.child, None + result = child.switch() + else: + self.parent = greenlet.getcurrent() + result = self.switch() + + if self: + return result + else: + raise StopIteration + +def Yield(value, level = 1): + g = greenlet.getcurrent() + + while level != 0: + if not isinstance(g, genlet): + raise RuntimeError, 'yield outside a genlet' + if level > 1: + g.parent.set_child(g) + g = g.parent + level -= 1 + + g.switch(value) + +def Genlet(func): + class Genlet(genlet): + fn = (func,) + return Genlet + +# ____________________________________________________________ + +def g1(n, seen): + for i in range(n): + seen.append(i+1) + yield i + +def g2(n, seen): + for i in range(n): + seen.append(i+1) + Yield(i) + +g2 = Genlet(g2) + +def nested(i): + Yield(i) + +def g3(n, seen): + for i in range(n): + seen.append(i+1) + nested(i) +g3 = Genlet(g3) + +def test_genlet_simple(): + + for g in [g1, g2, g3]: + seen = [] + for k in range(3): + for j in g(5, seen): + seen.append(j) + + assert seen == 3 * [1, 0, 2, 1, 3, 2, 4, 3, 5, 4] + +def test_genlet_bad(): + try: + Yield(10) + except RuntimeError: + pass + +test_genlet_bad() +test_genlet_simple() +test_genlet_bad() + +def a(n): + if n == 0: + return + for ii in ax(n-1): + Yield(ii) + Yield(n) +ax = Genlet(a) + +def test_nested_genlets(): + seen = [] + for ii in ax(5): + seen.append(ii) + +test_nested_genlets() + +def perms(l): + if len(l) > 1: + for e in l: + # No syntactical sugar for generator expressions + [Yield([e] + p) for p in perms([x for x in l if x!=e])] + else: + Yield(l) + +perms = Genlet(perms) + +def test_perms(): + gen_perms = perms(range(4)) + permutations = list(gen_perms) + assert len(permutations) == 4*3*2*1 + assert [0,1,2,3] in permutations + assert [3,2,1,0] in permutations + res = [] + for ii in zip(perms(range(4)), perms(range(3))): + res.append(ii) + # XXX Test to make sure we are working as a generator expression +test_perms() + + +def gr1(n): + for ii in range(1, n): + Yield(ii) + Yield(ii * ii, 2) + +gr1 = Genlet(gr1) + +def gr2(n, seen): + for ii in gr1(n): + seen.append(ii) + +gr2 = Genlet(gr2) + +def test_layered_genlets(): + seen = [] + for ii in gr2(5, seen): + seen.append(ii) + assert seen == [1, 1, 2, 4, 3, 9, 4, 16] + +test_layered_genlets() From rxe at codespeak.net Wed Jun 15 13:59:46 2005 From: rxe at codespeak.net (rxe at codespeak.net) Date: Wed, 15 Jun 2005 13:59:46 +0200 (CEST) Subject: [py-svn] r13435 - py/dist/py/documentation Message-ID: <20050615115946.E96AE27B53@code1.codespeak.net> Author: rxe Date: Wed Jun 15 13:59:45 2005 
New Revision: 13435 Modified: py/dist/py/documentation/getting-started.txt Log: Small reST fix. [also checking if i have commit rights!] Modified: py/dist/py/documentation/getting-started.txt ============================================================================== --- py/dist/py/documentation/getting-started.txt (original) +++ py/dist/py/documentation/getting-started.txt Wed Jun 15 13:59:45 2005 @@ -104,9 +104,10 @@ .. _`zope3`: http://www.zope3.org .. _twisted: http://www.twistedmatrix.org -.. _`get an account`: .. _future: future.html +.. _`get an account`: + get an account on codespeak --------------------------- From rxe at codespeak.net Wed Jun 15 14:41:23 2005 From: rxe at codespeak.net (rxe at codespeak.net) Date: Wed, 15 Jun 2005 14:41:23 +0200 (CEST) Subject: [py-svn] r13436 - py/dist/py/c-extension/greenlet Message-ID: <20050615124123.0EA8E27B62@code1.codespeak.net> Author: rxe Date: Wed Jun 15 14:41:21 2005 New Revision: 13436 Modified: py/dist/py/c-extension/greenlet/test_generator_nested.py Log: oups - at least let the nesting go further than one level deep. Modified: py/dist/py/c-extension/greenlet/test_generator_nested.py ============================================================================== --- py/dist/py/c-extension/greenlet/test_generator_nested.py (original) +++ py/dist/py/c-extension/greenlet/test_generator_nested.py Wed Jun 15 14:41:21 2005 @@ -19,7 +19,12 @@ def next(self): if self.child: - child, self.child = self.child, None + child = self.child + while child.child: + tmp = child + child = child.child + tmp.child = None + result = child.switch() else: self.parent = greenlet.getcurrent() From dstanek at codespeak.net Wed Jun 15 19:17:10 2005 From: dstanek at codespeak.net (dstanek at codespeak.net) Date: Wed, 15 Jun 2005 19:17:10 +0200 (CEST) Subject: [py-svn] r13447 - in py/dist/py: . compat compat/testing misc/testing Message-ID: <20050615171710.2F08627B7D@code1.codespeak.net> Author: dstanek Date: Wed Jun 15 19:17:01 2005 New Revision: 13447 Added: py/dist/py/compat/conftest.py (props changed) - copied unchanged from r13444, py/branch/dist-doctest/py/compat/conftest.py py/dist/py/compat/testing/ (props changed) - copied from r13444, py/branch/dist-doctest/py/compat/testing/ py/dist/py/compat/testing/test_doctest.py (props changed) - copied unchanged from r13444, py/branch/dist-doctest/py/compat/testing/test_doctest.py py/dist/py/compat/testing/test_doctest.txt (props changed) - copied unchanged from r13444, py/branch/dist-doctest/py/compat/testing/test_doctest.txt py/dist/py/compat/testing/test_doctest2.py (props changed) - copied unchanged from r13444, py/branch/dist-doctest/py/compat/testing/test_doctest2.py py/dist/py/compat/testing/test_doctest2.txt (props changed) - copied unchanged from r13444, py/branch/dist-doctest/py/compat/testing/test_doctest2.txt py/dist/py/compat/testing/test_optparse.py (props changed) - copied unchanged from r13444, py/branch/dist-doctest/py/compat/testing/test_optparse.py py/dist/py/compat/testing/test_textwrap.py (props changed) - copied unchanged from r13444, py/branch/dist-doctest/py/compat/testing/test_textwrap.py Modified: py/dist/py/__init__.py py/dist/py/compat/__init__.py (props changed) py/dist/py/compat/doctest.py (props changed) py/dist/py/compat/optparse.py (props changed) py/dist/py/compat/textwrap.py (contents, props changed) py/dist/py/initpkg.py py/dist/py/misc/testing/test_initpkg.py Log: Merged dist-doctest branch (-r13294:13384) into dist. 
The changeset added support for Python compatibility modules. See issue 8. Modified: py/dist/py/__init__.py ============================================================================== --- py/dist/py/__init__.py (original) +++ py/dist/py/__init__.py Wed Jun 15 19:17:01 2005 @@ -117,4 +117,8 @@ 'log.Email' : ('./log/consumer.py', 'Email'), 'log.Syslog' : ('./log/consumer.py', 'Syslog'), 'log.WinEvent' : ('./log/consumer.py', 'WinEvent'), + + 'compat.doctest' : ('./compat/doctest.py', None), + 'compat.optparse' : ('./compat/optparse.py', None), + 'compat.textwrap' : ('./compat/textwrap.py', None), }) Modified: py/dist/py/compat/textwrap.py ============================================================================== --- py/dist/py/compat/textwrap.py (original) +++ py/dist/py/compat/textwrap.py Wed Jun 15 19:17:01 2005 @@ -5,7 +5,7 @@ # Copyright (C) 2002, 2003 Python Software Foundation. # Written by Greg Ward -__revision__ = "$Id: textwrap.py,v 1.35.4.1 2005/03/05 02:38:32 gward Exp $" +__revision__ = "$Id$" import string, re Modified: py/dist/py/initpkg.py ============================================================================== --- py/dist/py/initpkg.py (original) +++ py/dist/py/initpkg.py Wed Jun 15 19:17:01 2005 @@ -66,6 +66,9 @@ assert fspath.startswith('./'), \ "%r is not an implementation path (XXX)" % (extpyish,) implmodule = self._loadimpl(fspath[:-3]) + if not modpath: # export the entire module + return implmodule + current = implmodule for x in modpath.split('.'): try: Modified: py/dist/py/misc/testing/test_initpkg.py ============================================================================== --- py/dist/py/misc/testing/test_initpkg.py (original) +++ py/dist/py/misc/testing/test_initpkg.py Wed Jun 15 19:17:01 2005 @@ -48,6 +48,7 @@ base.join('magic', 'greenlet.py'), base.join('bin'), base.join('execnet', 'script'), + base.join('compat'), ) for p in base.visit('*.py', py.path.checker(dotfile=0)): relpath = p.new(ext='').relto(base) From hpk at codespeak.net Thu Jun 16 01:29:07 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Thu, 16 Jun 2005 01:29:07 +0200 (CEST) Subject: [py-svn] r13461 - in py/dist/py/compat: . testing Message-ID: <20050615232907.1074D27B95@code1.codespeak.net> Author: hpk Date: Thu Jun 16 01:29:04 2005 New Revision: 13461 Added: py/dist/py/compat/testing/conftest.py - copied unchanged from r13460, py/dist/py/compat/conftest.py Removed: py/dist/py/compat/conftest.py Log: it is enough to have conftest in the testing directory Deleted: /py/dist/py/compat/conftest.py ============================================================================== --- /py/dist/py/compat/conftest.py Thu Jun 16 01:29:04 2005 +++ (empty file) @@ -1,5 +0,0 @@ -import py - -class Directory(py.test.collect.Directory): - def run(self): - py.test.skip("compat tests currently need to be run manually") From hpk at codespeak.net Thu Jun 16 14:10:26 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Thu, 16 Jun 2005 14:10:26 +0200 (CEST) Subject: [py-svn] r13475 - py/dist/py/compat/testing Message-ID: <20050616121026.7F42927B95@code1.codespeak.net> Author: hpk Date: Thu Jun 16 14:10:25 2005 New Revision: 13475 Added: py/dist/py/compat/testing/__init__.py py/dist/py/compat/testing/_findpy.py - copied unchanged from r13471, py/dist/py/bin/_findpy.py Modified: py/dist/py/compat/testing/test_doctest2.py Log: some help for David to move in the probably-right direction. 
- make sure we are using our local version of the py lib via _findpy.py - use the py.compat.doctest version, however, it fails with an import problem because the py/__init__.py + initpkg.py does not expose py.compat.doctest as a module but it should. (probably initpkg.initpkg() may need to be special cased for the None-selection argument) Added: py/dist/py/compat/testing/__init__.py ============================================================================== --- (empty file) +++ py/dist/py/compat/testing/__init__.py Thu Jun 16 14:10:25 2005 @@ -0,0 +1 @@ +# Modified: py/dist/py/compat/testing/test_doctest2.py ============================================================================== --- py/dist/py/compat/testing/test_doctest2.py (original) +++ py/dist/py/compat/testing/test_doctest2.py Thu Jun 16 14:10:25 2005 @@ -11,6 +11,8 @@ ????? """ +from _findpy import py +print py.__file__ from test import test_support @@ -107,7 +109,7 @@ clsm = classmethod(clsm) def test_main(): - from test import test_doctest2 + from py.__.compat.testing import test_doctest2 EXPECTED = 19 f, t = test_support.run_doctest(test_doctest2) if t != EXPECTED: @@ -116,7 +118,7 @@ # Pollute the namespace with a bunch of imported functions and classes, # to make sure they don't get tested. -from doctest import * +from py.compat.doctest import * if __name__ == '__main__': test_main() From hpk at codespeak.net Fri Jun 17 07:44:37 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Fri, 17 Jun 2005 07:44:37 +0200 (CEST) Subject: [py-svn] r13515 - in py/dist/py: . log log/testing Message-ID: <20050617054437.EE33027B7D@code1.codespeak.net> Author: hpk Date: Fri Jun 17 07:44:36 2005 New Revision: 13515 Modified: py/dist/py/__init__.py py/dist/py/log/consumer.py py/dist/py/log/producer.py py/dist/py/log/testing/test_log.py Log: issue3 in-progress refactored logging API - no severity levels offered by default anymore - no logging module used at the moment - simplified API - there now is a 'py.log.default' defaultlogger Modified: py/dist/py/__init__.py ============================================================================== --- py/dist/py/__init__.py (original) +++ py/dist/py/__init__.py Fri Jun 17 07:44:36 2005 @@ -99,24 +99,15 @@ 'xml.Namespace' : ('./xmlobj/xml.py', 'Namespace'), 'xml.escape' : ('./xmlobj/misc.py', 'escape'), - # logging API ('producers' and 'consumers') - 'log.Producer' : ('./log/producer.py', 'LogProducer'), - 'log.debug' : ('./log/producer.py', 'debug'), - 'log.info' : ('./log/producer.py', 'info'), - 'log.warn' : ('./log/producer.py', 'warn'), - 'log.error' : ('./log/producer.py', 'error'), - 'log.critical' : ('./log/producer.py', 'critical'), - - 'log.set_logger' : ('./log/producer.py', 'set_logger'), - 'log.getstate' : ('./log/producer.py', 'getstate'), - 'log.setstate' : ('./log/producer.py', 'setstate'), - - 'log.File' : ('./log/consumer.py', 'File'), - 'log.Stdout' : ('./log/consumer.py', 'Stdout'), - 'log.Stderr' : ('./log/consumer.py', 'Stderr'), - 'log.Email' : ('./log/consumer.py', 'Email'), - 'log.Syslog' : ('./log/consumer.py', 'Syslog'), - 'log.WinEvent' : ('./log/consumer.py', 'WinEvent'), + # logging API ('producers' and 'consumers' connected via keywords) + 'log.Producer' : ('./log/producer.py', 'Producer'), + 'log.default' : ('./log/producer.py', 'default'), + 'log._getstate' : ('./log/producer.py', '_getstate'), + 'log._setstate' : ('./log/producer.py', '_setstate'), + 'log.setconsumer' : ('./log/consumer.py', 'setconsumer'), + 'log.Path' : ('./log/consumer.py', 'Path'), 
+ 'log.STDOUT' : ('./log/consumer.py', 'STDOUT'), + 'log.STDERR' : ('./log/consumer.py', 'STDERR'), 'compat.doctest' : ('./compat/doctest.py', None), 'compat.optparse' : ('./compat/optparse.py', None), Modified: py/dist/py/log/consumer.py ============================================================================== --- py/dist/py/log/consumer.py (original) +++ py/dist/py/log/consumer.py Fri Jun 17 07:44:36 2005 @@ -1,104 +1,36 @@ import py -import os, sys, logging -import logging.handlers +import sys -class LogConsumer(object): - """Log "consumer" API which receives messages from - a 'producer' object and displays them using various - logging mechanisms (stdout, stderr, files, syslog, etc.)""" +class File(object): + def __init__(self, f): + assert hasattr(f, 'write') + assert isinstance(f, file) or not hasattr(f, 'open') + self._file = f + + def __call__(self, msg): + print >>self._file, str(msg) + +class Path(File): + def __init__(self, filename, append=False): + mode = append and 'a' or 'w' + f = open(str(filename), mode, buffering=1) + super(Path, self).__init__(f) - _handlers = {} +def STDOUT(msg): + print >>sys.stdout, str(msg) + +def STDERR(msg): + print >>sys.stderr, str(msg) - def __init__(self): - self.formatter = logging.Formatter('%(message)s') +def setconsumer(keywords, consumer): + # normalize to tuples + if isinstance(keywords, str): + keywords = tuple(map(None, keywords.split())) + elif not isinstance(keywords, tuple): + raise TypeError("key %r is not a string or tuple" % (keywords,)) + if consumer is not None and not callable(consumer): + if not hasattr(consumer, 'write'): + raise TypeError("%r should be None, callable or file-like" % (consumer,)) + consumer = File(consumer) + py.log.Producer.keywords2consumer[keywords] = consumer - def File(self, filename, mode='a'): - filename = str(filename) - logger_name = "py.log.file.%s" % filename - handler_type = logging.FileHandler - handler_args = {'filename': filename, - 'mode': mode, - } - return self._logger_func(logger_name, handler_type, **handler_args) - - def Stdout(self): - # Add str(sys.stdout) to logger name because sys.stdout might be redirected - # to a file, and in this case we need to distinguish between files - logger_name = 'py.log.stdout.%s' % str(sys.stdout) - handler_type = logging.StreamHandler - handler_args = {'strm': sys.stdout, - } - return self._logger_func(logger_name, handler_type, **handler_args) - - def Stderr(self): - # Add str(sys.stderr) to logger name because sys.stderr might be redirected - # to a file, and in this case we need to distinguish between files - logger_name = 'py.log.stderr.%s' % str(sys.stderr) - handler_type = logging.StreamHandler - handler_args = {'strm': sys.stderr, - } - return self._logger_func(logger_name, handler_type, **handler_args) - - def Syslog(self, address=('localhost', 514), facility=1): - logger_name = 'py.log.syslog' - handler_type = logging.handlers.SysLogHandler - handler_args = {'address': address, - 'facility': facility, - } - return self._logger_func(logger_name, handler_type, **handler_args) - - def WinEvent(self, appname='pylib', logtype='Application'): - logger_name = 'py.log.winevent' - handler_type = logging.handlers.NTEventLogHandler - handler_args = {'appname': appname, - 'logtype': logtype, - } - return self._logger_func(logger_name, handler_type, **handler_args) - - def Email(self, mailhost, fromaddr, toaddrs, subject): - logger_name = 'py.log.email' - handler_type = logging.handlers.SMTPHandler - handler_args = {'mailhost': mailhost, - 'fromaddr': 
fromaddr, - 'toaddrs': toaddrs, - 'subject': subject, - } - return self._logger_func(logger_name, handler_type, **handler_args) - - def _logger_func(self, logger_name, handler_type, **handler_args): - logger = logging.getLogger(logger_name) - #print "got logger " + str(logger) + "for name " + logger_name - logger.setLevel(logging.DEBUG) - - # Add handler to logger only if it hasn't been already set for - # the same logger name - if not self._handlers.has_key(logger_name): - #print "adding handler for logger " + logger_name - handler = handler_type(**handler_args) - handler.setFormatter(self.formatter) - logger.addHandler(handler) - self._handlers[logger_name] = handler - def message_processing_func(message): - self.log_message(logger, message) - return message_processing_func - - def log_message(self, logger, message): - for keyword in message.keywords: - if keyword.startswith('debug'): - logger.debug(message) - if keyword.startswith('info'): - logger.info(message) - if keyword.startswith('warn'): - logger.warn(message) - if keyword.startswith('err'): - logger.error(message) - if keyword.startswith('crit'): - logger.critical(message) - -consumer = LogConsumer() -File = consumer.File -Stdout = consumer.Stdout -Stderr = consumer.Stderr -Syslog = consumer.Syslog -WinEvent = consumer.WinEvent -Email = consumer.Email Modified: py/dist/py/log/producer.py ============================================================================== --- py/dist/py/log/producer.py (original) +++ py/dist/py/log/producer.py Fri Jun 17 07:44:36 2005 @@ -24,13 +24,14 @@ def __str__(self): return self.prefix() + self.content() -class LogProducer(object): - """Log "producer" API which sends messages to be logged - to a 'consumer' object, which then prints them to stdout, - stderr, files, etc.""" +class Producer(object): + """ Log producer API which sends messages to be logged + to a 'consumer' object, which then prints them to stdout, + stderr, files, etc. + """ Message = Message # to allow later customization - _registry = {} + keywords2consumer = {} def __init__(self, keywords): if isinstance(keywords, str): @@ -38,48 +39,38 @@ self.keywords = keywords def __repr__(self): - return "" % ":".join(self.keywords) + return "" % ":".join(self.keywords) def __getattr__(self, name): if name[0] == '_': raise AttributeError, name - return LogProducer(self.keywords + (name,)) + producer = self.__class__(self.keywords + (name,)) + self.name = producer + return producer def __call__(self, *args): - message = self.Message(self.keywords, args) - try: - func = self._registry[message.keywords] - except KeyError: - # XXX find best match, for now it's a hack/simplistic + func = self._getconsumer(self.keywords) + if func is not None: + func(self.Message(self.keywords, args)) + + def _getconsumer(self, keywords): + for i in range(len(self.keywords)): try: - func = self._registry[("default",)] + return self.keywords2consumer[self.keywords[:i+1]] except KeyError: - print str(message) - return - func(message) - - -def set_logger(name, func): - assert callable(func) - keywords = tuple(map(None, name.split())) - LogProducer._registry[keywords] = func - # if default logger is set, also reset the other ones - # XXX is this a good idea? - if keywords == ('default',): - for k in [('debug',), ('info',), ('warn',), - ('error',), ('critical',)]: - LogProducer._registry[k] = func - -def getstate(): - """ return logging registry state. 
""" - # class methods dealing with registry - return LogProducer._registry.copy() - -def setstate(state): - """ set logging registry state. """ - LogProducer._registry = state - -# some default severity producers -_ = globals() -for x in 'debug info warn split error critical'.split(): - _[x] = LogProducer(x) + continue + return self.keywords2consumer.get('default', default_consumer) + +default = Producer('default') + +def _getstate(): + return Producer.keywords2consumer.copy() + +def _setstate(state): + Producer.keywords2consumer.clear() + Producer.keywords2consumer.update(state) + +def default_consumer(msg): + print str(msg) + +Producer.keywords2consumer['default'] = default_consumer Modified: py/dist/py/log/testing/test_log.py ============================================================================== --- py/dist/py/log/testing/test_log.py (original) +++ py/dist/py/log/testing/test_log.py Fri Jun 17 07:44:36 2005 @@ -1,4 +1,5 @@ import py +from py.__.misc.simplecapture import callcapture import sys def setup_module(mod): @@ -6,393 +7,125 @@ class TestLogProducer: def setup_method(self, meth): - self.state = py.log.getstate() + self.state = py.log._getstate() + def teardown_method(self, meth): - py.log.setstate(self.state) + py.log._setstate(self.state) def test_producer_repr(self): - d = py.log.debug - assert repr(d).find('debug') != -1 + d = py.log.default + assert repr(d).find('default') != -1 def test_produce_one_keyword(self): l = [] - py.log.set_logger('debug', l.append) - py.log.debug("hello world") + py.log.setconsumer('s1', l.append) + py.log.Producer('s1')("hello world") assert len(l) == 1 msg = l[0] assert msg.content().startswith('hello world') - assert msg.prefix() == '[debug] ' - assert str(msg) == "[debug] hello world" + assert msg.prefix() == '[s1] ' + assert str(msg) == "[s1] hello world" def test_producer_class(self): p = py.log.Producer('x1') l = [] - py.log.set_logger('x1', l.append) + py.log.setconsumer(p.keywords, l.append) p("hello") assert len(l) == 1 assert len(l[0].keywords) == 1 assert 'x1' == l[0].keywords[0] - def test_default_logger(self): - l = [] - py.log.set_logger("default", l.append) - py.log.debug("hello") - py.log.warn("world") - py.log.info("I") - py.log.error("am") - py.log.critical("Sam") - assert len(l) == 5 - msg1, msg2, msg3, msg4, msg5 = l - - assert 'debug' in msg1.keywords - assert 'warn' in msg2.keywords - assert 'info' in msg3.keywords - assert 'error' in msg4.keywords - assert 'critical' in msg5.keywords - - assert msg1.content() == 'hello' - assert msg2.content() == 'world' - assert msg3.content() == 'I' - assert msg4.content() == 'am' - assert msg5.content() == 'Sam' - class TestLogConsumer: + def setup_method(self, meth): + self.state = py.log._getstate() + def teardown_method(self, meth): + py.log._setstate(self.state) - def test_log_stdout(self): - # We redirect stdout so that we can verify that - # the log messages have been printed to it - p = tempdir.join('log_stdout.out') - redirect = str(p) - sys.saved = sys.stdout - sys.stdout = open(redirect, 'w') - - # Start of the 'consumer' code - py.log.set_logger("default", py.log.Stdout()) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - # End of the 'consumer' code - - sys.stdout = sys.saved - lines = open(redirect).readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - 
'[critical] hello world #5\n'] + def test_log_none(self): + log = py.log.Producer("XXX") + l = [] + py.log.setconsumer('XXX', l.append) + log("1") + assert l + l[:] = [] + py.log.setconsumer('XXX', None) + log("2") + assert not l + + def test_log_default_stdout(self): + res, out, err = callcapture(py.log.default, "hello") + assert out.strip() == "[default] hello" + + def test_simple_consumer_match(self): + l = [] + py.log.setconsumer("x1", l.append) + p = py.log.Producer("x1 x2") + p("hello") + assert l + assert l[0].content() == "hello" def test_log_stderr(self): - # We redirect stderr so that we can verify that - # the log messages have been printed to it - p = tempdir.join('log_stderr.out') - redirect = str(p) - sys.saved = sys.stderr - sys.stderr = open(redirect, 'w') - - # Start of the 'consumer' code - py.log.set_logger("default", py.log.Stderr()) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - # End of the 'consumer' code - - sys.stderr = sys.saved - lines = open(redirect).readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] + py.log.setconsumer("default", py.log.STDERR) + res, out, err = callcapture(py.log.default, "hello") + assert not out + assert err.strip() == '[default] hello' def test_log_file(self): custom_log = tempdir.join('log.out') - - # Start of the 'consumer' code - py.log.set_logger("default", py.log.File(custom_log)) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - # End of the 'consumer' code - - lines = custom_log.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] + py.log.setconsumer("default", open(str(custom_log), 'w', buffering=1)) + py.log.default("hello world #1") + assert custom_log.readlines() == ['[default] hello world #1\n'] + + py.log.setconsumer("default", py.log.Path(custom_log)) + py.log.default("hello world #2") + assert custom_log.readlines() == ['[default] hello world #2\n'] # no append by default! 
def test_log_file_append_mode(self): logfilefn = tempdir.join('log_append.out') # The append mode is on by default, so we don't need to specify it for File - py.log.set_logger("default", py.log.File(logfilefn)) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - + py.log.setconsumer("default", py.log.Path(logfilefn, append=True)) + py.log.default("hello world #1") lines = logfilefn.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - # We log 5 more lines that should be appended to the log - py.log.set_logger("default", py.log.File(logfilefn)) - py.log.debug("hello world #6") - py.log.info("hello world #7") - py.log.warn("hello world #8") - py.log.error("hello world #9") - py.log.critical("hello world #10") - - lines = logfilefn.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n', - '[debug] hello world #6\n', '[info] hello world #7\n', - '[warn] hello world #8\n', '[error] hello world #9\n', - '[critical] hello world #10\n'] - - - def test_log_file_write_mode(self): - logfilefn = tempdir.join('log_write.out') - logfilefn.write("This line should be zapped when we start logging\n") - - # We specify mode='w' for the File - py.log.set_logger("default", py.log.File(logfilefn, mode='w')) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - + assert lines == ['[default] hello world #1\n'] + py.log.setconsumer("default", py.log.Path(logfilefn, append=True)) + py.log.default("hello world #1") lines = logfilefn.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - + assert lines == ['[default] hello world #1\n', + '[default] hello world #1\n'] + def test_keyword_based_log_files(self): - logfiledebug = tempdir.join('log_debug.out') - logfileinfo = tempdir.join('log_info.out') - logfilewarn = tempdir.join('log_warn.out') - logfileerror = tempdir.join('log_error.out') - logfilecritical = tempdir.join('log_critical.out') - - py.log.set_logger("debug", py.log.File(logfiledebug)) - py.log.set_logger("info", py.log.File(logfileinfo)) - py.log.set_logger("warn", py.log.File(logfilewarn)) - py.log.set_logger("error", py.log.File(logfileerror)) - py.log.set_logger("critical", py.log.File(logfilecritical)) - - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - - lines = logfiledebug.readlines() - assert lines == ['[debug] hello world #1\n'] - - lines = logfileinfo.readlines() - assert lines == ['[info] hello world #2\n'] - - lines = logfilewarn.readlines() - assert lines == ['[warn] hello world #3\n'] - - lines = logfileerror.readlines() - assert lines == ['[error] hello world #4\n'] - - lines = logfilecritical.readlines() - assert lines == ['[critical] hello world #5\n'] - - def test_reassign_default_logger(self): - logfiledefault1 = tempdir.join('default_log1.out') - - # We set a file logger as the default logger - py.log.set_logger("default", py.log.File(logfiledefault1)) - py.log.debug("hello 
world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - - lines = logfiledefault1.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - # We set a different file logger as the default logger and verify - # that the new one receives messages and the old one does not receive them anymore - logfiledefault2 = tempdir.join('default_log2.out') - - py.log.set_logger("default", py.log.File(logfiledefault2)) - py.log.debug("hello world #6") - py.log.info("hello world #7") - py.log.warn("hello world #8") - py.log.error("hello world #9") - py.log.critical("hello world #10") - - lines = logfiledefault1.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - lines = logfiledefault2.readlines() - assert lines == ['[debug] hello world #6\n', '[info] hello world #7\n', - '[warn] hello world #8\n', '[error] hello world #9\n', - '[critical] hello world #10\n'] - - # We set stderr as the default logger and verify that messages go to stderr - # and not to the previous 2 file loggers - p = tempdir.join('log_stderr_default.out') - redirect = str(p) - saved = sys.stderr - sys.stderr = open(redirect, 'w') - - py.log.set_logger("default", py.log.Stderr()) - py.log.debug("hello world #11") - py.log.info("hello world #12") - py.log.warn("hello world #13") - py.log.error("hello world #14") - py.log.critical("hello world #15") - - sys.stderr = saved - lines = open(redirect).readlines() - assert lines == ['[debug] hello world #11\n', '[info] hello world #12\n', - '[warn] hello world #13\n', '[error] hello world #14\n', - '[critical] hello world #15\n'] - - lines = logfiledefault1.readlines() - assert lines == ['[debug] hello world #1\n', '[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - lines = logfiledefault2.readlines() - assert lines == ['[debug] hello world #6\n', '[info] hello world #7\n', - '[warn] hello world #8\n', '[error] hello world #9\n', - '[critical] hello world #10\n'] - - def test_reassign_debug_logger(self): - logfiledefault = tempdir.join('default.out') - logfiledebug1 = tempdir.join('debug_log1.out') - - # We set a file logger as the default logger in non-append mode - py.log.set_logger("default", py.log.File(logfiledefault, mode='w')) - - # We set a file logger as the debug logger - py.log.set_logger("debug", py.log.File(logfiledebug1)) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") - - # The debug message should have gone to the debug file logger - lines = logfiledebug1.readlines() - assert lines == ['[debug] hello world #1\n'] - - # All other messages should have gone to the default file logger - lines = logfiledefault.readlines() - assert lines == ['[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n'] - - # We set a different file logger as the debug logger and verify - # that the new one receives messages and the old one does not receive them anymore - logfiledebug2 = tempdir.join('debug_log2.out') - - py.log.set_logger("debug", py.log.File(logfiledebug2)) - py.log.debug("hello world #6") - py.log.info("hello 
world #7") - py.log.warn("hello world #8") - py.log.error("hello world #9") - py.log.critical("hello world #10") - - # The debug message should have gone to the new debug file logger - lines = logfiledebug2.readlines() - assert lines == ['[debug] hello world #6\n'] - - # All other messages should have gone to the default file logger - lines = logfiledefault.readlines() - assert lines == ['[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n', - '[info] hello world #7\n', - '[warn] hello world #8\n', '[error] hello world #9\n', - '[critical] hello world #10\n'] - - # The old debug file logger should be unchanged - lines = logfiledebug1.readlines() - assert lines == ['[debug] hello world #1\n'] - - # We set stdout as the debug logger and verify that messages go to stdout - # and not to the previous 2 file loggers - p = tempdir.join('log_stdout_debug.out') - redirect = str(p) - saved = sys.stdout - sys.stdout = open(redirect, 'w') - - py.log.set_logger("debug", py.log.Stdout()) - py.log.debug("hello world #11") - py.log.info("hello world #12") - py.log.warn("hello world #13") - py.log.error("hello world #14") - py.log.critical("hello world #15") - - sys.stdout = saved - # The debug message should have gone to stdout - lines = open(redirect).readlines() - assert lines == ['[debug] hello world #11\n'] - - # All other messages should have gone to the default file logger - lines = logfiledefault.readlines() - assert lines == ['[info] hello world #2\n', - '[warn] hello world #3\n', '[error] hello world #4\n', - '[critical] hello world #5\n', - '[info] hello world #7\n', - '[warn] hello world #8\n', '[error] hello world #9\n', - '[critical] hello world #10\n', - '[info] hello world #12\n', - '[warn] hello world #13\n', '[error] hello world #14\n', - '[critical] hello world #15\n'] - - # The 2 old debug file logger should be unchanged - lines = logfiledebug1.readlines() - assert lines == ['[debug] hello world #1\n'] + logfiles = [] + keywords = 'k1 k2 k3'.split() + for key in keywords: + path = tempdir.join(key) + py.log.setconsumer(key, py.log.Path(path)) + + py.log.Producer('k1')('1') + py.log.Producer('k2')('2') + py.log.Producer('k3')('3') + + for key in keywords: + path = tempdir.join(key) + assert path.read().strip() == '[%s] %s' % (key, key[-1]) - lines = logfiledebug2.readlines() - assert lines == ['[debug] hello world #6\n'] - # disabled for now; the syslog log file can usually be read only by root # I manually inspected /var/log/messages and the entries were there def no_test_log_syslog(self): - py.log.set_logger("default", py.log.Syslog()) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") + py.log.setconsumer("default", py.log.Syslog()) + py.log.default("hello world #1") # disabled for now until I figure out how to read entries in the # Event Logs on Windows # I manually inspected the Application Log and the entries were there def no_test_log_winevent(self): - py.log.set_logger("default", py.log.WinEvent()) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") + py.log.setconsumer("default", py.log.WinEvent()) + py.log.default("hello world #1") # disabled for now until I figure out how to properly pass the parameters def no_test_log_email(self): - py.log.set_logger("default", 
py.log.Email(mailhost="gheorghiu.net", - fromaddr="grig", - toaddrs="grig", - subject = "py.log email")) - py.log.debug("hello world #1") - py.log.info("hello world #2") - py.log.warn("hello world #3") - py.log.error("hello world #4") - py.log.critical("hello world #5") + py.log.setconsumer("default", py.log.Email(mailhost="gheorghiu.net", + fromaddr="grig", + toaddrs="grig", + subject = "py.log email")) + py.log.default("hello world #1") From hpk at codespeak.net Sat Jun 18 21:24:44 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sat, 18 Jun 2005 21:24:44 +0200 (CEST) Subject: [py-svn] r13588 - in py/dist/py/log: . testing Message-ID: <20050618192444.B4A5827B55@code1.codespeak.net> Author: hpk Date: Sat Jun 18 21:24:43 2005 New Revision: 13588 Modified: py/dist/py/log/producer.py py/dist/py/log/testing/test_log.py Log: fix multi dispatch Modified: py/dist/py/log/producer.py ============================================================================== --- py/dist/py/log/producer.py (original) +++ py/dist/py/log/producer.py Sat Jun 18 21:24:43 2005 @@ -54,9 +54,9 @@ func(self.Message(self.keywords, args)) def _getconsumer(self, keywords): - for i in range(len(self.keywords)): + for i in range(len(self.keywords), 0, -1): try: - return self.keywords2consumer[self.keywords[:i+1]] + return self.keywords2consumer[self.keywords[:i]] except KeyError: continue return self.keywords2consumer.get('default', default_consumer) Modified: py/dist/py/log/testing/test_log.py ============================================================================== --- py/dist/py/log/testing/test_log.py (original) +++ py/dist/py/log/testing/test_log.py Sat Jun 18 21:24:43 2005 @@ -64,6 +64,17 @@ assert l assert l[0].content() == "hello" + def test_multi_consumer(self): + l = [] + py.log.setconsumer("x1", l.append) + py.log.setconsumer("x1 x2", None) + p = py.log.Producer("x1 x2") + p("hello") + assert not l + py.log.Producer("x1")("hello") + assert l + assert l[0].content() == "hello" + def test_log_stderr(self): py.log.setconsumer("default", py.log.STDERR) res, out, err = callcapture(py.log.default, "hello") From cfbolz at codespeak.net Mon Jun 20 11:32:10 2005 From: cfbolz at codespeak.net (cfbolz at codespeak.net) Date: Mon, 20 Jun 2005 11:32:10 +0200 (CEST) Subject: [py-svn] r13609 - in py/dist/py/log: . 
testing Message-ID: <20050620093210.A32C027B5A@code1.codespeak.net> Author: cfbolz Date: Mon Jun 20 11:32:06 2005 New Revision: 13609 Modified: py/dist/py/log/producer.py py/dist/py/log/testing/test_log.py Log: fixed producer's __getattr__ + test Modified: py/dist/py/log/producer.py ============================================================================== --- py/dist/py/log/producer.py (original) +++ py/dist/py/log/producer.py Mon Jun 20 11:32:06 2005 @@ -45,7 +45,7 @@ if name[0] == '_': raise AttributeError, name producer = self.__class__(self.keywords + (name,)) - self.name = producer + setattr(self, name, producer) return producer def __call__(self, *args): Modified: py/dist/py/log/testing/test_log.py ============================================================================== --- py/dist/py/log/testing/test_log.py (original) +++ py/dist/py/log/testing/test_log.py Mon Jun 20 11:32:06 2005 @@ -35,6 +35,11 @@ assert len(l[0].keywords) == 1 assert 'x1' == l[0].keywords[0] + def test_producer_caching(self): + p = py.log.Producer('x1') + x2 = p.x2 + assert x2 is p.x2 + class TestLogConsumer: def setup_method(self, meth): self.state = py.log._getstate() From dstanek at codespeak.net Mon Jun 20 11:43:06 2005 From: dstanek at codespeak.net (dstanek at codespeak.net) Date: Mon, 20 Jun 2005 11:43:06 +0200 (CEST) Subject: [py-svn] r13610 - in py/dist/py: . misc/testing Message-ID: <20050620094306.F242427B5A@code1.codespeak.net> Author: dstanek Date: Mon Jun 20 11:43:04 2005 New Revision: 13610 Modified: py/dist/py/__init__.py py/dist/py/initpkg.py py/dist/py/misc/testing/test_initpkg.py Log: Made changes to the export system to allow the compatibility modules to be imported in more natural ways. Before this change they could only be used by importing py and then using their fully qualified path to access them. Modified: py/dist/py/__init__.py ============================================================================== --- py/dist/py/__init__.py (original) +++ py/dist/py/__init__.py Mon Jun 20 11:43:04 2005 @@ -3,7 +3,7 @@ py.test, an interactive testing tool which supports unit-testing with practically no boilerplate. 
""" -from initpkg import initpkg +from initpkg import initpkg, RealModule initpkg(__name__, description = "py.test and the py lib", @@ -109,7 +109,7 @@ 'log.STDOUT' : ('./log/consumer.py', 'STDOUT'), 'log.STDERR' : ('./log/consumer.py', 'STDERR'), - 'compat.doctest' : ('./compat/doctest.py', None), - 'compat.optparse' : ('./compat/optparse.py', None), - 'compat.textwrap' : ('./compat/textwrap.py', None), + 'compat.doctest' : ('./compat/doctest.py', RealModule), + 'compat.optparse' : ('./compat/optparse.py', RealModule), + 'compat.textwrap' : ('./compat/textwrap.py', RealModule), }) Modified: py/dist/py/initpkg.py ============================================================================== --- py/dist/py/initpkg.py (original) +++ py/dist/py/initpkg.py Mon Jun 20 11:43:04 2005 @@ -22,7 +22,8 @@ """ from __future__ import generators -import sys, os +import sys +import os assert sys.version_info >= (2,2,0), "py lib requires python 2.2 or higher" ModuleType = type(sys.modules[__name__]) @@ -66,7 +67,7 @@ assert fspath.startswith('./'), \ "%r is not an implementation path (XXX)" % (extpyish,) implmodule = self._loadimpl(fspath[:-3]) - if not modpath: # export the entire module + if not isinstance(modpath, basestring): # export the entire module return implmodule current = implmodule @@ -204,6 +205,39 @@ __dict__ = property(getdict) del getdict +class RealModule(ModuleType): + def __init__(self, pkg, name, extpy): + self.__package__ = pkg + self.__name__ = name + self.__extpy__ = extpy + self.__isimported__ = False + + def __getattr__(self, name): + dct = self.__doimport() + if not name in dct: + raise AttributeError, name + return dct[name] + + def __repr__(self): + return '' % (self.__name__, ) + + def __doimport(self): + dictdescr = ModuleType.__dict__['__dict__'] + dct = dictdescr.__get__(self) # avoid infinite recursion + if not self.__isimported__: + module = self.__package__._resolve(self.__extpy__) + dct.update(module.__dict__) + self.__isimported__ = True + return dct + + def getdict(self): + self.__doimport() + dictdescr = ModuleType.__dict__['__dict__'] + return dictdescr.__get__(self) + + __dict__ = property(getdict) + del getdict + # --------------------------------------------------- # Bootstrap Virtual Module Hierarchy # --------------------------------------------------- @@ -230,6 +264,18 @@ seen[current] = mod = Module(pkg, current) setattr(seen[previous], name, mod) setmodule(current, mod) + + if not isinstance(extpy[1], basestring): + klass = extpy[1] + previous = current + name = pyparts[-1] + current += '.' 
+ name + if current not in seen: + seen[current] = mod = klass(pkg, current, extpy) + setattr(seen[previous], name, mod) + setmodule(current, mod) + continue + mod = seen[current] if not hasattr(mod, '__map__'): assert mod is pkg.module, \ Modified: py/dist/py/misc/testing/test_initpkg.py ============================================================================== --- py/dist/py/misc/testing/test_initpkg.py (original) +++ py/dist/py/misc/testing/test_initpkg.py Mon Jun 20 11:43:04 2005 @@ -2,6 +2,7 @@ import py import types +import sys def checksubpackage(name): obj = getattr(py, name) @@ -98,6 +99,133 @@ from py.process import cmdexec as cmdexec2 assert cmdexec is cmdexec2 +# +# test support for importing modules +# + +class TestRealModule: + + def setup_class(cls): + cls.tmpdir = py.test.ensuretemp('test_initpkg') + sys.path = [str(cls.tmpdir)] + sys.path + pkgdir = cls.tmpdir.ensure('realtest', dir=1) + + tfile = pkgdir.join('__init__.py') + tfile.write(py.code.Source("""if True: + import py + py.initpkg('realtest', { + 'module': ('./testmodule.py', py.__.initpkg.RealModule) + }) + """)) + + tfile = pkgdir.join('testmodule.py') + tfile.write(py.code.Source("""if True: + __all__ = ['mytest0', 'mytest1', 'MyTest'] + + def mytest0(): + pass + def mytest1(): + pass + class MyTest: + pass + + """)) + + import realtest # need to mimic what a user would do + #py.initpkg('realtest', { + # 'module': ('./testmodule.py', None) + #}) + + def setup_method(self, *args): + """Unload the test modules before each test.""" + module_names = ['realtest.module', 'realtest'] + for modname in module_names: + if modname in sys.modules: + del sys.modules[modname] + + def test_realmodule(self): + """Testing 'import realtest.module'""" + import realtest.module + assert 'realtest.module' in sys.modules + assert getattr(realtest.module, 'mytest0') + + def test_realmodule_from(self): + """Testing 'from test import module'.""" + from realtest import module + assert getattr(module, 'mytest1') + + def test_realmodule_star(self): + """Testing 'from test.module import *'.""" + tfile = self.tmpdir.join('startest.py') + tfile.write(py.code.Source("""if True: + from realtest.module import * + globals()['mytest0'] + globals()['mytest1'] + globals()['MyTest'] + """)) + import startest # an exception will be raise if an error occurs + + def test_realmodule_dict_import(self): + "Test verifying that accessing the __dict__ invokes the import" + import realtest.module + assert realtest.module.__isimported__ == False + moddict = dir(realtest.module) + assert realtest.module.__isimported__ == True + assert 'mytest0' in moddict + assert 'mytest1' in moddict + assert 'MyTest' in moddict + + +#class TestStdHook: +# """Tests imports for the standard Python library hook.""" +# +# def setup_method(self, *args): +# """Unload the test modules before each test.""" +# module_names = ['py.std.StringIO', 'py.std', 'py'] +# for modname in module_names: +# if modname in sys.modules: +# del sys.modules[modname] +# +# def test_std_import_simple(self): +# import py +# StringIO = py.std.StringIO +# assert 'py' in sys.modules +# assert 'py.std' in sys.modules +# assert 'py.std.StringIO' in sys.modules +# assert hasattr(py.std.StringIO, 'StringIO') +# +# def test_std_import0(self): +# """Testing 'import py.std.StringIO'.""" +# import py.std.StringIO +# assert 'py' in sys.modules +# assert 'py.std' in sys.modules +# assert 'py.std.StringIO' in sys.modules +# assert hasattr(py.std.StringIO, 'StringIO') +# +# def test_std_import1(self): +# """Testing 
'from py import std'.""" +# from py import std +# assert 'py' in sys.modules +# assert 'py.std' in sys.modules +# +# def test_std_from(self): +# """Testing 'from py.std import StringIO'.""" +# from py.std import StringIO +# assert getattr(StringIO, 'StringIO') +# +# def test_std_star(self): +# "Test from py.std.string import *" +# """Testing 'from test.module import *'.""" +# tmpdir = py.test.ensuretemp('test_initpkg') +# tfile = tmpdir.join('stdstartest.py') +# tfile.write(py.code.Source("""if True: +# from realtest.module import * +# globals()['mytest0'] +# globals()['mytest1'] +# globals()['MyTest'] +# """)) +# import stdstartest # an exception will be raise if an error occurs + ##def test_help(): # help(std.path) # #assert False From hpk at codespeak.net Mon Jun 20 11:50:08 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Mon, 20 Jun 2005 11:50:08 +0200 (CEST) Subject: [py-svn] r13611 - py/dist/py/misc/testing Message-ID: <20050620095008.0A3B227B5A@code1.codespeak.net> Author: hpk Date: Mon Jun 20 11:50:07 2005 New Revision: 13611 Modified: py/dist/py/misc/testing/test_initpkg.py Log: py.code.Source usually takes care of re-indenting things appropriately Modified: py/dist/py/misc/testing/test_initpkg.py ============================================================================== --- py/dist/py/misc/testing/test_initpkg.py (original) +++ py/dist/py/misc/testing/test_initpkg.py Mon Jun 20 11:50:07 2005 @@ -111,7 +111,7 @@ pkgdir = cls.tmpdir.ensure('realtest', dir=1) tfile = pkgdir.join('__init__.py') - tfile.write(py.code.Source("""if True: + tfile.write(py.code.Source(""" import py py.initpkg('realtest', { 'module': ('./testmodule.py', py.__.initpkg.RealModule) @@ -119,7 +119,7 @@ """)) tfile = pkgdir.join('testmodule.py') - tfile.write(py.code.Source("""if True: + tfile.write(py.code.Source(""" __all__ = ['mytest0', 'mytest1', 'MyTest'] def mytest0(): From arigo at codespeak.net Mon Jun 20 23:14:48 2005 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Jun 2005 23:14:48 +0200 (CEST) Subject: [py-svn] r13634 - in py/branch/execnet-refactoring: . testing Message-ID: <20050620211448.6C28D27B51@code1.codespeak.net> Author: arigo Date: Mon Jun 20 23:14:45 2005 New Revision: 13634 Added: py/branch/execnet-refactoring/ - copied from r13633, py/dist/py/execnet/ py/branch/execnet-refactoring/NOTES Modified: py/branch/execnet-refactoring/channel.py py/branch/execnet-refactoring/gateway.py py/branch/execnet-refactoring/message.py py/branch/execnet-refactoring/register.py py/branch/execnet-refactoring/testing/test_gateway.py Log: Refactoring execnet with two ideas in mind: - shutdown should not be driven by explicit messages, but by the closing of the underlying InputOutput. This seems to work now. - channels should be garbage-collected even if they are not closed manually. See NOTES. This needs more debugging, hence the branch. Added: py/branch/execnet-refactoring/NOTES ============================================================================== --- (empty file) +++ py/branch/execnet-refactoring/NOTES Mon Jun 20 23:14:45 2005 @@ -0,0 +1,95 @@ + +The public API of channels make them appear either opened or closed. +When a channel is closed, we can't send any more items, and it will not +receive any more items than already queued. + +Callbacks make the situation slightly more subtle. Callbacks are +attached to the ChannelFactory object, so that Channel objects can be +garbage-collected and still leave behind an active callback that can +continue to receive items. 
+ +The CHANNEL_CLOSE message is sent when a channel id is about to be removed +from the ChannelFactory, which means when the Channel object has been +garbage-collected *and* there is no callback any more. + +If a Channel object is garbage-collected but the ChannelFactory has a +callback for it, a CHANNEL_LAST_MESSAGE message is sent. It is only useful +if both sides' Channel objects have an associated callback. In this +situation, CHANNEL_LAST_MESSAGE allows its receiver to un-register its own +callback; if/when in addition the receiver side also looses the last +reference to its Channel object, the Channel is closed. So in this particular +situation both sides must forget about the Channel object for it to be +automatically closed. + + + +gateway <---> channelfactory ---> {id: weakref(channel)} + ---> {id: callback} + + +Channel: + + __del__(): + if not closed: + if has_callback: + send a CHANNEL_LAST_MESSAGE + else: + send a CHANNEL_CLOSE + if stickyerror: + warning("unhandled:", stickyerror) + + close(): + lock + if not closed: + send a CHANNEL_CLOSE + closed = True + channelfactory.closed(id) + receive_closed.set() + unlock + + send(): + lock + if closed: + raise! + send a CHANNEL_DATA + unlock + + waitclose(): + wait for receive_closed + if stickyerror: + stickyerror = None + raise stickyerror + + receive(): + x = queue.pop() + if x is END_MARKER: + queue.push(x) + stickyerror = None + raise stickyerror + + +receive CHANNEL_DATA(id, data): + if id in callbacks: + callbacks[id](data) + else: + c = channels[id] + if no KeyError: + c.queue.push(data) + +receive CHANNEL_CLOSE(id, error=EOFError()): + del callbacks[id] + c = channels.pop(id) + if not KeyError: + c.stickyerror = error + c.closed = True + c.receive_closed.set() + c.queue.push(END_MARKER) + elif error: + warning("unhandled:", error) + +receive CHANNEL_LAST_MESSAGE(id): + del callbacks[id] + c = channels.pop(id) + if not KeyError: + c.receive_closed.set() + c.queue.push(END_MARKER) Modified: py/branch/execnet-refactoring/channel.py ============================================================================== --- py/dist/py/execnet/channel.py (original) +++ py/branch/execnet-refactoring/channel.py Mon Jun 20 23:14:45 2005 @@ -1,4 +1,4 @@ -import threading +import threading, weakref, sys import Queue if 'Message' not in globals(): from py.__.execnet.message import Message @@ -15,47 +15,50 @@ def __repr__(self): return "%s: %s" %(self.__class__.__name__, self.formatted) + def warn(self): + # XXX do this better + print >> sys.stderr, "Warning: unhandled %r" % (self,) + + class Channel(object): """Communication channel between two possibly remote threads of code. 
""" RemoteError = RemoteError - def __init__(self, gateway, id, receiver=None): + + def __init__(self, gateway, id, has_callback=False): assert isinstance(id, int) self.gateway = gateway self.id = id - self._receiver = receiver - self._items = Queue.Queue() - self._closeevent = threading.Event() + if has_callback: + self._items = None + else: + self._items = Queue.Queue() + self._receiveclosed = threading.Event() + self._closed = False + self._sendlock = threading.Lock() def __repr__(self): flag = self.isclosed() and "closed" or "open" return "" % (self.id, flag) - # - # internal methods, called from the receiver thread - # - def _local_close(self, stickyerror=EOFError()): - self.gateway._del_channelmapping(self.id) - self._stickyerror = stickyerror - self._items.put(ENDMARKER) - self._closeevent.set() - - def _local_receivechannel(self, newid): - """ receive a remotely created new (sub)channel. """ - # executes in receiver thread - newchannel = Channel(self.gateway, newid) - self.gateway.channelfactory[newid] = newchannel - self._local_receivedata(newchannel) - - def _local_receivedata(self, data): - # executes in receiver thread - if self._receiver is not None: - self._receiver(data) - else: - self._items.put(data) + def __del__(self): + self.gateway.trace("Channel(%d).__del__" % self.id) + if not self._closed: + if self.gateway is not None: # can be None in tests + if self._items is None: # has_callback + Msg = Message.CHANNEL_LAST_MESSAGE + else: + Msg = Message.CHANNEL_CLOSE + self.gateway._outgoing.put(Msg(self.id)) + else: + error = self._getstickyerror() + if isinstance(error, RemoteError): + error.warn() - def _local_schedulexec(self, sourcetask): - # executes in receiver thread - self.gateway._local_schedulexec(channel=self, sourcetask=sourcetask) + def _getstickyerror(self): + try: + return self.__dict__.pop('_stickyerror') + except KeyError: + return EOFError() # # public API for channel objects @@ -64,7 +67,7 @@ """ return True if the channel is closed. A closed channel may still hold items. """ - return self._closeevent.isSet() + return self._closed def makefile(self, mode='w', proxyclose=False): """ return a file-like object. Only supported mode right @@ -77,39 +80,53 @@ def close(self, error=None): """ close down this channel on both sides. """ - if self.id in self.gateway.channelfactory: - put = self.gateway._outgoing.put - if error is not None: - put(Message.CHANNEL_CLOSE_ERROR(self.id, str(error))) - else: - put(Message.CHANNEL_CLOSE(self.id)) - self._local_close() + self._sendlock.acquire() + try: + if not self._closed: + put = self.gateway._outgoing.put + if error is not None: + put(Message.CHANNEL_CLOSE_ERROR(self.id, str(error))) + else: + put(Message.CHANNEL_CLOSE(self.id)) + if isinstance(error, RemoteError): + self._stickyerror = error + self._closed = True + self.gateway.channelfactory._closed(self.id) + self._receiveclosed.set() + finally: + self._sendlock.release() def waitclose(self, timeout): - """ wait until this channel is closed. A closed - channel may still hold receiveable items. waitclose() - reraises exceptions from executing code on the other - side as channel.RemoteErrors containing a a textual + """ wait until this channel is closed (or the remote side + otherwise signalled that no more data was being sent). + The channel may still hold receiveable items, but not receive + more. waitclose() reraises exceptions from executing code on + the other side as channel.RemoteErrors containing a a textual representation of the remote traceback. 
""" - self._closeevent.wait(timeout=timeout) - if not self._closeevent.isSet(): + self._receiveclosed.wait(timeout=timeout) + if not self._receiveclosed.isSet(): raise IOError, "Timeout" - if isinstance(self._stickyerror, self.RemoteError): - raise self._stickyerror + error = self._getstickyerror() + if isinstance(error, self.RemoteError): + raise error def send(self, item): """sends the given item to the other side of the channel, possibly blocking if the sender queue is full. Note that an item needs to be marshallable. """ - if self.isclosed(): - raise IOError, "cannot send to %r" %(self,) - if isinstance(item, Channel): - data = Message.CHANNEL_NEW(self.id, item.id) - else: - data = Message.CHANNEL_DATA(self.id, item) - self.gateway._outgoing.put(data) + self._sendlock.acquire() + try: + if self.isclosed(): + raise IOError, "cannot send to %r" %(self,) + if isinstance(item, Channel): + data = Message.CHANNEL_NEW(self.id, item.id) + else: + data = Message.CHANNEL_DATA(self.id, item) + self.gateway._outgoing.put(data) + finally: + self._sendlock.release() def receive(self): """receives an item that was sent from the other side, @@ -118,12 +135,12 @@ reraised as channel.RemoteError exceptions containing a textual representation of the remote traceback. """ - if self._receiver: + if self._items is None: raise IOError("calling receive() on channel with receiver callback") x = self._items.get() if x is ENDMARKER: self._items.put(x) # for other receivers - raise self._stickyerror + raise self._getstickyerror() else: return x @@ -143,8 +160,11 @@ ENDMARKER = object() class ChannelFactory(object): + RemoteError = RemoteError + def __init__(self, gateway, startcount=1): - self._dict = dict() + self._channels = weakref.WeakValueDictionary() + self._callbacks = {} self._writelock = threading.Lock() self.gateway = gateway self.count = startcount @@ -156,37 +176,70 @@ if id is None: id = self.count self.count += 2 - channel = Channel(self.gateway, id, receiver=receiver) - self._dict[id] = channel + has_callback = receiver is not None + if has_callback: + self._callbacks[id] = receiver + channel = Channel(self.gateway, id, has_callback) + self._channels[id] = channel return channel finally: self._writelock.release() - def __contains__(self, key): - return key in self._dict + def channels(self): + return self._channels.values() - def values(self): - self._writelock.acquire() + # + # internal methods, called from the receiver thread + # + def _closed(self, id): try: - return self._dict.values() - finally: - self._writelock.release() - - def __getitem__(self, key): - return self._dict[key] - - def __setitem__(self, key, value): - self._writelock.acquire() + del self._callbacks[id] + except KeyError: + pass try: - self._dict[key] = value - finally: - self._writelock.release() - def __delitem__(self, key): - self._writelock.acquire() + channel = self._channels.pop(id) + except KeyError: + channel = None + return channel + + def _local_close(self, id, stickyerror=None): + channel = self._closed(id) + if channel is None: + if isinstance(stickyerror, RemoteError): + stickyerror.warn() + else: + if isinstance(stickyerror, RemoteError): + channel._stickyerror = stickyerror + channel._closed = True + channel._receiveclosed.set() + if channel._items is not None: + channel._items.put(ENDMARKER) + + def _local_last_message(self, id): + channel = self._closed(id) + if channel is not None: + channel._receiveclosed.set() + if channel._items is not None: + channel._items.put(ENDMARKER) + + def _local_receive(self, id, 
data): + # executes in receiver thread try: - del self._dict[key] - finally: - self._writelock.release() + callback = self._callbacks[id] + except KeyError: + try: + channel = self._channels[id] + except KeyError: + pass # drop data + else: + channel._items.put(data) + else: + callback(data) + + def _finished_receiving(self): + self._callbacks.clear() + for id in self._channels.keys(): + self._local_last_message(id) class ChannelFile: Modified: py/branch/execnet-refactoring/gateway.py ============================================================================== --- py/dist/py/execnet/gateway.py (original) +++ py/branch/execnet-refactoring/gateway.py Mon Jun 20 23:14:45 2005 @@ -1,9 +1,9 @@ -import sys import os import threading import Queue import traceback import atexit +import weakref # note that the whole code of this module (as well as some @@ -26,7 +26,7 @@ NamedThreadPool = py._thread.NamedThreadPool import os -debug = 0 # open('/tmp/execnet-debug-%d' % os.getpid() , 'wa') +debug = open('/tmp/execnet-debug-%d' % os.getpid() , 'wa') sysex = (KeyboardInterrupt, SystemExit) @@ -35,27 +35,29 @@ ThreadOut = ThreadOut def __init__(self, io, startcount=2, maxthreads=None): + global registered_cleanup self._execpool = WorkerPool() - self.running = True +## self.running = True self.io = io self._outgoing = Queue.Queue() self.channelfactory = ChannelFactory(self, startcount) - self._exitlock = threading.Lock() - self.pool = NamedThreadPool(receiver = self.thread_receiver, - sender = self.thread_sender) - if not _gateways: +## self._exitlock = threading.Lock() + if not registered_cleanup: atexit.register(cleanup_atexit) - _gateways.append(self) + registered_cleanup = True + _active_sendqueues[self._outgoing] = True + self.pool = NamedThreadPool(receiver = self.thread_receiver, + sender = self.thread_sender) def __repr__(self): R = len(self.pool.getstarted('receiver')) and "receiving" or "not receiving" S = len(self.pool.getstarted('sender')) and "sending" or "not sending" - i = len(self.channelfactory.values()) + i = len(self.channelfactory.channels()) return "<%s %s/%s (%d active channels)>" %(self.__class__.__name__, R, S, i) - def _local_trystopexec(self): - self._execpool.shutdown() +## def _local_trystopexec(self): +## self._execpool.shutdown() def trace(self, *args): if debug: @@ -70,13 +72,18 @@ except: traceback.print_exc() def traceex(self, excinfo): - l = traceback.format_exception(*excinfo) - errortext = "".join(l) + try: + l = traceback.format_exception(*excinfo) + errortext = "".join(l) + except: + errortext = '%s: %s' % (excinfo[0].__name__, + excinfo[1]) self.trace(errortext) def thread_receiver(self): """ thread to read and handle Messages half-sync-half-async. """ try: + from sys import exc_info while 1: try: msg = Message.readfrom(self.io) @@ -84,21 +91,29 @@ msg.received(self) except sysex: raise + except EOFError: + break except: - self.traceex(sys.exc_info()) + self.traceex(exc_info()) break finally: + self._outgoing.put(None) + self.channelfactory._finished_receiving() self.trace('leaving %r' % threading.currentThread()) def thread_sender(self): """ thread to send Messages over the wire. 
""" try: + from sys import exc_info while 1: msg = self._outgoing.get() try: + if msg is None: + self.io.close_write() + break msg.writeto(self.io) except: - excinfo = sys.exc_info() + excinfo = exc_info() self.traceex(excinfo) msg.post_sent(self, excinfo) raise @@ -112,7 +127,7 @@ l = [] for name, id in ('stdout', outid), ('stderr', errid): if id: - channel = self._local_makechannelobject(outid) + channel = self.channelfactory.new(outid) out = ThreadOut(sys, name) out.setwritefunc(channel.send) l.append((out, channel)) @@ -122,13 +137,9 @@ channel.close() return close - def _local_makechannelobject(self, newid): - newchannel = Channel(self, newid) - self.channelfactory[newid] = newchannel - return newchannel - def thread_executor(self, channel, (source, outid, errid)): """ worker thread to execute source objects from the execution queue. """ + from sys import exc_info try: loc = { 'channel' : channel } self.trace("execution starts:", repr(source)[:50]) @@ -142,26 +153,18 @@ except (KeyboardInterrupt, SystemExit): raise except: - excinfo = sys.exc_info() + excinfo = exc_info() l = traceback.format_exception(*excinfo) errortext = "".join(l) channel.close(errortext) self.trace(errortext) else: - channel.close() + channel.close() # XXX -- should usually be closed by Channel.__del__ def _local_schedulexec(self, channel, sourcetask): self.trace("dispatching exec") self._execpool.dispatch(self.thread_executor, channel, sourcetask) - def _del_channelmapping(self, id, ignoremissing=False): - self.trace("deleting channel mapping %r" %(id,)) - try: - del self.channelfactory[id] - except KeyError: - if not ignoremissing: - raise - def _newredirectchannelid(self, callback): if callback is None: return @@ -234,27 +237,34 @@ c.waitclose(1.0) return Handle() +## def exit(self): +## """ initiate full gateway teardown. +## Note that the teardown of sender/receiver threads happens +## asynchronously and timeouts on stopping worker execution +## threads are ignored. You can issue join() or join(joinexec=False) +## if you want to wait for a full teardown (possibly excluding +## execution threads). +## """ +## # note that threads may still be scheduled to start +## # during our execution! +## self._exitlock.acquire() +## try: +## if self.running: +## self.running = False +## if not self.pool.getstarted('sender'): +## raise IOError("sender thread not alive anymore!") +## self._outgoing.put(None) +## self.trace("exit procedure triggered, pid %d " % (os.getpid(),)) +## _gateways.remove(self) +## finally: +## self._exitlock.release() + def exit(self): - """ initiate full gateway teardown. - Note that the teardown of sender/receiver threads happens - asynchronously and timeouts on stopping worker execution - threads are ignored. You can issue join() or join(joinexec=False) - if you want to wait for a full teardown (possibly excluding - execution threads). - """ - # note that threads may still be scheduled to start - # during our execution! 
- self._exitlock.acquire() + self._outgoing.put(None) try: - if self.running: - self.running = False - if not self.pool.getstarted('sender'): - raise IOError("sender thread not alive anymore!") - self._outgoing.put(Message.EXIT_GATEWAY()) - self.trace("exit procedure triggered, pid %d " % (os.getpid(),)) - _gateways.remove(self) - finally: - self._exitlock.release() + del _active_sendqueues[self._outgoing] + except KeyError: + pass def join(self, joinexec=True): current = threading.currentThread() @@ -275,11 +285,12 @@ cache[name][id(gw)] = x = "%s:%s.%d" %(os.getpid(), gw.__class__.__name__, len(cache[name])) return x -_gateways = [] +registered_cleanup = False +_active_sendqueues = weakref.WeakKeyDictionary() def cleanup_atexit(): if debug: print >>debug, "="*20 + "cleaning up" + "=" * 20 debug.flush() - while _gateways: - x = _gateways[-1] - x.exit() + while _active_sendqueues: + queue, ignored = _active_sendqueues.popitem() + queue.put(None) Modified: py/branch/execnet-refactoring/message.py ============================================================================== --- py/dist/py/execnet/message.py (original) +++ py/branch/execnet-refactoring/message.py Mon Jun 20 23:14:45 2005 @@ -53,67 +53,35 @@ def _setupmessages(): - # - # EXIT_GATEWAY and STOP_RECEIVING are messages to cleanly - # bring down the IO and gateway connection - # - # First an EXIT_GATEWAY message is send which results - # on the other side's receive_handle to send - # a STOP_RECEIVING message - class EXIT_GATEWAY(Message): - def received(self, gateway): - # executes in receiver thread - for x in gateway.channelfactory.values(): - x._local_close() - gateway._outgoing.put(self.STOP_RECEIVING()) - gateway._local_trystopexec() - raise SystemExit - def post_sent(self, gateway, excinfo=None): - # executes in sender thread - gateway._local_trystopexec() - gateway.io.close_write() - raise SystemExit - - class STOP_RECEIVING(Message): - def received(self, gateway): - # note that we don't need to close io.close_read() - # as the sender side will have closed the io - # already. With sockets closing it would raise - # a Transport Not Connected exception - for x in gateway.channelfactory.values(): - x._local_close() - raise SystemExit - def post_sent(self, gateway, excinfo=None): - # after we sent STOP_RECEIVING we don't - # want to write anything more anymore. - gateway.io.close_write() - raise SystemExit class CHANNEL_OPEN(Message): def received(self, gateway): channel = gateway.channelfactory.new(self.channelid) - channel._local_schedulexec(self.data) + gateway._local_schedulexec(channel=channel, sourcetask=self.data) class CHANNEL_NEW(Message): def received(self, gateway): + """ receive a remotely created new (sub)channel. 
""" newid = self.data - channel = gateway.channelfactory[self.channelid] - channel._local_receivechannel(newid) + newchannel = gateway.channelfactory.new(newid) + gateway.channelfactory._local_receive(self.channelid, newchannel) class CHANNEL_DATA(Message): def received(self, gateway): - channel = gateway.channelfactory[self.channelid] - channel._local_receivedata(self.data) + gateway.channelfactory._local_receive(self.channelid, self.data) class CHANNEL_CLOSE(Message): def received(self, gateway): - channel = gateway.channelfactory[self.channelid] - channel._local_close() + gateway.channelfactory._local_close(self.channelid) class CHANNEL_CLOSE_ERROR(Message): def received(self, gateway): - channel = gateway.channelfactory[self.channelid] - channel._local_close(channel.RemoteError(self.data)) + remote_error = gateway.channelfactory.RemoteError(self.data) + gateway.channelfactory._local_close(self.channelid, remote_error) + + class CHANNEL_LAST_MESSAGE(Message): + def received(self, gateway): + gateway.channelfactory._local_last_message(self.channelid) classes = [x for x in locals().values() if hasattr(x, '__bases__')] classes.sort(lambda x,y : cmp(x.__name__, y.__name__)) Modified: py/branch/execnet-refactoring/register.py ============================================================================== --- py/dist/py/execnet/register.py (original) +++ py/branch/execnet-refactoring/register.py Mon Jun 20 23:14:45 2005 @@ -52,31 +52,31 @@ infile, outfile = os.popen2(cmd) io = inputoutput.Popen2IO(infile, outfile) super(PopenCmdGateway, self).__init__(io=io) - self._pidchannel = self.remote_exec(""" - import os - channel.send(os.getpid()) - """) - - def exit(self): - try: - self._pidchannel.waitclose(timeout=0.5) - pid = self._pidchannel.receive() - except IOError: - self.trace("IOError: could not receive child PID") - self.trace(sys.exc_info()) - pid = None - super(PopenCmdGateway, self).exit() - if pid is not None: - self.trace("waiting for pid %s" % pid) - try: - os.waitpid(pid, 0) - except KeyboardInterrupt: - if sys.platform != "win32": - os.kill(pid, 15) - raise - except OSError, e: - self.trace("child process %s already dead? error:%s" % - (pid, str(e))) +## self._pidchannel = self.remote_exec(""" +## import os +## channel.send(os.getpid()) +## """) + +## def exit(self): +## try: +## self._pidchannel.waitclose(timeout=0.5) +## pid = self._pidchannel.receive() +## except IOError: +## self.trace("IOError: could not receive child PID:") +## self.traceex(sys.exc_info()) +## pid = None +## super(PopenCmdGateway, self).exit() +## if pid is not None: +## self.trace("waiting for pid %s" % pid) +## try: +## os.waitpid(pid, 0) +## except KeyboardInterrupt: +## if sys.platform != "win32": +## os.kill(pid, 15) +## raise +## except OSError, e: +## self.trace("child process %s already dead? error:%s" % +## (pid, str(e))) class PopenGateway(PopenCmdGateway): # use sysfind/sysexec/subprocess instead of os.popen? 
Modified: py/branch/execnet-refactoring/testing/test_gateway.py ============================================================================== --- py/dist/py/execnet/testing/test_gateway.py (original) +++ py/branch/execnet-refactoring/testing/test_gateway.py Mon Jun 20 23:14:45 2005 @@ -49,20 +49,9 @@ def test_factory_getitem(self): chan1 = self.fac.new() - assert self.fac[chan1.id] == chan1 + assert self.fac._channels[chan1.id] == chan1 chan2 = self.fac.new() - assert self.fac[chan2.id] == chan2 - - def test_factory_delitem(self): - chan1 = self.fac.new() - assert self.fac[chan1.id] == chan1 - del self.fac[chan1.id] - py.test.raises(KeyError, self.fac.__getitem__, chan1.id) - - def test_factory_setitem(self): - channel = gateway.Channel(None, 12) - self.fac[channel.id] = channel - assert self.fac[channel.id] == channel + assert self.fac._channels[chan2.id] == chan2 def test_channel_timeouterror(self): channel = self.fac.new() @@ -72,8 +61,8 @@ def setup_class(cls): cls.gw = py.execnet.PopenGateway() - def teardown_class(cls): - cls.gw.exit() +## def teardown_class(cls): +## cls.gw.exit() class BasicRemoteExecution: def test_correct_setup(self): @@ -127,12 +116,13 @@ def test_channel__local_close(self): channel = self.gw.channelfactory.new() - channel._local_close() + self.gw.channelfactory._local_close(channel.id) channel.waitclose(0.1) def test_channel__local_close_error(self): channel = self.gw.channelfactory.new() - channel._local_close(channel.RemoteError("error")) + self.gw.channelfactory._local_close(channel.id, + channel.RemoteError("error")) py.test.raises(channel.RemoteError, channel.waitclose, 0.01) def test_channel_iter(self): @@ -155,10 +145,10 @@ # check that the both sides previous channels are really gone channel.waitclose(0.3) - assert channel.id not in self.gw.channelfactory + assert channel.id not in self.gw.channelfactory._channels #assert c.id not in self.gw.channelfactory newchan = self.gw.remote_exec(''' - assert %d not in channel.gateway.channelfactory + assert %d not in channel.gateway.channelfactory._channels ''' % (channel.id)) newchan.waitclose(0.3) @@ -254,17 +244,21 @@ for gw in l: channel = gw.remote_exec("""channel.send(42)""") channels.append(channel) - try: - while channels: - channel = channels.pop() - try: - ret = channel.receive() - assert ret == 42 - finally: - channel.gateway.exit() - finally: - for x in channels: - x.gateway.exit() +## try: +## while channels: +## channel = channels.pop() +## try: +## ret = channel.receive() +## assert ret == 42 +## finally: +## channel.gateway.exit() +## finally: +## for x in channels: +## x.gateway.exit() + while channels: + channel = channels.pop() + ret = channel.receive() + assert ret == 42 class SocketGatewaySetup: def setup_class(cls): @@ -272,9 +266,9 @@ cls.proxygw = py.execnet.PopenGateway() cls.gw = py.execnet.SocketGateway.remote_install(cls.proxygw) - def teardown_class(cls): - cls.gw.exit() - cls.proxygw.exit() +## def teardown_class(cls): +## cls.gw.exit() +## cls.proxygw.exit() class TestSocketGateway(SocketGatewaySetup, BasicRemoteExecution): disabled = sys.platform == "win32" From arigo at codespeak.net Mon Jun 20 23:23:46 2005 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Jun 2005 23:23:46 +0200 (CEST) Subject: [py-svn] r13635 - py/dist/py/thread Message-ID: <20050620212346.EDD6427B51@code1.codespeak.net> Author: arigo Date: Mon Jun 20 23:23:45 2005 New Revision: 13635 Modified: py/dist/py/thread/pool.py Log: This change makes the WorkerThread avoid to hold on references to 
objects from the last executed job. Modified: py/dist/py/thread/pool.py ============================================================================== --- py/dist/py/thread/pool.py (original) +++ py/dist/py/thread/pool.py Mon Jun 20 23:23:45 2005 @@ -49,23 +49,27 @@ self._pool = pool self.setDaemon(1) + def _run_once(self): + reply = self._queue.get() + if reply is SystemExit: + return False + assert self not in self._pool._ready + task = reply.task + try: + func, args, kwargs = task + result = func(*args, **kwargs) + except (SystemExit, KeyboardInterrupt): + return False + except: + reply.setexcinfo(sys.exc_info()) + else: + reply.set(result) + # at this point, reply, task and all other local variables go away + return True + def run(self): try: - while 1: - reply = self._queue.get() - if reply is SystemExit: - break - assert self not in self._pool._ready - task = reply.task - try: - func, args, kwargs = task - result = func(*args, **kwargs) - except (SystemExit, KeyboardInterrupt): - break - except: - reply.setexcinfo(sys.exc_info()) - else: - reply.set(result) + while self._run_once(): self._pool._ready[self] = True finally: del self._pool._alive[self] From arigo at codespeak.net Mon Jun 20 23:27:16 2005 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Jun 2005 23:27:16 +0200 (CEST) Subject: [py-svn] r13636 - py/branch/execnet-refactoring Message-ID: <20050620212716.82B0B27B51@code1.codespeak.net> Author: arigo Date: Mon Jun 20 23:27:14 2005 New Revision: 13636 Modified: py/branch/execnet-refactoring/channel.py py/branch/execnet-refactoring/gateway.py Log: Good! With the last fix in py.thread.pool, the automatic closing of channels seems to work nicely. At least, the tests are happy. Modified: py/branch/execnet-refactoring/channel.py ============================================================================== --- py/branch/execnet-refactoring/channel.py (original) +++ py/branch/execnet-refactoring/channel.py Mon Jun 20 23:27:14 2005 @@ -41,14 +41,15 @@ return "" % (self.id, flag) def __del__(self): + if self.gateway is None: # can be None in tests + return self.gateway.trace("Channel(%d).__del__" % self.id) if not self._closed: - if self.gateway is not None: # can be None in tests - if self._items is None: # has_callback - Msg = Message.CHANNEL_LAST_MESSAGE - else: - Msg = Message.CHANNEL_CLOSE - self.gateway._outgoing.put(Msg(self.id)) + if self._items is None: # has_callback + Msg = Message.CHANNEL_LAST_MESSAGE + else: + Msg = Message.CHANNEL_CLOSE + self.gateway._outgoing.put(Msg(self.id)) else: error = self._getstickyerror() if isinstance(error, RemoteError): Modified: py/branch/execnet-refactoring/gateway.py ============================================================================== --- py/branch/execnet-refactoring/gateway.py (original) +++ py/branch/execnet-refactoring/gateway.py Mon Jun 20 23:27:14 2005 @@ -159,7 +159,7 @@ channel.close(errortext) self.trace(errortext) else: - channel.close() # XXX -- should usually be closed by Channel.__del__ + pass #channel.close() -- should usually be closed by Channel.__del__ def _local_schedulexec(self, channel, sourcetask): self.trace("dispatching exec") From arigo at codespeak.net Mon Jun 20 23:53:18 2005 From: arigo at codespeak.net (arigo at codespeak.net) Date: Mon, 20 Jun 2005 23:53:18 +0200 (CEST) Subject: [py-svn] r13637 - py/branch/execnet-refactoring Message-ID: <20050620215318.719D827B51@code1.codespeak.net> Author: arigo Date: Mon Jun 20 23:53:17 2005 New Revision: 13637 Modified: 
py/branch/execnet-refactoring/gateway.py Log: Oups, naive. Not thread- nor GC-safe. Modified: py/branch/execnet-refactoring/gateway.py ============================================================================== --- py/branch/execnet-refactoring/gateway.py (original) +++ py/branch/execnet-refactoring/gateway.py Mon Jun 20 23:53:17 2005 @@ -291,6 +291,9 @@ if debug: print >>debug, "="*20 + "cleaning up" + "=" * 20 debug.flush() - while _active_sendqueues: - queue, ignored = _active_sendqueues.popitem() + while True: + try: + queue, ignored = _active_sendqueues.popitem() + except KeyError: + break queue.put(None) From hpk at codespeak.net Tue Jun 21 02:20:32 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Tue, 21 Jun 2005 02:20:32 +0200 (CEST) Subject: [py-svn] r13645 - in py/dist/py: . misc/testing Message-ID: <20050621002032.360EB27B5A@code1.codespeak.net> Author: hpk Date: Tue Jun 21 02:20:30 2005 New Revision: 13645 Modified: py/dist/py/__init__.py py/dist/py/initpkg.py py/dist/py/misc/testing/test_initpkg.py Log: reworked and (i think) simplified the implementation for clean imports from whole-modules. You now use '*' to specify that you want to export a whole module's names. You cannot do this currently at root-level, i think, but that is ok. let's avoid using '*' for specifying names, anyway. Modified: py/dist/py/__init__.py ============================================================================== --- py/dist/py/__init__.py (original) +++ py/dist/py/__init__.py Tue Jun 21 02:20:30 2005 @@ -3,7 +3,7 @@ py.test, an interactive testing tool which supports unit-testing with practically no boilerplate. """ -from initpkg import initpkg, RealModule +from initpkg import initpkg initpkg(__name__, description = "py.test and the py lib", @@ -109,7 +109,7 @@ 'log.STDOUT' : ('./log/consumer.py', 'STDOUT'), 'log.STDERR' : ('./log/consumer.py', 'STDERR'), - 'compat.doctest' : ('./compat/doctest.py', RealModule), - 'compat.optparse' : ('./compat/optparse.py', RealModule), - 'compat.textwrap' : ('./compat/textwrap.py', RealModule), + 'compat.doctest' : ('./compat/doctest.py', '*'), + 'compat.optparse' : ('./compat/optparse.py', '*'), + 'compat.textwrap' : ('./compat/textwrap.py', '*'), }) Modified: py/dist/py/initpkg.py ============================================================================== --- py/dist/py/initpkg.py (original) +++ py/dist/py/initpkg.py Tue Jun 21 02:20:30 2005 @@ -168,16 +168,24 @@ self.__map__ = {} def __getattr__(self, name): - try: - extpy = self.__map__[name] - except KeyError: - __tracebackhide__ = True - raise AttributeError(name) - #print "getattr(%r, %r)" %(self, name) - result = self.__package__._resolve(extpy) + if '*' in self.__map__: + extpy = self.__map__['*'][0], name + result = self.__package__._resolve(extpy) + else: + try: + extpy = self.__map__[name] + except KeyError: + __tracebackhide__ = True + raise AttributeError(name) + else: + result = self.__package__._resolve(extpy) + del self.__map__[name] setattr(self, name, result) - del self.__map__[name] - # XXX modify some attrs to make a class appear at virtual module level + self._fixinspection(result, name) + return result + + def _fixinspection(self, result, name): + # modify some attrs to make a class appear at export level if hasattr(result, '__module__'): try: setattr(result, '__module__', self.__name__) @@ -188,52 +196,22 @@ setattr(result, '__name__', name) except (AttributeError, TypeError): pass - # print "setting virtual module on %r" % result - return result def __repr__(self): 
return '' % (self.__name__, ) def getdict(self): # force all the content of the module to be loaded when __dict__ is read - for name in self.__map__.keys(): - hasattr(self, name) # force attribute to be loaded, ignore errors - assert not self.__map__, "%r not empty" % self.__map__ - dictdescr = ModuleType.__dict__['__dict__'] - return dictdescr.__get__(self) - - __dict__ = property(getdict) - del getdict - -class RealModule(ModuleType): - def __init__(self, pkg, name, extpy): - self.__package__ = pkg - self.__name__ = name - self.__extpy__ = extpy - self.__isimported__ = False - - def __getattr__(self, name): - dct = self.__doimport() - if not name in dct: - raise AttributeError, name - return dct[name] - - def __repr__(self): - return '' % (self.__name__, ) - - def __doimport(self): - dictdescr = ModuleType.__dict__['__dict__'] - dct = dictdescr.__get__(self) # avoid infinite recursion - if not self.__isimported__: - module = self.__package__._resolve(self.__extpy__) - dct.update(module.__dict__) - self.__isimported__ = True - return dct - - def getdict(self): - self.__doimport() dictdescr = ModuleType.__dict__['__dict__'] - return dictdescr.__get__(self) + dict = dictdescr.__get__(self) + if '*' not in self.__map__: + for name in self.__map__.keys(): + hasattr(self, name) # force attribute to be loaded, ignore errors + assert not self.__map__, "%r not empty" % self.__map__ + else: + fsname = self.__map__['*'][0] + dict.update(self.__package__._loadimpl(fsname[:-3]).__dict__) + return dict __dict__ = property(getdict) del getdict @@ -253,7 +231,11 @@ for pypath, extpy in pkg.exportitems(): pyparts = pypath.split('.') - modparts = pyparts[:-1] + modparts = pyparts[:] + if extpy[1] != '*': + lastmodpart = modparts.pop() + else: + lastmodpart = '*' current = pkgname # ensure modules @@ -265,23 +247,13 @@ setattr(seen[previous], name, mod) setmodule(current, mod) - if not isinstance(extpy[1], basestring): - klass = extpy[1] - previous = current - name = pyparts[-1] - current += '.' + name - if current not in seen: - seen[current] = mod = klass(pkg, current, extpy) - setattr(seen[previous], name, mod) - setmodule(current, mod) - continue - mod = seen[current] if not hasattr(mod, '__map__'): assert mod is pkg.module, \ "only root modules are allowed to be non-lazy. 
" deferred_imports.append((mod, pyparts[-1], extpy)) else: - mod.__map__[pyparts[-1]] = extpy + mod.__map__[lastmodpart] = extpy + for mod, pypart, extpy in deferred_imports: setattr(mod, pypart, pkg._resolve(extpy)) Modified: py/dist/py/misc/testing/test_initpkg.py ============================================================================== --- py/dist/py/misc/testing/test_initpkg.py (original) +++ py/dist/py/misc/testing/test_initpkg.py Tue Jun 21 02:20:30 2005 @@ -114,7 +114,7 @@ tfile.write(py.code.Source(""" import py py.initpkg('realtest', { - 'module': ('./testmodule.py', py.__.initpkg.RealModule) + 'x.module': ('./testmodule.py', '*'), }) """)) @@ -138,27 +138,27 @@ def setup_method(self, *args): """Unload the test modules before each test.""" - module_names = ['realtest.module', 'realtest'] + module_names = ['realtest', 'realtest.x', 'realtest.x.module'] for modname in module_names: if modname in sys.modules: del sys.modules[modname] def test_realmodule(self): - """Testing 'import realtest.module'""" - import realtest.module - assert 'realtest.module' in sys.modules - assert getattr(realtest.module, 'mytest0') + """Testing 'import realtest.x.module'""" + import realtest.x.module + assert 'realtest.x.module' in sys.modules + assert getattr(realtest.x.module, 'mytest0') def test_realmodule_from(self): """Testing 'from test import module'.""" - from realtest import module + from realtest.x import module assert getattr(module, 'mytest1') def test_realmodule_star(self): """Testing 'from test.module import *'.""" tfile = self.tmpdir.join('startest.py') - tfile.write(py.code.Source("""if True: - from realtest.module import * + tfile.write(py.code.Source(""" + from realtest.x.module import * globals()['mytest0'] globals()['mytest1'] globals()['MyTest'] @@ -167,15 +167,12 @@ def test_realmodule_dict_import(self): "Test verifying that accessing the __dict__ invokes the import" - import realtest.module - assert realtest.module.__isimported__ == False - moddict = dir(realtest.module) - assert realtest.module.__isimported__ == True + import realtest.x.module + moddict = realtest.x.module.__dict__ assert 'mytest0' in moddict assert 'mytest1' in moddict assert 'MyTest' in moddict - #class TestStdHook: # """Tests imports for the standard Python library hook.""" # From arigo at codespeak.net Tue Jun 21 18:15:17 2005 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 21 Jun 2005 18:15:17 +0200 (CEST) Subject: [py-svn] r13661 - py/branch/execnet-refactoring Message-ID: <20050621161517.E86BE27B60@code1.codespeak.net> Author: arigo Date: Tue Jun 21 18:15:15 2005 New Revision: 13661 Modified: py/branch/execnet-refactoring/NOTES py/branch/execnet-refactoring/channel.py Log: Updated the NOTES with precise states and invariants. This helped clarify the order in which to do some operations in channel.py. Modified: py/branch/execnet-refactoring/NOTES ============================================================================== --- py/branch/execnet-refactoring/NOTES (original) +++ py/branch/execnet-refactoring/NOTES Tue Jun 21 18:15:15 2005 @@ -1,3 +1,7 @@ +============================================================================= + Channel implementation notes +============================================================================= + The public API of channels make them appear either opened or closed. 
When a channel is closed, we can't send any more items, and it will not @@ -27,69 +31,92 @@ ---> {id: callback} -Channel: - __del__(): - if not closed: - if has_callback: - send a CHANNEL_LAST_MESSAGE - else: - send a CHANNEL_CLOSE - if stickyerror: - warning("unhandled:", stickyerror) - - close(): - lock - if not closed: - send a CHANNEL_CLOSE - closed = True - channelfactory.closed(id) - receive_closed.set() - unlock - - send(): - lock - if closed: - raise! - send a CHANNEL_DATA - unlock - - waitclose(): - wait for receive_closed - if stickyerror: - stickyerror = None - raise stickyerror - - receive(): - x = queue.pop() - if x is END_MARKER: - queue.push(x) - stickyerror = None - raise stickyerror - - -receive CHANNEL_DATA(id, data): - if id in callbacks: - callbacks[id](data) - else: - c = channels[id] - if no KeyError: - c.queue.push(data) - -receive CHANNEL_CLOSE(id, error=EOFError()): - del callbacks[id] - c = channels.pop(id) - if not KeyError: - c.stickyerror = error - c.closed = True - c.receive_closed.set() - c.queue.push(END_MARKER) - elif error: - warning("unhandled:", error) - -receive CHANNEL_LAST_MESSAGE(id): - del callbacks[id] - c = channels.pop(id) - if not KeyError: - c.receive_closed.set() - c.queue.push(END_MARKER) +State and invariants of Channel objects +--------------------------------------- + +_channels and _callbacks are dictionaries on the ChannelFactory. +Other attributes are on the Channel objects. + +All states are valid at any time (even with multithreading) unless +marked with {E}, which means that they may be temporary invalid. +They are eventually restored. + + +States ("sendonly" means opened but won't receive any more items): + + opened sendonly closed deleted + ================= ============== ================== =============== + not _closed not _closed _closed + not _receiveclosed _receiveclosed {E} _receiveclosed + +In the presence of callbacks, "deleted" does not imply "closed" nor "sendonly". +It only means that no more items can be sent. The (logical) channel can +continue to receive data via the call-back even if the channel object no +longer exists. + + +The two kinds of channels, with or without callback: + + items read by receive() has a callback + ============================= ======================================= + _items is a Queue _items is None + id not in _callbacks + state==opened: id in _callbacks + {E} state==sendonly: there is {E} state!=opened: id not in _callbacks + an ENDMARKER in _items + {E} state==closed: there is + an ENDMARKER in _items + +Callback calls should be considered asynchronuous. The channel can be in any +state and change its state while the callback runs. + + +The ChannelFactory's WeakValueDictionary _channels maps some ids to their +channel object, depending on their state: + + opened sendonly closed deleted + ================= ============== ================ =============== + id in _channels {E} not in {E} not in not in + + +All received RemoteErrors are handled exactly once: they are normally +re-raised once in waitclose() or receive(). If it is not possible, they are +at the moment dumped to stderr. (XXX should use logging/tracing) +Only channels in {E} "closed" state can hold RemoteErrors. + + +Methods: + + * close() returns with the channel in "closed" state + * send() either send the data or raise if "closed" + * receive() wait for the next item. 
If no item left and the state + changes to non-"opened", raise + * waitclose() wait for a non-"opened" state + + +Assuming the channel is connected and the connexion is alive, the local state +eventually influences the state of the corresponding remote channel object: + + local | opened sendonly closed deleted +remote | +======================================================= + | + opened | ok n/a (1) (2) + | + sendonly | n/a n/a n/a ok + | + closed | (1) n/a ok ok + | + deleted | (2) ok ok ok + +(1) The side with the closed channel object must send a CHANNEL_CLOSE message, + which will eventually put the other side's channel in "closed" state if + it is still "opened". + +(2) If the deleted channel has no callback, this is equivalent to (1). + Otherwide, the side with the deleted channel must send a + CHANNEL_LAST_MESSAGE, which will eventually put the other side's channel in + "sendonly" state if it is still "opened". + +n/a These configuration should never occur. Modified: py/branch/execnet-refactoring/channel.py ============================================================================== --- py/branch/execnet-refactoring/channel.py (original) +++ py/branch/execnet-refactoring/channel.py Tue Jun 21 18:15:15 2005 @@ -32,9 +32,9 @@ self._items = None else: self._items = Queue.Queue() - self._receiveclosed = threading.Event() self._closed = False - self._sendlock = threading.Lock() + self._receiveclosed = threading.Event() + self._remoteerrors = [] def __repr__(self): flag = self.isclosed() and "closed" or "open" @@ -44,22 +44,28 @@ if self.gateway is None: # can be None in tests return self.gateway.trace("Channel(%d).__del__" % self.id) - if not self._closed: + # no multithreading issues here, because we have the last ref to 'self' + if self._closed: + # state transition "closed" --> "deleted" + for error in self._remoteerrors: + error.warn() + elif self._receiveclosed.isSet(): + # state transition "sendonly" --> "deleted" + # the remote channel is already in "deleted" state, nothing to do + pass + else: + # state transition "opened" --> "deleted" if self._items is None: # has_callback Msg = Message.CHANNEL_LAST_MESSAGE else: Msg = Message.CHANNEL_CLOSE self.gateway._outgoing.put(Msg(self.id)) - else: - error = self._getstickyerror() - if isinstance(error, RemoteError): - error.warn() - def _getstickyerror(self): + def _getremoteerror(self): try: - return self.__dict__.pop('_stickyerror') - except KeyError: - return EOFError() + return self._remoteerrors.pop(0) + except IndexError: + return None # # public API for channel objects @@ -81,21 +87,22 @@ def close(self, error=None): """ close down this channel on both sides. 
""" - self._sendlock.acquire() - try: - if not self._closed: - put = self.gateway._outgoing.put - if error is not None: - put(Message.CHANNEL_CLOSE_ERROR(self.id, str(error))) - else: - put(Message.CHANNEL_CLOSE(self.id)) - if isinstance(error, RemoteError): - self._stickyerror = error - self._closed = True - self.gateway.channelfactory._closed(self.id) - self._receiveclosed.set() - finally: - self._sendlock.release() + if not self._closed: + # state transition "opened/sendonly" --> "closed" + # threads warning: the channel might be closed under our feet, + # but it's never damaging to send too many CHANNEL_CLOSE messages + put = self.gateway._outgoing.put + if error is not None: + put(Message.CHANNEL_CLOSE_ERROR(self.id, str(error))) + else: + put(Message.CHANNEL_CLOSE(self.id)) + if isinstance(error, RemoteError): + self._remoteerrors.append(error) + self._closed = True # --> "closed" + self._receiveclosed.set() + if self._items is not None: + self._items.put(ENDMARKER) + self.gateway.channelfactory._no_longer_opened(self.id) def waitclose(self, timeout): """ wait until this channel is closed (or the remote side @@ -105,11 +112,11 @@ the other side as channel.RemoteErrors containing a a textual representation of the remote traceback. """ - self._receiveclosed.wait(timeout=timeout) + self._receiveclosed.wait(timeout=timeout) # wait for non-"opened" state if not self._receiveclosed.isSet(): raise IOError, "Timeout" - error = self._getstickyerror() - if isinstance(error, self.RemoteError): + error = self._getremoteerror() + if error: raise error def send(self, item): @@ -117,17 +124,13 @@ possibly blocking if the sender queue is full. Note that an item needs to be marshallable. """ - self._sendlock.acquire() - try: - if self.isclosed(): - raise IOError, "cannot send to %r" %(self,) - if isinstance(item, Channel): - data = Message.CHANNEL_NEW(self.id, item.id) - else: - data = Message.CHANNEL_DATA(self.id, item) - self.gateway._outgoing.put(data) - finally: - self._sendlock.release() + if self.isclosed(): + raise IOError, "cannot send to %r" %(self,) + if isinstance(item, Channel): + data = Message.CHANNEL_NEW(self.id, item.id) + else: + data = Message.CHANNEL_DATA(self.id, item) + self.gateway._outgoing.put(data) def receive(self): """receives an item that was sent from the other side, @@ -141,7 +144,7 @@ x = self._items.get() if x is ENDMARKER: self._items.put(x) # for other receivers - raise self._getstickyerror() + raise self._getremoteerror() or EOFError() else: return x @@ -192,55 +195,60 @@ # # internal methods, called from the receiver thread # - def _closed(self, id): + def _no_longer_opened(self, id): try: - del self._callbacks[id] + del self._channels[id] except KeyError: pass try: - channel = self._channels.pop(id) + del self._callbacks[id] except KeyError: - channel = None - return channel + pass - def _local_close(self, id, stickyerror=None): - channel = self._closed(id) + def _local_close(self, id, remoteerror=None): + channel = self._channels.get(id) if channel is None: - if isinstance(stickyerror, RemoteError): - stickyerror.warn() + # channel already in "deleted" state + if remoteerror: + remoteerror.warn() else: - if isinstance(stickyerror, RemoteError): - channel._stickyerror = stickyerror - channel._closed = True + # state transition to "closed" state + if remoteerror: + channel._remoteerrors.append(remoteerror) + channel._closed = True # --> "closed" channel._receiveclosed.set() if channel._items is not None: channel._items.put(ENDMARKER) + self._no_longer_opened(id) 
def _local_last_message(self, id): - channel = self._closed(id) - if channel is not None: + channel = self._channels.get(id) + if channel is None: + # channel already in "deleted" state + pass + else: + # state transition: if "opened", change to "sendonly" channel._receiveclosed.set() if channel._items is not None: channel._items.put(ENDMARKER) + self._no_longer_opened(id) def _local_receive(self, id, data): # executes in receiver thread - try: - callback = self._callbacks[id] - except KeyError: - try: - channel = self._channels[id] - except KeyError: + callback = self._callbacks.get(id) + if callback is not None: + callback(data) # even if channel may be already closed + else: + channel = self._channels.get(id) + if channel is None or channel._items is None: pass # drop data else: channel._items.put(data) - else: - callback(data) def _finished_receiving(self): - self._callbacks.clear() for id in self._channels.keys(): self._local_last_message(id) + self._callbacks.clear() class ChannelFile: From arigo at codespeak.net Tue Jun 21 18:31:50 2005 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 21 Jun 2005 18:31:50 +0200 (CEST) Subject: [py-svn] r13663 - py/branch/execnet-refactoring/testing Message-ID: <20050621163150.A7AE027B51@code1.codespeak.net> Author: arigo Date: Tue Jun 21 18:31:49 2005 New Revision: 13663 Modified: py/branch/execnet-refactoring/testing/test_gateway.py Log: A test for the "sendonly" state. Modified: py/branch/execnet-refactoring/testing/test_gateway.py ============================================================================== --- py/branch/execnet-refactoring/testing/test_gateway.py (original) +++ py/branch/execnet-refactoring/testing/test_gateway.py Tue Jun 21 18:31:49 2005 @@ -1,4 +1,4 @@ -import os, sys +import os, sys, time import py from py.__.execnet import gateway from py.__.conftest import option @@ -165,6 +165,37 @@ assert l[:2] == [42,13] assert isinstance(l[2], channel.__class__) + def test_channel_callback_stays_active(self, earlyfree=True): + # with 'earlyfree==True', this tests the "sendonly" channel state. + l = [] + channel = self.gw.newchannel(receiver=l.append) + self.gw.remote_exec(channel=channel, source=''' + import thread, time + def producer(channel): + for i in range(5): + time.sleep(0.15) + channel.send(i*100) + thread.start_new_thread(producer, (channel,)) + ''') + if earlyfree: + del channel + for i in range(5): + for _ in range(50): # busy-wait + if l: + break + if not earlyfree: + assert not channel.isclosed() + time.sleep(0.04) + else: + py.test.fail("timed out waiting for the answer[%d]" % i) + res = l.pop(0) + assert res == i*100 + if not earlyfree: + channel.waitclose(1.0) #freed automatically at the end of producer() + + def DEBUGGING_test_channel_callback_remote_freed(self): + self.test_channel_callback_stays_active(False) + def test_remote_redirect_stdout(self): out = py.std.StringIO.StringIO() handle = self.gw.remote_redirect(stdout=out) @@ -215,7 +246,7 @@ """) first = channel.receive() + channel.receive() assert first.strip() == 'hello world' - py.test.raises(EOFError, channel.receive) + py.test.raises(EOFError, channel.receive) #class TestBlockingIssues: # def test_join_blocked_execution_gateway(self): From arigo at codespeak.net Tue Jun 21 19:09:40 2005 From: arigo at codespeak.net (arigo at codespeak.net) Date: Tue, 21 Jun 2005 19:09:40 +0200 (CEST) Subject: [py-svn] r13664 - in py/branch/execnet-refactoring: . 
testing Message-ID: <20050621170940.0C9D827B68@code1.codespeak.net> Author: arigo Date: Tue Jun 21 19:09:38 2005 New Revision: 13664 Modified: py/branch/execnet-refactoring/gateway.py py/branch/execnet-refactoring/testing/test_gateway.py Log: GC cycles can make Channel objects stay alive a bit longer than expected. For a process that goes to sleep, "a bit longer" can stretch indefinitely. This is what made these two tests fail. Modified: py/branch/execnet-refactoring/gateway.py ============================================================================== --- py/branch/execnet-refactoring/gateway.py (original) +++ py/branch/execnet-refactoring/gateway.py Tue Jun 21 19:09:38 2005 @@ -159,7 +159,12 @@ channel.close(errortext) self.trace(errortext) else: - pass #channel.close() -- should usually be closed by Channel.__del__ + # the channel should usually be closed by Channel.__del__. + # Give it a better chance now. + try: + del loc['channel'] + except KeyError: + pass def _local_schedulexec(self, channel, sourcetask): self.trace("dispatching exec") Modified: py/branch/execnet-refactoring/testing/test_gateway.py ============================================================================== --- py/branch/execnet-refactoring/testing/test_gateway.py (original) +++ py/branch/execnet-refactoring/testing/test_gateway.py Tue Jun 21 19:09:38 2005 @@ -82,6 +82,10 @@ channel = self.gw.remote_exec('pass') channel.waitclose(timeout=1.0) + def test_remote_exec_waitclose_2(self): + channel = self.gw.remote_exec('def gccycle(): pass') + channel.waitclose(timeout=1.0) + def test_remote_exec_error_after_close(self): channel = self.gw.remote_exec('pass') channel.waitclose(timeout=1.0) @@ -178,23 +182,21 @@ thread.start_new_thread(producer, (channel,)) ''') if earlyfree: - del channel - for i in range(5): - for _ in range(50): # busy-wait - if l: - break - if not earlyfree: - assert not channel.isclosed() - time.sleep(0.04) - else: + channel = None + counter = 100 + while len(l) < 5: + if channel and channel.isclosed(): + break + counter -= 1 + if not counter: py.test.fail("timed out waiting for the answer[%d]" % i) - res = l.pop(0) - assert res == i*100 - if not earlyfree: - channel.waitclose(1.0) #freed automatically at the end of producer() - - def DEBUGGING_test_channel_callback_remote_freed(self): - self.test_channel_callback_stays_active(False) + time.sleep(0.04) # busy-wait + assert l == [0, 100, 200, 300, 400] + return channel + + def test_channel_callback_remote_freed(self): + channel = self.test_channel_callback_stays_active(False) + channel.waitclose(1.0) # freed automatically at the end of producer() def test_remote_redirect_stdout(self): out = py.std.StringIO.StringIO() From hpk at codespeak.net Tue Jun 21 22:44:48 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Tue, 21 Jun 2005 22:44:48 +0200 (CEST) Subject: [py-svn] r13669 - py/dist/py/documentation Message-ID: <20050621204448.0295D27B5A@code1.codespeak.net> Author: hpk Date: Tue Jun 21 22:44:48 2005 New Revision: 13669 Modified: py/dist/py/documentation/confrest.py Log: activate py lib logo made by Gero Schulze Modified: py/dist/py/documentation/confrest.py ============================================================================== --- py/dist/py/documentation/confrest.py (original) +++ py/dist/py/documentation/confrest.py Tue Jun 21 22:44:48 2005 @@ -66,8 +66,8 @@ super(PyPage, self).fill() # base layout self.body.append( - html.div(html.a(html.img(alt="py lib", id='pyimg', height=75, width=154, - 
src="http://codespeak.net/img/codespeak1b.png"), + html.div(html.a(html.img(alt="py lib", id='pyimg', height=114, width=154, + src="http://codespeak.net/img/pylib.png"), href="http://codespeak.net", ))) #self.body.append( From hpk at codespeak.net Tue Jun 21 22:47:25 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Tue, 21 Jun 2005 22:47:25 +0200 (CEST) Subject: [py-svn] r13670 - py/dist/py/documentation Message-ID: <20050621204725.AF46227B60@code1.codespeak.net> Author: hpk Date: Tue Jun 21 22:47:25 2005 New Revision: 13670 Modified: py/dist/py/documentation/confrest.py Log: tweaks to the web appareance Modified: py/dist/py/documentation/confrest.py ============================================================================== --- py/dist/py/documentation/confrest.py (original) +++ py/dist/py/documentation/confrest.py Tue Jun 21 22:47:25 2005 @@ -15,7 +15,7 @@ content = convert_rest_html(content, txtpath, stylesheet=stylesheet, encoding=encoding) content = strip_html_header(content) - page = PyPage("py lib [%s] " % txtpath.purebasename, stylesheeturl=stylesheet) + page = PyPage("[%s] " % txtpath.purebasename, stylesheeturl=stylesheet) svninfo = txtpath.info() modified = " modified %s by %s" % (worded_diff_time(svninfo.mtime), From arigo at codespeak.net Wed Jun 22 13:08:57 2005 From: arigo at codespeak.net (arigo at codespeak.net) Date: Wed, 22 Jun 2005 13:08:57 +0200 (CEST) Subject: [py-svn] r13690 - py/branch/execnet-refactoring Message-ID: <20050622110857.E2F0B27B6A@code1.codespeak.net> Author: arigo Date: Wed Jun 22 13:08:56 2005 New Revision: 13690 Modified: py/branch/execnet-refactoring/gateway.py Log: Relying on Python's refcounting to close channels doesn't sound like such a great idea. This is what causes troubles in "py.test --exec=python2.4". Trying a (probably too implicit) solution: remote_exec() will by default automatically close the channel on the remote side after it finished executing, unless the "channel=.." argument is passed to remote_exec(), in which case we assume the user wants better control over the channel. Modified: py/branch/execnet-refactoring/gateway.py ============================================================================== --- py/branch/execnet-refactoring/gateway.py (original) +++ py/branch/execnet-refactoring/gateway.py Wed Jun 22 13:08:56 2005 @@ -137,7 +137,7 @@ channel.close() return close - def thread_executor(self, channel, (source, outid, errid)): + def thread_executor(self, channel, (source, outid, errid, autoclose)): """ worker thread to execute source objects from the execution queue. """ from sys import exc_info try: @@ -159,12 +159,15 @@ channel.close(errortext) self.trace(errortext) else: - # the channel should usually be closed by Channel.__del__. - # Give it a better chance now. - try: - del loc['channel'] - except KeyError: - pass + if autoclose: + channel.close() + else: + # the channel should usually be closed by Channel.__del__. + # Give it a better chance now. 
+ try: + del loc['channel'] + except KeyError: + pass def _local_schedulexec(self, channel, sourcetask): self.trace("dispatching exec") @@ -207,10 +210,13 @@ pass if channel is None: channel = self.newchannel() + autoclose = True + else: + autoclose = False outid = self._newredirectchannelid(stdout) errid = self._newredirectchannelid(stderr) self._outgoing.put(Message.CHANNEL_OPEN(channel.id, - (source, outid, errid))) + (source, outid, errid, autoclose))) return channel def remote_redirect(self, stdout=None, stderr=None): From briandorsey at codespeak.net Fri Jun 24 01:16:49 2005 From: briandorsey at codespeak.net (briandorsey at codespeak.net) Date: Fri, 24 Jun 2005 01:16:49 +0200 (CEST) Subject: [py-svn] r13757 - py/dist/py/documentation Message-ID: <20050623231649.6605927B53@code1.codespeak.net> Author: briandorsey Date: Fri Jun 24 01:16:49 2005 New Revision: 13757 Modified: py/dist/py/documentation/test.txt Log: Fixed a small typo. Modified: py/dist/py/documentation/test.txt ============================================================================== --- py/dist/py/documentation/test.txt (original) +++ py/dist/py/documentation/test.txt Fri Jun 24 01:16:49 2005 @@ -38,7 +38,7 @@ This will automatically collect and run any Python module whose filenames start with ``test_`` from the directory and any subdirectories, starting with the current directory, and run them. Each Python test module is -inspect for test methods starting with ``test_``. +inspected for test methods starting with ``test_``. Basic Features of ``py.test`` ============================= From hpk at codespeak.net Sat Jun 25 10:06:40 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sat, 25 Jun 2005 10:06:40 +0200 (CEST) Subject: [py-svn] r13831 - in py/dist/py/log: . testing Message-ID: <20050625080640.585AD27B51@code1.codespeak.net> Author: hpk Date: Sat Jun 25 10:06:38 2005 New Revision: 13831 Modified: py/dist/py/log/consumer.py py/dist/py/log/testing/test_log.py Log: allow setconsumer()'s first argument to be a producer-like object that provides keywords. This allows to do mylog = py.log.Producer("whatever") py.log.setconsumer(mylog, someconsumer) so that one doesn't have to repeat the "whatever" string. 
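To illustrate the new behaviour outside the test suite, here is a minimal usage sketch mirroring the added test; the producer name "execnet" and the list-based consumer are made-up examples, not part of this commit:

    import py

    log = py.log.Producer("execnet")          # keyword that tags every message
    caught = []
    py.log.setconsumer(log, caught.append)    # pass the producer itself, no string repeated
    log("connecting")
    assert str(caught[0]) == "[execnet] connecting"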
Modified: py/dist/py/log/consumer.py ============================================================================== --- py/dist/py/log/consumer.py (original) +++ py/dist/py/log/consumer.py Sat Jun 25 10:06:38 2005 @@ -26,6 +26,8 @@ # normalize to tuples if isinstance(keywords, str): keywords = tuple(map(None, keywords.split())) + elif hasattr(keywords, 'keywords'): + keywords = keywords.keywords elif not isinstance(keywords, tuple): raise TypeError("key %r is not a string or tuple" % (keywords,)) if consumer is not None and not callable(consumer): Modified: py/dist/py/log/testing/test_log.py ============================================================================== --- py/dist/py/log/testing/test_log.py (original) +++ py/dist/py/log/testing/test_log.py Sat Jun 25 10:06:38 2005 @@ -4,6 +4,10 @@ def setup_module(mod): mod.tempdir = py.test.ensuretemp("py.log-test") + mod.logstate = py.log._getstate() + +def teardown_module(mod): + py.log._setstate(mod.logstate) class TestLogProducer: def setup_method(self, meth): @@ -69,6 +73,13 @@ assert l assert l[0].content() == "hello" + def test_setconsumer_with_producer(self): + l = [] + p = py.log.Producer("hello") + py.log.setconsumer(p, l.append) + p("world") + assert str(l[0]) == "[hello] world" + def test_multi_consumer(self): l = [] py.log.setconsumer("x1", l.append) From hpk at codespeak.net Sat Jun 25 18:14:33 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sat, 25 Jun 2005 18:14:33 +0200 (CEST) Subject: [py-svn] r13876 - py/dist/py/test/terminal Message-ID: <20050625161433.243A127B5E@code1.codespeak.net> Author: hpk Date: Sat Jun 25 18:14:31 2005 New Revision: 13876 Modified: py/dist/py/test/terminal/remote.py Log: check for changing .c .py and .h files Modified: py/dist/py/test/terminal/remote.py ============================================================================== --- py/dist/py/test/terminal/remote.py (original) +++ py/dist/py/test/terminal/remote.py Sat Jun 25 18:14:31 2005 @@ -5,7 +5,9 @@ def checkpyfilechange(rootdir, statcache={}): """ wait until project files are changed. """ - fil = py.path.checker(fnmatch='*.py') + def fil(p): + return p.ext in ('.py', '.c', '.h') + #fil = py.path.checker(fnmatch='*.py') rec = py.path.checker(dotfile=0) changed = False for path in rootdir.visit(fil, rec): From hpk at codespeak.net Sun Jun 26 09:35:57 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sun, 26 Jun 2005 09:35:57 +0200 (CEST) Subject: [py-svn] r13911 - py/dist/py/test/terminal Message-ID: <20050626073557.1B31827B51@code1.codespeak.net> Author: hpk Date: Sun Jun 26 09:35:55 2005 New Revision: 13911 Modified: py/dist/py/test/terminal/out.py py/dist/py/test/terminal/terminal.py Log: refining and streamlining the traceback output a bit hope you like it. 
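Before the diffs, a simplified standalone rendering of the separator padding that the out.py hunk below adjusts; the function name sepline and the hard-coded width of 80 are illustrative assumptions, not the actual Out class:

    def sepline(sepchar, title=None, fullwidth=80):
        # sketch: center an optional title in a row of separator characters
        if title is not None:
            half = (fullwidth - len(title) - 2) / (2 * len(sepchar))
            line = "%s %s %s" % (sepchar * half, title, sepchar * half)
        else:
            line = sepchar * (fullwidth / len(sepchar))
        # the new check: pad lines that came out short of the full width
        if len(line) != fullwidth:
            line += sepchar
        return line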
Modified: py/dist/py/test/terminal/out.py ============================================================================== --- py/dist/py/test/terminal/out.py (original) +++ py/dist/py/test/terminal/out.py Sun Jun 26 09:35:55 2005 @@ -24,6 +24,8 @@ line += sepchar * int(fullwidth-(half*2+size)) else: line = sepchar * int(fullwidth/len(sepchar)) + if len(line) != fullwidth: + line += sepchar self.line(line) class TerminalOut(Out): Modified: py/dist/py/test/terminal/terminal.py ============================================================================== --- py/dist/py/test/terminal/terminal.py (original) +++ py/dist/py/test/terminal/terminal.py Sun Jun 26 09:35:55 2005 @@ -260,9 +260,15 @@ self.out.line("empty traceback from item %r" % (item,)) return last = traceback[-1] + first = traceback[0] recursioncache = {} for entry in traceback: - self.out.line("") + if entry == first: + if item: + self.repr_failure_info(item, entry) + self.out.line() + else: + self.out.line("") if entry == last: indent = self.repr_source(entry, 'E') self.repr_failure_explanation(excinfo, indent) @@ -274,8 +280,8 @@ # trailing info if entry == last: - if item: - self.repr_failure_info(item, entry) + #if item: + # self.repr_failure_info(item, entry) self.repr_out_err(item) self.out.sep("_") else: @@ -310,13 +316,14 @@ assert isinstance(item.parent, py.test.collect.Generator) # a generative test yielded a non-callable fn, lineno = item.parent.getpathlineno() - if fn != entry.frame.code.path or \ - entry.frame.code.firstlineno != lineno: - self.out.line("[testcode : %s:%d]" % (fn, lineno+1)) + # hum, the following overloads traceback output + #if fn != entry.frame.code.path or \ + # entry.frame.code.firstlineno != lineno: + # self.out.line("testcode: %s:%d" % (fn, lineno+1)) if root == fn: - self.out.line("[modulepath: %s]" %(modpath)) + self.out.sep("_", "entrypoint: %s" %(modpath)) else: - self.out.line("[modulepath: %s %s]" %(root.basename, modpath)) + self.out.sep("_", "entrypoint: %s %s" %(root.basename, modpath)) def repr_source(self, entry, marker=">"): try: From hpk at codespeak.net Sun Jun 26 10:11:06 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sun, 26 Jun 2005 10:11:06 +0200 (CEST) Subject: [py-svn] r13912 - py/dist/py/test/terminal Message-ID: <20050626081106.5CBE927B51@code1.codespeak.net> Author: hpk Date: Sun Jun 26 10:11:04 2005 New Revision: 13912 Modified: py/dist/py/test/terminal/terminal.py Log: fix --collectonly output Modified: py/dist/py/test/terminal/terminal.py ============================================================================== --- py/dist/py/test/terminal/terminal.py (original) +++ py/dist/py/test/terminal/terminal.py Sun Jun 26 10:11:04 2005 @@ -63,7 +63,8 @@ def startiteration(self, colitem, subitems): if (isinstance(colitem, py.test.collect.Module) - and self.config.option.verbose == 0): + and self.config.option.verbose == 0 + and not self.config.option.collectonly): try: sum = 0 for sub in subitems: From hpk at codespeak.net Sun Jun 26 10:11:32 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sun, 26 Jun 2005 10:11:32 +0200 (CEST) Subject: [py-svn] r13913 - py/dist/py/documentation Message-ID: <20050626081132.79A9F27B51@code1.codespeak.net> Author: hpk Date: Sun Jun 26 10:11:31 2005 New Revision: 13913 Modified: py/dist/py/documentation/conftest.py Log: shuffle things around a bit (in preparation for EP2005 talk :-) Modified: py/dist/py/documentation/conftest.py ============================================================================== --- 
py/dist/py/documentation/conftest.py (original) +++ py/dist/py/documentation/conftest.py Sun Jun 26 10:11:31 2005 @@ -97,31 +97,7 @@ return LinkCheckerMaker(name, self) elif name == 'doctest': return self.DoctestText(name, self) -# -# hooking into py.test collector's chain ... -# because we generate subtests for all link checks -# it is a bit more convoluted than is strictly neccessary -# to perform the tests - -class DocDirectory(py.test.collect.Directory): - ReSTChecker = ReSTChecker - def run(self): - results = super(DocDirectory, self).run() - for x in self.fspath.listdir('*.txt', sort=True): - results.append(x.basename) - return results - - def join(self, name): - if not name.endswith('.txt'): - return super(DocDirectory, self).join(name) - p = self.fspath.join(name) - if p.check(file=1): - return self.ReSTChecker(p, parent=self) -Directory = DocDirectory - - - # generating functions + args as single tests def genlinkchecks(path): for lineno, line in py.builtin.enumerate(path.readlines()): @@ -181,3 +157,28 @@ else: py.test.fail("anchor reference error %s#%s in %s:%d" %( tryfn, anchor, path.basename, lineno+1)) + + +# ___________________________________________________________ +# +# hooking into py.test collector's chain ... +# because we generate subtests for all link checks +# it is a bit more convoluted than is strictly neccessary +# to perform the tests + +class DocDirectory(py.test.collect.Directory): + ReSTChecker = ReSTChecker + + def run(self): + results = super(DocDirectory, self).run() + for x in self.fspath.listdir('*.txt', sort=True): + results.append(x.basename) + return results + + def join(self, name): + if not name.endswith('.txt'): + return super(DocDirectory, self).join(name) + p = self.fspath.join(name) + if p.check(file=1): + return self.ReSTChecker(p, parent=self) +Directory = DocDirectory From hpk at codespeak.net Sun Jun 26 10:14:17 2005 From: hpk at codespeak.net (hpk at codespeak.net) Date: Sun, 26 Jun 2005 10:14:17 +0200 (CEST) Subject: [py-svn] r13914 - py/dist/py/test/terminal Message-ID: <20050626081417.05EEF27B51@code1.codespeak.net> Author: hpk Date: Sun Jun 26 10:14:17 2005 New Revision: 13914 Modified: py/dist/py/test/terminal/remote.py Log: try to be a bit more clever in looponfailing mode: if tests ran to completion and were previously failing then don't wait for file changes but just rerun everything. the idea is that py.test -x --looponfailing becomes a very nice test mode (it already is but now it should be even better because after you fixed a test failure it just restarts (without waiting for file changes) and shows you the next problem until you fix that etc.pp.) Modified: py/dist/py/test/terminal/remote.py ============================================================================== --- py/dist/py/test/terminal/remote.py (original) +++ py/dist/py/test/terminal/remote.py Sun Jun 26 10:14:17 2005 @@ -130,10 +130,12 @@ args = list(args) rootdir = getrootdir(config.remaining) #print "rootdir", rootdir + wasfailing = False while 1: - if config.option.looponfailing: + if config.option.looponfailing and (failures or not wasfailing): while not checkpyfilechange(rootdir): py.std.time.sleep(0.4) + wasfailing = len(failures) failures = failure_master(config.option.executable, out, args, failures) if not config.option.looponfailing: break
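As a closing illustration, the rerun decision added to remote.py above can be read as a small standalone loop; run_tests and files_changed stand in for failure_master() and checkpyfilechange() and are not real py lib names:

    import time

    def loop_on_failing(run_tests, files_changed):
        # sketch of the looponfailing flow: once a previously failing run
        # comes back green, rerun everything immediately instead of waiting
        # for the next file change
        failures = []
        was_failing = False
        while True:
            if failures or not was_failing:
                while not files_changed():
                    time.sleep(0.4)        # idle until a .py/.c/.h file changes
            was_failing = bool(failures)
            failures = run_tests(failures) # rerun old failures, or the whole suite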